content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def main_locust():
    """ Performance test with locust: parse command line options and run commands.

    Rewrites ``sys.argv`` so the arguments can be handed straight to locust,
    substituting the testcase file referenced by ``-f`` with a generated
    locustfile. Exits the process on error or after delegating to locust.
    """
    try:
        from ate import locusts
    except ImportError:
        print("Locust is not installed, exit.")
        # fix: was builtin exit(1); use sys.exit consistently with the
        # other exit paths in this function
        sys.exit(1)
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        # no arguments given: fall through to locust's help output
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)
    try:
        # the testcase file is the argument immediately following '-f'
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        print("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # replace the testcase path with the generated locustfile path
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
    if "--full-speed" in sys.argv:
        locusts.run_locusts_at_full_speed(sys.argv)
    else:
        locusts.main()
def secret_delete(
    config, validate,
    repository, profile_name, secret_name
):
    """[sd] Delete secrets from multiple repositories providing a string delimited by commas ","\n
    Example: ghs secret-delete -p willy -r 'githubsecrets, serverless-template'"""  # noqa: 501
    # Resolve the profile once, then delete the named secret from every
    # repository in the comma-separated list, collecting each response.
    profile = Profile(config, profile_name)
    responses = [
        Secret(config, profile, repo, secret_name).delete()
        for repo in list_by_comma(repository)
    ]
    print_pretty_json(responses)
def test_vuln_list_route(cl_operator):
    """vuln list route test"""
    # The list endpoint should simply respond with HTTP 200 for an operator.
    list_url = url_for('storage.vuln_list_route')
    resp = cl_operator.get(list_url)
    assert resp.status_code == HTTPStatus.OK
def load_html_file(file_dir):
    """Load an HTML file from disk and parse it with BeautifulSoup.

    Runs of spaces adjacent to tag brackets are replaced with '!space!'
    placeholders (one per space) so they survive parsing.
    """
    with open(file_dir, 'rb') as fp:
        data = fp.read()
    # On Windows or Python 3 the regex pass operates on text, not bytes.
    needs_text = os.name == 'nt' or sys.version_info[0] == 3
    if needs_text:
        data = data.decode(encoding='utf-8', errors='strict')
    # Encode spaces that follow a '>' so they are preserved.
    data = re.sub(r'(\>)([ ]+)',
                  lambda m: m.group(1) + ('!space!' * len(m.group(2))), data)
    # Encode spaces that precede a '<' the same way.
    data = re.sub(r'([ ]+)(\<)',
                  lambda m: ('!space!' * len(m.group(1))) + m.group(2), data)
    if needs_text:
        data = data.encode('utf-8', 'ignore')
    return BeautifulSoup(data, 'html.parser')
def read_single_keypress():
    """Waits for a single keypress on stdin.
    This is a silly function to call if you need to do it a lot because it has
    to store stdin's current setup, setup stdin for reading single keystrokes
    then read the single keystroke then revert stdin back after reading the
    keystroke.
    Returns a tuple of characters of the key that was pressed - on Linux,
    pressing keys like up arrow results in a sequence of characters. Returns
    ('\x03',) on KeyboardInterrupt which can happen when a signal gets
    handled.
    """
    import termios, fcntl, sys, os
    fd = sys.stdin.fileno()
    # save old state
    flags_save = fcntl.fcntl(fd, fcntl.F_GETFL)
    attrs_save = termios.tcgetattr(fd)
    # make raw - the way to do this comes from the termios(3) man page.
    attrs = list(attrs_save)  # copy the stored version to update
    # iflag: disable break/parity/CR-NL translation and flow control so
    # bytes arrive untouched
    attrs[0] &= ~(
        termios.IGNBRK
        | termios.BRKINT
        | termios.PARMRK
        | termios.ISTRIP
        | termios.INLCR
        | termios.IGNCR
        | termios.ICRNL
        | termios.IXON
    )
    # oflag: disable output post-processing
    attrs[1] &= ~termios.OPOST
    # cflag: 8-bit characters, no parity
    attrs[2] &= ~(termios.CSIZE | termios.PARENB)
    attrs[2] |= termios.CS8
    # lflag: no echo, no canonical (line-buffered) mode, no signal chars
    attrs[3] &= ~(
        termios.ECHONL | termios.ECHO | termios.ICANON | termios.ISIG | termios.IEXTEN
    )
    termios.tcsetattr(fd, termios.TCSANOW, attrs)
    # turn off non-blocking so the first read waits for a key
    fcntl.fcntl(fd, fcntl.F_SETFL, flags_save & ~os.O_NONBLOCK)
    # read a single keystroke
    ret = []
    try:
        ret.append(sys.stdin.read(1))  # returns a single character
        # switch to non-blocking to drain any multi-byte escape sequence
        # (e.g. arrow keys) without waiting for more input
        fcntl.fcntl(fd, fcntl.F_SETFL, flags_save | os.O_NONBLOCK)
        c = sys.stdin.read(1)  # returns a single character
        while len(c) > 0:
            ret.append(c)
            c = sys.stdin.read(1)
    except KeyboardInterrupt:
        ret.append("\x03")
    finally:
        # restore old state
        termios.tcsetattr(fd, termios.TCSAFLUSH, attrs_save)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags_save)
    return tuple(ret)
def reject(node_id, plugin):
    """Record *node_id* on the plugin so future connections from it are rejected."""
    message = "Rejecting connections from {}".format(node_id)
    print(message)
    plugin.reject_ids.append(node_id)
def set_table(table, fold_test, inner_number_folds, index_table, y_name):
    """Prepare the data-information table and the train/test splits.

    Adds each patient's start and end indexes in the concatenated data
    object (via ``add_index``), holds out the rows whose "fold" equals
    *fold_test* as the test set, and produces *inner_number_folds*
    shuffled, stratified cross-validation splits of the remaining rows
    (stratified on the rounded *y_name* column).  Splitting happens at
    the patient level, so all returned indexes are table indexes, not
    concatenated-object indexes.

    Parameters
    ----------
    table : pd.DataFrame
        data information.
    fold_test : int
        number of the fold which will be used for testing.
    inner_number_folds : int
        number of splits used in the cross validation.
    index_table : dict
        maps each file (key) to its start and end index in the data object
        (concatenated encoded bags)
    y_name : str
        or "y_interest", is the name of the target variable.

    Returns
    -------
    pd.DataFrame, list(tuple), pd.Index
        1: the table DataFrame augmented with start and end indexes;
        2: the cross-validation splits, each a
           (train_indexes, val_indexes) pair of arrays;
        3: indexes of the test dataset.
    """
    # Fold the start/end index information into the table itself.
    table = add_index(table, index_table)
    train_table = table[table["fold"] != fold_test]
    test_index = table[table["fold"] == fold_test].index
    # Round the target so stratification operates on discrete classes.
    strat_labels = train_table[y_name].round(0)
    # Shuffled StratifiedKFold preserves relative class frequency per fold.
    splitter = StratifiedKFold(n_splits=inner_number_folds, shuffle=True)
    index_folds = []
    for fold_train, fold_val in splitter.split(train_table.index, strat_labels):
        index_folds.append((np.array(train_table.index[fold_train]),
                            np.array(train_table.index[fold_val])))
    return table, index_folds, test_index
def parse_args():
    """Parse the command-line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``bucket_name`` (-b/--bucket-name) and
        ``days`` (-d/--days, int).
    """
    parser = ArgumentParser()
    parser.add_argument("-b", "--bucket-name", help="S3 bucket name")
    parser.add_argument("-d", "--days", type=int, help="Number of days")
    return parser.parse_args()
def main():
    """
    Re-construct the ALPHABET with the secret number offered by user.
    And encrypt the garbled message for the user to get the meaningful message.
    """
    # user's secret number
    secret = int(input('Secret number: '))
    # construct new alphabet system with the secret number
    # fix: original read `new_alphabet == new_alphabet(secret)` -- a no-op
    # equality comparison that discarded the shifted alphabet entirely
    shifted_alphabet = new_alphabet(secret)  # noqa: F841 (kept if new_alphabet has side effects)
    # user's garbled message
    code = input('What\'s the ciphered string? ')
    code = code.upper()
    print('The deciphered string is: ' + de_code(secret, code))
def predict(file):
    """Classify an image file with the global NET model.

    Loads the image at WIDTH x HEIGHT, runs it through the network, and
    returns the predicted class label together with the raw score vector.
    """
    image = load_img(file, target_size=(WIDTH, HEIGHT))
    batch = np.expand_dims(img_to_array(image), axis=0)
    scores = NET.predict(batch)[0]
    best = np.argmax(scores)
    return CLASSES[best], scores
def sample_categorical(pmf):
    """Sample from a categorical distribution.

    Args:
      pmf: Probability mass function. Output of a softmax over categories.
        Array of shape [batch_size, number of categories]. Rows sum to 1.
        A 1-D array is treated as a single batch entry.

    Returns:
      idxs: Integer array of shape [batch_size, 1] holding the sampled
        category index per row.
    """
    if pmf.ndim == 1:
        pmf = np.expand_dims(pmf, 0)
    batch_size = pmf.shape[0]
    cdf = np.cumsum(pmf, axis=1)
    rand_vals = np.random.rand(batch_size)
    # fix: np.zeros defaults to float64, so the function returned float
    # "indices" despite the documented integer contract; allocate ints
    idxs = np.zeros([batch_size, 1], dtype=np.int64)
    for i in range(batch_size):
        # inverse-CDF sampling: first cdf entry >= the uniform draw
        idxs[i] = cdf[i].searchsorted(rand_vals[i])
    return idxs
def make_Dex_3D(dL, shape, bloch_x=0.0):
    """Forward derivative along x for a flattened 3-D grid.

    Builds the sparse forward-difference operator scaled by 1/dL, with a
    Bloch phase factor exp(1j*bloch_x) on the periodic wrap-around
    diagonal.
    """
    Nx, Ny, Nz = shape
    n = Nx * Ny * Nz
    # Distance in the flattened index between x-neighbours.
    stride = Nz * Ny
    phasor_x = np.exp(1j * bloch_x)
    diagonals = [-1, 1, phasor_x]
    offsets = [0, stride, stride - n]
    Dex = sp.diags(diagonals, offsets, shape=(n, n))
    Dex = 1 / dL * sp.kron(sp.eye(1), Dex)
    return Dex
def test_2d_array_support():
    """
    Test with 2d array
    """
    data = [[1, 6, 0, 5], [1, 2, 4, 5], [1, 7, 8, 5]]
    kept_columns = variance_thresholding(data)
    # Only columns 1 and 2 have non-zero variance.
    assert np.array_equal(kept_columns, [1, 2])
def feature_decoder(proto_bytes):
    """Deserializes the ``ProtoFeature`` bytes into Python.

    Args:
        proto_bytes (bytes): The ProtoBuf encoded bytes of the ProtoBuf class.

    Returns:
        :class:`~geopyspark.vector_pipe.Feature`
    """
    return from_pb_feature(ProtoFeature.FromString(proto_bytes))
async def calculate_board_fitness_report(
    board: list, zone_height: int, zone_length: int
) -> tuple:
    """Fetch the fitness report for *board* from the solver service.

    Posts the board representation to the FITNESS_REPORT_SCORE_LINK
    endpoint and unpacks the collision counts from the JSON response.

    Args:
        board (list): A full filled board representation.
        zone_height (int): The zones height.
        zone_length (int): The zones length.

    Returns:
        tuple: (total, column, row, zone) collision counts, in that order.
    """
    payload = {"zoneHeight": zone_height, "zoneLength": zone_length, "board": board}
    endpoint = str(environ["FITNESS_REPORT_SCORE_LINK"])
    request_headers = {"Authorization": api_key, "Content-Type": "application/json"}
    async with ClientSession(headers=request_headers) as session:
        async with session.post(url=endpoint, json=payload) as response:
            report = await response.json()
    return (
        report["totalCollisions"],
        report["columnCollisions"],
        report["rowCollisions"],
        report["zoneCollisions"],
    )
def del_record(conn, hosted_zone_id, name, type, values, ttl=600,
               identifier=None, weight=None, comment=""):
    """Delete a record from a zone: name, type, ttl, identifier, and weight must match."""
    # Thin wrapper: delegate to _add_del with the DELETE action.
    action = "DELETE"
    _add_del(conn, hosted_zone_id, action, name, type, identifier,
             weight, values, ttl, comment)
def quote_fqident(s):
    """Quote fully qualified SQL identifier.

    The '.' is taken as namespace separator (only the first dot splits)
    and all parts are quoted separately.  Identifiers without a schema
    get 'public' prepended.

    Example:
    >>> quote_fqident('tbl')
    'public.tbl'
    >>> quote_fqident('Baz.Foo.Bar')
    '"Baz"."Foo.Bar"'
    """
    parts = s.split('.', 1)
    if len(parts) == 2:
        return '.'.join(map(quote_ident, parts))
    return 'public.' + quote_ident(s)
def export_model(save_path, output_file, gc_metadata=None):
    """ Export the variables from a TF v1 or v2 model to a given output folder and
    optionally validate them against a given json metadata file.
    """
    # A v1 checkpoint is identified by a ".meta" file next to the checkpoint
    # prefix; a v2 SavedModel by saved_model.pb/.pbtxt inside the directory.
    meta_file = save_path + ".meta"
    pb_file = os.path.join(save_path, "saved_model.pb")
    pbtxt_file = os.path.join(save_path, "saved_model.pbtxt")
    with Session(graph=tf.Graph()) as session:
        if os.path.isfile(meta_file):
            logging.info("Loading v1 saved model from folder %s", save_path)
            MetaGraph(session, save_path).export_variables(output_file, gc_metadata)
        elif os.path.isfile(pb_file) or os.path.isfile(pbtxt_file):
            logging.info("Loading v2 Keras SavedModel from folder %s", save_path)
            SavedModel(session, save_path).export_variables(output_file, gc_metadata)
        else:
            # logging.fatal does not terminate the process, so exit explicitly.
            logging.fatal(("Could not find any TensorFlow v1 (%s) or v2 (%s or %s)"
                           " models"), meta_file, pb_file, pbtxt_file)
            exit(1)
def row_generator(x, H, W, C):
    """Returns a single entry in the generated dataset.

    The frame data is filled with random uint8 values in [0, 10) with
    shape (H, W, C); *x* is used verbatim as the frame id.
    """
    frame = np.random.randint(0, 10, dtype=np.uint8, size=(H, W, C))
    return {'frame_id': x, 'frame_data': frame}
def find(ctx, acronyms, tags):
    """Searches for acronyms."""
    # Build the aggregate from the configured lookup backends, run the
    # query, optionally narrow by tags, then display.
    factory_lookups = LookupFactory.from_config(ctx.obj)
    aggregate = LookupAggregate(factory_lookups)
    aggregate.request(acronyms)
    if tags:
        aggregate.filter_tags(tags)
    aggregate.show_results()
def update_mac_to_name_entries():
    """Updates the mac_to_name table with new entries from the log.

    Inserts every distinct MAC address seen in log_in_range that is not
    already present in mac_to_name, then commits and closes the connection.
    """
    _logger.debug("Updating mac_to_name table")
    # fix: the original did get_saib_connection().cursor() here, so
    # `connection` was already a cursor and the `.cursor()`, `.commit()`
    # and `.close()` calls below targeted the wrong object; keep the
    # connection itself and open the cursor inside the context manager
    connection = get_saib_connection()
    try:
        with connection.cursor() as cursor:
            sql = '''
            INSERT INTO mac_to_name (mac_address)
            SELECT DISTINCT(mac_address) FROM log_in_range
            WHERE mac_address NOT IN (SELECT mac_address FROM mac_to_name)
            '''
            _logger.debug("Running %s" % sql)
            cursor.execute(sql)
        connection.commit()
    finally:
        connection.close()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: model scores of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: iterable of k values to report.

    Returns:
        list of scalar tensors, one per k, each the percentage of samples
        whose target class appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # fix: use reshape instead of view -- the sliced/expanded tensor can
        # be non-contiguous, and .view() raises on non-contiguous tensors
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def define_model(input_shape, output_shape, FLAGS):
    """
    Define the model along with the TensorBoard summaries

    Builds a U-Net style segmentation network (TF v1 graph mode with Keras
    layers): a 5-level contracting path, then an expanding path whose
    upsampling is either UpSampling2D or Conv2DTranspose depending on
    FLAGS.use_upsampling, with skip connections at each level.

    Returns a dict with the input/label placeholders, the output mask,
    the Dice loss, and dice/sensitivity/specificity metrics (plus *_test
    duplicates), and registers TensorBoard summaries as a side effect.
    """
    data_format = "channels_last"
    concat_axis = -1  # channels axis for skip-connection concatenation
    n_cl_out = 1  # Number of output classes
    dropout = 0.2  # Percentage of dropout for network layers
    num_datapoints = input_shape[0]
    # Placeholders; first (batch) dimension left as None for variable batch size.
    imgs = tf.placeholder(tf.float32,
                          shape=([None] + list(input_shape[1:])))
    msks = tf.placeholder(tf.float32,
                          shape=([None] + list(output_shape[1:])))
    inputs = K.layers.Input(tensor=imgs, name="Images")
    # Shared hyper-parameters for the plain and transposed convolutions.
    params = dict(kernel_size=(3, 3), activation="relu",
                  padding="same", data_format=data_format,
                  kernel_initializer="he_uniform")
    trans_params = dict(kernel_size=(2, 2), strides=(2, 2),
                        data_format=data_format,
                        kernel_initializer="he_uniform",
                        padding="same")
    # --- Contracting path ---
    conv1 = K.layers.Conv2D(name="conv1a", filters=32, **params)(inputs)
    conv1 = K.layers.Conv2D(name="conv1b", filters=32, **params)(conv1)
    pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv1)
    conv2 = K.layers.Conv2D(name="conv2a", filters=64, **params)(pool1)
    conv2 = K.layers.Conv2D(name="conv2b", filters=64, **params)(conv2)
    pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv2)
    conv3 = K.layers.Conv2D(name="conv3a", filters=128, **params)(pool2)
    # Trying dropout layers earlier on, as indicated in the paper
    conv3 = K.layers.Dropout(dropout)(conv3)
    conv3 = K.layers.Conv2D(name="conv3b", filters=128, **params)(conv3)
    pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv3)
    conv4 = K.layers.Conv2D(name="conv4a", filters=256, **params)(pool3)
    # Trying dropout layers earlier on, as indicated in the paper
    conv4 = K.layers.Dropout(dropout)(conv4)
    conv4 = K.layers.Conv2D(name="conv4b", filters=256, **params)(conv4)
    pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv4)
    # Bottleneck
    conv5 = K.layers.Conv2D(name="conv5a", filters=512, **params)(pool4)
    conv5 = K.layers.Conv2D(name="conv5b", filters=512, **params)(conv5)
    # --- Expanding path with skip connections ---
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up6", size=(2, 2))(conv5)
    else:
        up = K.layers.Conv2DTranspose(name="transConv6", filters=256,
                                      **trans_params)(conv5)
    up6 = K.layers.concatenate([up, conv4], axis=concat_axis)
    conv6 = K.layers.Conv2D(name="conv6a", filters=256, **params)(up6)
    conv6 = K.layers.Conv2D(name="conv6b", filters=256, **params)(conv6)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up7", size=(2, 2))(conv6)
    else:
        up = K.layers.Conv2DTranspose(name="transConv7", filters=128,
                                      **trans_params)(conv6)
    up7 = K.layers.concatenate([up, conv3], axis=concat_axis)
    conv7 = K.layers.Conv2D(name="conv7a", filters=128, **params)(up7)
    conv7 = K.layers.Conv2D(name="conv7b", filters=128, **params)(conv7)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up8", size=(2, 2))(conv7)
    else:
        up = K.layers.Conv2DTranspose(name="transConv8", filters=64,
                                      **trans_params)(conv7)
    up8 = K.layers.concatenate([up, conv2], axis=concat_axis)
    conv8 = K.layers.Conv2D(name="conv8a", filters=64, **params)(up8)
    conv8 = K.layers.Conv2D(name="conv8b", filters=64, **params)(conv8)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up9", size=(2, 2))(conv8)
    else:
        up = K.layers.Conv2DTranspose(name="transConv9", filters=32,
                                      **trans_params)(conv8)
    up9 = K.layers.concatenate([up, conv1], axis=concat_axis)
    conv9 = K.layers.Conv2D(name="conv9a", filters=32, **params)(up9)
    conv9 = K.layers.Conv2D(name="conv9b", filters=32, **params)(conv9)
    # 1x1 convolution + sigmoid produces the per-pixel mask probability.
    predictionMask = K.layers.Conv2D(name="Mask", filters=n_cl_out,
                                     kernel_size=(1, 1),
                                     data_format=data_format,
                                     activation="sigmoid")(conv9)
    """
    Define the variables, losses, and metrics
    We"ll return these as a dictionary called "model"
    """
    model = {}
    model["input"] = imgs
    model["label"] = msks
    model["output"] = predictionMask
    model["loss"] = dice_coef_loss(msks, predictionMask)
    model["metric_dice"] = dice_coef(msks, predictionMask)
    model["metric_sensitivity"] = sensitivity(msks, predictionMask)
    model["metric_specificity"] = specificity(msks, predictionMask)
    # Test-time entries reuse the same ops; kept under separate keys so the
    # training loop can fetch them by name.
    model["metric_dice_test"] = dice_coef(msks, predictionMask)
    model["loss_test"] = dice_coef_loss(msks, predictionMask)
    model["metric_sensitivity_test"] = sensitivity(msks, predictionMask)
    model["metric_specificity_test"] = specificity(msks, predictionMask)
    """
    Summaries for TensorBoard
    """
    tf.summary.scalar("loss", model["loss"])
    tf.summary.histogram("loss", model["loss"])
    tf.summary.scalar("dice", model["metric_dice"])
    tf.summary.histogram("dice", model["metric_dice"])
    tf.summary.scalar("sensitivity", model["metric_sensitivity"])
    tf.summary.histogram("sensitivity", model["metric_sensitivity"])
    tf.summary.scalar("specificity", model["metric_specificity"])
    tf.summary.histogram("specificity", model["metric_specificity"])
    tf.summary.image("predictions", predictionMask, max_outputs=3)
    tf.summary.image("ground_truth", msks, max_outputs=3)
    tf.summary.image("images", imgs, max_outputs=3)
    summary_op = tf.summary.merge_all()
    return model
def test_has_version(module):
    """Test has_version function."""
    result = imported.has_version(module)
    assert result
def group_files(config_files, group_regex, group_alias="\\1"):
    """Group input files by a regular expression.

    For every list-valued entry of *config_files*, each (sorted) file name
    is matched against *group_regex* and grouped under the alias expanded
    from the match.  An entry is left untouched when no file matches;
    a ValueError is raised when only some files match.
    """
    pattern = re.compile(group_regex)
    for key, files in list(config_files.items()):
        if not isinstance(files, list):
            continue
        grouped = collections.defaultdict(list)
        unmatched = []
        for filename in sorted(files):
            match = pattern.search(filename)
            if match is None:
                unmatched.append(filename)
            else:
                grouped[match.expand(group_alias)].append(filename)
        if len(unmatched) == 0:
            config_files[key] = [{name: members}
                                 for name, members in grouped.items()]
        elif len(unmatched) != len(files):
            # Partial matches are ambiguous -- refuse to guess.
            raise ValueError(
                "input files not matching regular expression {}: {}"
                .format(group_regex, str(unmatched)))
    return config_files
def integration_test(
        name,
        srcs,
        deps = [],
        defs = [],
        native_srcs = [],
        main_class = None,
        enable_gwt = False,
        gwt_deps = [],
        closure_defines = dict(),
        disable_uncompiled_test = False,
        disable_compiled_test = False,
        suppress = [],
        j2cl_library_tags = [],
        tags = [],
        plugins = []):
    """Macro that turns Java files into integration test targets.
    deps are Labels of j2cl_library() rules. NOT labels of
    java_library() rules.

    Generates a j2cl_library for the sources plus a compiled
    closure_js_test wired to a generated JS test harness that invokes
    <main_class>.main().  NOTE(review): several parameters
    (enable_gwt, gwt_deps, disable_uncompiled_test, disable_compiled_test)
    are accepted but unused in this visible body -- confirm whether they
    are consumed elsewhere before removing.
    """
    # figure out the current location; default entry point is <package>.Main
    java_package = get_java_package(native.package_name())
    if not main_class:
        main_class = java_package + ".Main"
    optimized_extra_defs = [
        # Turn on asserts since the integration tests rely on them.
        # TODO: Enable once the option is made available.
        # "--remove_j2cl_asserts=false",
        # Avoid 'use strict' noise.
        # "--emit_use_strict=false",
        # Polyfill re-write is disabled so that size tracking only focuses on
        # size issues that are actionable outside of JSCompiler or are expected
        # to eventually be addressed inside of JSCompiler.
        # TODO: Phantomjs needs polyfills for some features used in tests.
        #"--rewrite_polyfills=false",
        # Cuts optimize time nearly in half and the optimization leaks that it
        # previously hid no longer exist.
        "--closure_entry_point=gen.opt.Harness",
        # Since integration tests are used for optimized size tracking, set
        # behavior to the mode with the smallest output size which is what we
        # expect to be used for customer application production releases.
        # TODO: Enable once the remove_j2cl_asserts option is made available.
        # "--define=jre.checks.checkLevel=MINIMAL",
    ]
    # Translate the closure_defines dict into JSCompiler --define flags.
    define_flags = ["--define=%s=%s" % (k, v) for (k, v) in closure_defines.items()]
    defs = defs + define_flags
    j2cl_library(
        name = name,
        srcs = srcs,
        generate_build_test = False,
        deps = deps,
        javacopts = JAVAC_FLAGS,
        native_srcs = native_srcs,
        plugins = plugins,
        tags = tags + j2cl_library_tags,
        js_suppress = suppress,
    )
    # blaze test :uncompiled_test
    # blaze test :compiled_test
    # Generated goog.testing harness: runs Main.main([]) as the single test.
    test_harness = """
      goog.module('gen.test.Harness');
      goog.setTestOnly();
      var testSuite = goog.require('goog.testing.testSuite');
      var Main = goog.require('%s');
      testSuite({
        test_Main: function() {
          return Main.m_main__arrayOf_java_lang_String([]);
        }
      });
  """ % (main_class)
    _genfile("TestHarness_test.js", test_harness, tags)
    closure_js_test(
        name = "compiled_test",
        srcs = ["TestHarness_test.js"],
        deps = [
            ":" + name,
            "@io_bazel_rules_closure//closure/library:testing",
        ],
        defs = J2CL_TEST_DEFS + optimized_extra_defs + defs,
        suppress = suppress,
        testonly = True,
        tags = tags,
        entry_points = ["gen.test.Harness"],
    )
def get_current_icmp_seq():
    """See help(scapy.arch.windows.native) for more information.
    Returns the current ICMP seq number."""
    stats = GetIcmpStatistics()
    return stats['stats']['icmpOutStats']['dwEchos']
def text_mocked_request(data: str, **kwargs) -> web.Request:
    """For testing purposes: build a mocked text/plain request carrying *data*."""
    payload = data.encode()
    return mocked_request(payload, content_type="text/plain", **kwargs)
def doCVSUpdate(topDir, root, outDir):
    """ do a CVS update of the repository named root. topDir is the full path
        to the directory containing root. outDir is the full path to the
        directory where we will store the CVS output

        NOTE: Python 2 code (print statements, string module functions).
    """
    os.chdir(topDir)
    print "cvs update %s" % (root)
    # Run cvs and capture stdout+stderr into cvs.<root>.out (csh-style >&).
    os.system("cvs update %s >& cvs.%s.out" % (root, root))
    print "\n"
    # make sure that the cvs update was not aborted -- this can happen, for
    # instance, if the CVSROOT was not defined
    try:
        cf = open("cvs.%s.out" % (root), 'r')
    except IOError:
        fail("ERROR: no CVS output")
    else:
        cvsLines = cf.readlines()
        cvsFailed = 0
        # Scan the captured output for an "update aborted" marker.
        for line in cvsLines:
            if (string.find(line, "update aborted") >= 0):
                cvsFailed = 1
                break
        cf.close()
        if (cvsFailed):
            fail("ERROR: cvs update was aborted.  See cvs.%s.out for details" % (root) )
        # Archive the cvs log alongside the other build output.
        shutil.copy("cvs.%s.out" % (root), outDir)
def get_imu_data():
    """Returns a 2d array containing the following

    * ``senses[0] = accel[x, y, z]`` for accelerometer data
    * ``senses[1] = gyro[x, y, z]`` for gyroscope data
    * ``senses[2] = mag[x, y, z]`` for magnetometer data

    .. note:: Not all data may be aggregated depending on the IMU device connected to the robot.
    """
    # Placeholder values returned when no connected IMU overrides them.
    senses = [
        [100, 50, 25],
        [-100, -50, -25],
        [100, -50, 25]
    ]
    for imu in IMUs:
        if isinstance(imu, LSM9DS1_I2C):
            senses[0] = list(imu.acceleration)
            senses[1] = list(imu.gyro)
            senses[2] = list(imu.magnetic)
        elif isinstance(imu, MPU6050):
            senses[0] = list(imu.acceleration)
            # fix: attribute was misspelled `imu.gryo`, which raises
            # AttributeError on the adafruit MPU6050 driver (`.gyro`)
            senses[1] = list(imu.gyro)
    return senses
def is_numeric_dtype(arr_or_dtype: List[Literal["d", "a", "b", "c"]]):
    """
    usage.seaborn: 1
    """
    # NOTE(review): auto-generated API-usage stub (body intentionally empty).
    # The annotation appears machine-derived from observed call sites and
    # likely does not reflect the real pandas signature -- confirm before
    # relying on it.
    ...
def test_documents_post_error1(flask_client, user8, dev_app):
    """User should be given error if no title providing when adding a new item."""
    access_token = api.create_token(user8, dev_app.client_id)
    auth_headers = {"authorization": "Bearer " + access_token}
    response = flask_client.post(
        "/api/documents", headers=auth_headers, json={"title": ""}
    )
    payload = response.get_json()
    docs = models.Documents.query.filter_by(user_id=user8.id).all()
    # 400 with app error code 62, and no document added (fixture has 4).
    assert response.status_code == 400
    assert payload["error"] == 62
    assert len(docs) == 4
def loads(self, serializer, data):
    """
    Unserializes (loads) converting and loading
    the given data into the current object.

    :type serializer: Serializer
    :param serializer: The serializer object to be used
    to unserialize the given data.
    :rtype: String
    :return: The serialized data to be loaded.
    """
    # Unserialize the payload into a model mapping, then copy every
    # key/value pair into the current object.
    model = serializer.loads(data)
    for name, attribute in colony.legacy.items(model):
        self._load_value(name, attribute)
def parse_proc_diskstats(proc_diskstats_contents):
    # type: (six.text_type) -> List[Sample]
    """
    Parse /proc/diskstats contents into a list of samples.

    Devices with no read and no write activity are skipped.  Emits two
    samples per device: "<name> read" and "<name> write", in bytes.
    """
    return_me = []  # type: List[Sample]
    for line in proc_diskstats_contents.splitlines():
        match = PROC_DISKSTATS_RE.match(line)
        if not match:
            continue
        name = match.group(1)
        read_sectors = int(match.group(2))
        write_sectors = int(match.group(3))
        if read_sectors == 0 and write_sectors == 0:
            continue
        # Multiply by 512 to get bytes from sectors:
        # https://stackoverflow.com/a/38136179/473672
        return_me.append(Sample(name + " read", read_sectors * 512))
        return_me.append(Sample(name + " write", write_sectors * 512))
    return return_me
def test_pivot_with_sphere_fit():
    """Tests pivot calibration with sphere fitting"""
    config = {"method" : "sphere_fitting"}
    tracking_files = glob('tests/data/PivotCalibration/*')
    stacked = np.concatenate([np.loadtxt(name) for name in tracking_files])
    matrix_count = int(stacked.size / 16)
    matrices = stacked.reshape((matrix_count, 4, 4))
    _, _, residual_error = p.pivot_calibration(matrices, config)
    #do a regression test on the residual error
    assert round(residual_error, 3) == 2.346
def test_kwargs_unique(rotation):
    """ return_index and return_inverse edge cases"""
    # Exercise every flag combination except (False, False), in the same
    # order as before, checking only that no call raises.
    for with_index, with_inverse in ((True, True), (True, False), (False, True)):
        rotation.unique(return_index=with_index, return_inverse=with_inverse)
def normalize(x):
    """Normalize a vector or a set of vectors.

    Arguments:
      * x: a 1D array (vector) or a 2D array, where each row is a vector.

    Returns:
      * y: normalized copies of the original vector(s).

    Raises:
      * ValueError: if x has more than 2 dimensions (previously the
        function silently returned None in that case).
    """
    if x.ndim == 1:
        return x / np.sqrt(np.sum(x ** 2))
    if x.ndim == 2:
        # Row-wise norms, reshaped to a column for broadcasting.
        return x / np.sqrt(np.sum(x ** 2, axis=1)).reshape((-1, 1))
    raise ValueError("normalize expects a 1D or 2D array, got ndim=%d" % x.ndim)
def compute_perrakis_estimate(marginal_sample, lnlikefunc, lnpriorfunc,
                              lnlikeargs=(), lnpriorargs=(),
                              densityestimation='histogram', **kwargs):
    """
    Computes the Perrakis estimate of the bayesian evidence.
    The estimation is based on n marginal posterior samples
    (indexed by s, with s = 0, ..., n-1).

    :param array marginal_sample:
        A sample from the parameter marginal posterior distribution.
        Dimensions are (n x k), where k is the number of parameters.
    :param callable lnlikefunc:
        Function to compute ln(likelihood) on the marginal samples.
    :param callable lnpriorfunc:
        Function to compute ln(prior density) on the marginal samples.
    :param tuple lnlikeargs:
        Extra arguments passed to the likelihood function.
    :param tuple lnpriorargs:
        Extra arguments passed to the lnprior function.
    :param str densityestimation:
        The method used to estimate the marginal posterior density of each
        model parameter ("normal", "kde", or "histogram").

    Other parameters
    ----------------
    :param kwargs:
        Additional arguments passed to estimate_density function.

    :return: the Perrakis log-evidence estimate (float).

    References
    ----------
    Perrakis et al. (2014; arXiv:1311.0674)
    """
    if not isinstance(marginal_sample, np.ndarray):
        marginal_sample = np.array(marginal_sample)
    number_parameters = marginal_sample.shape[1]
    ##
    # Estimate marginal posterior density for each parameter.
    log_marginal_posterior_density = np.zeros(marginal_sample.shape)
    for parameter_index in range(number_parameters):
        # Extract samples for this parameter.
        x = marginal_sample[:, parameter_index]
        # Estimate density with method "densityestimation".
        log_marginal_posterior_density[:, parameter_index] = \
            estimate_logdensity(x, method=densityestimation, **kwargs)
    # Compute produt of marginal posterior densities for all parameters
    # (sum over the parameter axis, since these are log densities).
    log_marginal_densities = log_marginal_posterior_density.sum(axis=1)
    ##
    # Compute log likelihood in marginal sample.
    log_likelihood = lnlikefunc(marginal_sample, *lnlikeargs)
    # Compute weights (i.e. prior over marginal density)
    w = weight(marginal_sample, lnpriorfunc, lnpriorargs,
               log_marginal_densities)
    # Mask values with zero likelihood (a problem in lnlike)
    cond = log_likelihood != 0
    # Use identity for summation
    # http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction
    # ln(sum(x)) = ln(x[0]) + ln(1 + sum( exp( ln(x[1:]) - ln(x[0]) ) ) )
    # log_summands = log_likelihood[cond] + np.log(prior_probability[cond])
    # - np.log(marginal_densities[cond])
    perr = lib.log_sum(w[cond] + log_likelihood[cond]) - log(len(w[cond]))
    return perr
def update_configuration_set_event_destination(ConfigurationSetName=None, EventDestinationName=None, EventDestination=None):
    """
    Update the configuration of an event destination for a configuration set
    (Amazon Pinpoint Email API stub).

    In Amazon Pinpoint, events include message sends, deliveries, opens,
    clicks, bounces, and complaints.  Event destinations (Kinesis Data
    Firehose, CloudWatch, SNS, or Pinpoint) are the places that information
    about these events is sent to.

    :type ConfigurationSetName: string
    :param ConfigurationSetName: [REQUIRED] The name of the configuration set
        that contains the event destination to modify.
    :type EventDestinationName: string
    :param EventDestinationName: [REQUIRED] The name of the event destination
        to modify.
    :type EventDestination: dict
    :param EventDestination: [REQUIRED] An object that defines the event
        destination: 'Enabled' (bool), 'MatchingEventTypes' (list of
        'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|
        'RENDERING_FAILURE'), and optional 'KinesisFirehoseDestination',
        'CloudWatchDestination', 'SnsDestination', 'PinpointDestination'
        sub-objects.  See the AWS API documentation for the full schema.

    :rtype: dict
    :return: {} -- an HTTP 200 response if the request succeeds.

    Exceptions:
        PinpointEmail.Client.exceptions.NotFoundException
        PinpointEmail.Client.exceptions.TooManyRequestsException
        PinpointEmail.Client.exceptions.BadRequestException
    """
    # Documentation-only stub: the real call is dispatched by the client.
    pass
def main():
    """Prompt for a secret number and a ciphered string, then print the decoding.

    ALGORITHM: find the index of each ciphered character and map it back
    to the deciphered index via decipher().
    """
    shift = int(input('Secret number: '))
    ciphered = str(input('What\'s the ciphered string?'))
    print('The deciphered string is: ' + decipher(shift, ciphered))
def extract_features(extractor, loader, device, feature_transform=None):
    """Generator that extracts features from image batches.

    Each batch from `loader` is moved to `device`, cast to float, passed
    through `extractor` under no-grad, optionally post-processed with
    `feature_transform`, and yielded.
    """
    for batch in loader:
        with torch.no_grad():
            moved = batch.to(device).float()
            out = extractor(moved)
            yield feature_transform(out) if feature_transform else out
def postNewProfile(profile : Profile):
    """Create a new profile document unless one with the same email exists.

    Parameters:
        profile: Profile holding the candidate user's details
    Returns:
        bool: True if the profile was saved, False if the email is taken
    """
    candidate_email = profile.email
    existing = [doc for doc in collection.find({"email": candidate_email})]
    if existing:
        return False
    collection.save(dict(profile))
    return True
def store_incidents_for_mapping(incidents, integration_context):
    """Store ready incidents in the integration context for mapping pulls.

    At most 20 incidents are kept; an empty/None list leaves the context
    untouched.

    Args:
        incidents (list): The incidents prepared for mapping
        integration_context (dict): Mutated in place
    """
    if not incidents:
        return
    integration_context[INCIDENTS] = incidents[:20]
def main(is_semi_supervised, trial_num):
    """EM for Gaussian Mixture Models (unsupervised and semi-supervised)"""
    mode = 'semi-supervised' if is_semi_supervised else 'unsupervised'
    print('Running {} EM algorithm...'.format(mode))
    # Load dataset from the working directory
    x_all, z_all = load_gmm_dataset(os.path.join('.', 'train.csv'))
    # Keep only the unlabeled examples (z == UNLABELED)
    labeled_mask = (z_all != UNLABELED).squeeze()
    x_unlabeled = x_all[~labeled_mask, :]
    model = mixture.GaussianMixture(n_components=K)
    model.fit(x_unlabeled)
    plot_gmm_preds(x_unlabeled, model.predict(x_unlabeled), is_semi_supervised, plot_id=trial_num)
def buy():
    """Buy shares of stock.

    POST: validates the submitted symbol/share count, checks the user's
    cash, records the purchase plus a history entry, then redirects to
    the index page. GET: renders the buy form.
    """
    # if user reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        symbol = request.form.get("symbol")
        share = request.form.get("share")
        # ensure SYMBOL and Share are submitted (also guards missing fields)
        if not symbol or not share:
            return apology("Please Enter SYMBOL/SHARE CORRECTLY!")
        # ensure the stock exists (lookup once instead of twice)
        stock = lookup(symbol)
        if stock is None:
            return apology("SYMBOL DOES NOT EXIST!")
        # ensure the user's share input is positive
        if int(share) < 0:
            return apology("Cannot Buy Negative Shares Bruu!")
        # stock price
        stock_price = stock["price"]
        # user cash
        user_cash = db.execute("SELECT cash FROM users WHERE id=:id", id=session["user_id"])
        user_cash = float(user_cash[0]["cash"])
        # BUG FIX: the old code iterated over the characters of the share
        # string and summed the digits (e.g. "12" became 1 + 2 = 3);
        # parse the whole number instead.
        nShare = float(share)
        # ensure appropriate cash is available for purchase
        if not user_cash - stock_price * nShare >= 0:
            return apology("YOU DO NOT HAVE ENOUGH CASH")
        # check if stock already exists in purchase table, if yes then update the no. of stocks
        rows = db.execute("SELECT stockname FROM purchase WHERE user_id=:user_id AND stockname=:stockname", user_id=session["user_id"], stockname=request.form.get("symbol"))
        if rows:
            db.execute("UPDATE purchase SET nstocks = nstocks + :nstocks WHERE stockname = :stockname", nstocks=nShare, stockname=stock["symbol"])
        else:
            db.execute("INSERT INTO purchase (user_id, stockname, nstocks, price) VALUES (:user_id, :stockname, :nstocks, :price)",
                       user_id=session["user_id"], stockname=stock["symbol"], nstocks=nShare, price=stock_price)
        # bought
        by = "BUY"
        # current time
        c_time = str(datetime.utcnow())
        # insert data in history table
        db.execute("INSERT INTO history (user_id, stockname, nstocks, price, time, ty_purchase) VALUES (:user_id, :stockname, :nstocks, :price, :time, :b)", user_id=session["user_id"], stockname=stock["symbol"], nstocks=nShare, price=stock_price, time=c_time, b=by)
        # update the users cash
        db.execute("UPDATE users SET cash = cash - :tcash WHERE id=:user_id", tcash=stock_price*nShare, user_id=session["user_id"])
        return redirect(url_for("index"))
    # if user reached route via GET (as by submitting a form via GET)
    else:
        return render_template("buy.html")
def get_templates() -> List[dict]:
    """Return the Templates visible to the currently active client.

    Raises:
        NotificationClientNotFound: if no client is active.
    """
    active = get_active_notification_client()
    if not active:
        raise NotificationClientNotFound()
    return _get_templates(client=active)
def svn_mergeinfo_intersect2(*args):
    """
    svn_mergeinfo_intersect2(svn_mergeinfo_t mergeinfo1, svn_mergeinfo_t mergeinfo2,
        svn_boolean_t consider_inheritance, apr_pool_t result_pool,
        apr_pool_t scratch_pool) -> svn_error_t

    SWIG-generated thin wrapper: forwards all arguments unchanged to the
    C binding in _core and returns its result.
    """
    return _core.svn_mergeinfo_intersect2(*args)
def credit(args):
    """
    credit
    Automatically visit friends to collect credit points.

    `args` is accepted for CLI-dispatch compatibility but is not used here;
    all work is delegated to CreditSolver().run().
    """
    CreditSolver().run()
def _update_images():
    """Update all docker images in this list, running a few in parallel.

    Override images (from the config) are refreshed first, then every
    remaining known image.

    Returns:
        bool: True if any image was newly downloaded.
    """
    any_new = False

    def comment(name, new):
        # Log per-image outcome and remember whether anything changed.
        nonlocal any_new
        if new:
            log.info(f"Downloaded new Docker image for {name} - {docker.image_size(name)}")
        else:
            log.debug(f"Docker image is up to date for {name} - {docker.image_size(name)}")
        any_new |= new

    t0 = time()
    log.info("Downloading docker images...")
    override_images = set(config._image(i) for i in config.image_keys)
    with multiprocessing.Pool(6) as p:
        for name, new in p.imap_unordered(_update_image, override_images):
            comment(name, new)
    images = set(all_images()) | set(config.images) | override_images
    # BUG FIX: override images were processed a second time in the pass
    # below; only update the images not already refreshed above.
    remaining = images - override_images
    with multiprocessing.Pool(6) as p:
        for name, new in p.imap_unordered(_update_image, remaining):
            comment(name, new)
    log.info(f"All {len(images)} images are up to date, took {time()-t0:.02f}s")
    return any_new
def conv_backward(dZ, A_prev, W, b, padding="same", stride=(1, 1)):
    """Back propagation over a convolutional layer of a neural network.

    Parameters
    ----------
    dZ : ndarray of shape (m, h_new, w_new, c_new)
        Partial derivatives with respect to the unactivated output.
    A_prev : ndarray of shape (m, h_prev, w_prev, c_prev)
        Output of the previous layer.
    W : ndarray of shape (kh, kw, c_prev, c_new)
        Convolution kernels.
    b : ndarray of shape (1, 1, 1, c_new)
        Biases applied to the convolution.
    padding : {'same', 'valid'}
        Type of padding used in the forward pass.
    stride : tuple of (sh, sw)
        Strides for the convolution.

    Returns
    -------
    (dA_prev, dW, db) : partial derivatives with respect to the previous
    layer, the kernels and the biases.
    """
    sh, sw = stride
    kh, kw, _, c_new = W.shape
    m, h_prev, w_prev, _ = A_prev.shape
    _, h_new, w_new, _ = dZ.shape
    # NOTE: this mirrors the original padding computation, including the
    # trailing "+ 1", so results stay byte-for-byte identical.
    if padding == 'same':
        pad_w = int((((w_prev - 1) * sw + kw - w_prev) / 2) + 1)
        pad_h = int((((h_prev - 1) * sh + kh - h_prev) / 2) + 1)
    else:
        pad_h, pad_w = 0, 0
    padded = np.pad(A_prev, ((0,), (pad_h,), (pad_w,), (0,)),
                    constant_values=0, mode='constant')
    dW = np.zeros(W.shape)
    dA = np.zeros(padded.shape)
    # Bias gradient: sum over every spatial position of every example.
    db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)
    for ex in range(m):
        for row in range(h_new):
            top = row * sh
            for col in range(w_new):
                left = col * sw
                for ch in range(c_new):
                    grad = dZ[ex, row, col, ch]
                    window = padded[ex, top: top + kh, left: left + kw, :]
                    dW[:, :, :, ch] += window * grad
                    dA[ex, top: top + kh, left: left + kw, :] += \
                        grad * W[:, :, :, ch]
    if padding == 'same':
        dA = dA[:, pad_h: -pad_h, pad_w: -pad_w, :]
    return dA, dW, db
def clone(repo, user, site, parent=None):
    """Clone a repo from the requested site and user.

    :param repo: The name of the repo.
    :param user: The name of the user.
    :param site: The site to download from (an extension, e.g. '.com',
        is stripped if the bare name is not a known template key).
    :param parent: The parent folder where the repo will be cloned.
        By default, this is the current working directory.
    :return: The full path to the root directory of the cloned repo.
    """
    if parent is None:
        parent = os.getcwd()
    elif not os.path.isdir(parent):
        raise NotADirectoryError(parent)
    target = os.path.join(parent, repo)
    if os.path.isdir(target):
        raise IsADirectoryError(target)
    key = site.lower()
    if key not in URL_TEMPLATES:
        stem = os.path.splitext(key)[0]
        if stem not in URL_TEMPLATES:
            raise KeyError(key)
        key = stem
    url = URL_TEMPLATES[key].format(user=user, repo=repo)
    # TODO: Download to a temporary folder first, then rename.
    fetched = clone_from_url(url, parent)
    os.rename(fetched, target)
    if not os.path.isdir(target):
        raise NotADirectoryError(target)
    return target
def print_board(board):
    """Pretty-print a 9x9 sudoku board with 3x3 block separators.

    :param board: the actual board (9x9 list of lists)
    :return: None; the board is printed to stdout
    """
    width = len(board[0])
    for row_idx in range(len(board)):
        # horizontal rule before rows 3 and 6
        if row_idx % 3 == 0 and row_idx != 0:
            print("-----------------------")
        for col_idx in range(width):
            # vertical rule before columns 3 and 6
            if col_idx % 3 == 0 and col_idx != 0:
                print(" | ", end="")
            cell = board[row_idx][col_idx]
            # last column (index 8) terminates the line
            if col_idx == 8:
                print(cell)
            else:
                print(str(cell) + " ", end="")
def get_final_text(pred_text: str, orig_text: str, do_lower_case: bool) -> str:
    """Project the tokenized prediction back to the original text.

    Returns the span of `orig_text` corresponding to `pred_text`; if the
    alignment heuristic fails at any step, `orig_text` is returned
    unchanged.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Drop spaces while remembering each kept character's original index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        # Prediction is not a substring of the re-tokenized original text.
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def test_pushing(reference, summary, numbers):
    """
    Check that pushing elements keeps the moving average and sum correct.
    """
    window = MovingAverage(window=4)
    for expected_avg, expected_sum, value in zip(reference, summary, numbers):
        window.push(value)
        assert approx(window.avg) == expected_avg
        assert approx(window.sum) == expected_sum
def _test_montage_trans(raw, montage, pos_test, space='fsaverage',
                        coord_frame='auto', unit='auto'):
    """Test if a montage is transformed correctly.

    Applies `montage` without a transform, computes the template-to-head
    transform, applies it, and checks the resulting channel positions
    against `pos_test`.
    """
    _set_montage_no_trans(raw, montage)
    trans = template_to_head(
        raw.info, space, coord_frame=coord_frame, unit=unit)[1]
    applied = raw.get_montage()
    applied.apply_trans(trans)
    positions = np.array(list(applied.get_positions()['ch_pos'].values()))
    assert_almost_equal(pos_test, positions)
def createContext(data, id=None, keyTransform=None, removeNull=False):
    """Convert flattened key/value data into nested context dicts.

    :type data: ``dict`` or ``list``
    :param data: The data to be added to the context (required)
    :type id: ``str``
    :keyword id: The ID of the context entry
    :type keyTransform: ``function``
    :keyword keyTransform: A formatting function for the markdown table headers
    :type removeNull: ``bool``
    :keyword removeNull: True if empty columns should be removed, false otherwise
    :return: The converted context (list for list/tuple input, single dict otherwise)
    :rtype: ``list``
    """
    if not isinstance(data, (list, tuple)):
        return createContextSingle(data, id, keyTransform, removeNull)
    return [createContextSingle(item, id, keyTransform, removeNull) for item in data]
def default_xonshrc(env) -> "tuple[str, ...]":
    """
    ``['$XONSH_SYS_CONFIG_DIR/xonshrc', '$XONSH_CONFIG_DIR/xonsh/rc.xsh', '~/.xonshrc']``
    """
    candidates = (
        os.path.join(xonsh_sys_config_dir(env), "xonshrc"),
        os.path.join(xonsh_config_dir(env), "rc.xsh"),
        os.path.expanduser("~/.xonshrc"),
    )
    # Warn if the no-longer-supported static config file still exists.
    legacy = xonshconfig(env)
    if os.path.isfile(legacy):
        print(
            f"WARNING! old style configuration ({legacy}) is no longer "
            "supported. Please migrate to xonshrc."
        )
    return candidates
def move_existing_application():
    """Check if the sidewalk-webpage directory exists already. If so, rename it;
    otherwise create a fresh one.

    Uses os.path.isdir instead of parsing `ls` output (more robust, no extra
    process), and print() calls so the function runs under Python 3 as well.
    """
    import os
    print("Checking if the directory `sidewalk-webpage` already exists")
    if os.path.isdir(sidewalk_app_directory):
        print("Changing the directory name from `sidewalk-webpage` to `_sidewalk-webpage`")
        command = "mv %s %s" % (sidewalk_app_directory,
                                sidewalk_git_directory + "/_sidewalk-webpage")
        run_shell_command(command)
    else:
        # Directory doesn't exist; create one
        print("Directory `sidewalk-webpage` does not exist")
        command = "mkdir " + sidewalk_app_directory
        run_shell_command(command)
        print("Directory `sidewalk-webpage` created")
def haversine(phi1, lambda1, phi2, lambda2):
    """
    Calculate the angular great-circle distance with the haversine formula.

    Arguments are latitudes (phi) and longitudes (lambda) in radians;
    see parameters in spherical_law_of_cosines. Returns the central
    angle in radians.
    """
    half_dphi = (phi2 - phi1) / 2
    half_dlambda = (lambda2 - lambda1) / 2
    a = math.sin(half_dphi) ** 2 + \
        math.cos(phi1) * math.cos(phi2) * math.sin(half_dlambda) ** 2
    return 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
def get_backup_start_timestamp(bag_name):
    """Return the start time of a rosbag.

    Input: first bag name (path to a .bag file)
    Output: datetime object, taken from the bag metadata when available,
    otherwise parsed from the bag file name.
    """
    # SECURITY/DEPRECATION FIX: yaml.load without an explicit Loader is
    # deprecated and can construct arbitrary objects; the bag info is plain
    # data, so safe_load suffices.
    info_dict = yaml.safe_load(Bag(bag_name, 'r')._get_yaml_info())
    start_timestamp = info_dict.get("start", None)
    if start_timestamp is None:
        print("No start time info in bag, try to retrieve the start time by parsing bag name.")
        return parse_backup_start_timestamp(bag_name)
    return datetime.datetime.fromtimestamp(start_timestamp)
def ResidualSlopesSequence2ResidualPhase(Gradients,S2M,M2V,name='residual_phase_cube.fits',\
                                         path='.',binning=40):
    """
    Same function as ResidualSlopes2ResidualPhase but applies it to a sequence
    of slopes (gradients) instead of a single vector.
    Input:
        - Gradients: array of shape (nframes, 2480) with the slopes measured
          by the WFS
        - S2M: the slopes to modes matrix (shape (988, 2480))
        - M2V: the modes to voltage matrix (shape (1377, 988))
        - name: the name of the FITS file to save
        - path: path where the file is saved
        - binning: temporal binning factor to apply (40 by default; use
          binning=1 for none)
    Returns the residual phase cube of shape (nframes_binned, 240, 240).
    """
    # BUG FIX: the original test combined the two conditions with `and`,
    # so a 2D array with a wrong second dimension slipped through (and a
    # 1D array raised IndexError); both conditions must be checked with `or`.
    if Gradients.ndim != 2 or Gradients.shape[1] != 2480:
        raise IOError('The input vector must be a 2D array of shape (nframes,2480) (currently',Gradients.shape,')')
    if S2M.ndim!=2 or S2M.shape[1]!=2480:
        raise IOError('The input S2M matrix must have a shape (988, 2480) (currently',S2M.shape,')')
    if M2V.ndim!=2 or M2V.shape[0]!=1377:
        raise IOError('The input M2V matrix must have a shape (1377, 988) (currently',M2V.shape,')')
    IMF = fits.getdata(os.path.join(path_sparta,'SAXO_DM_IFM.fits')) #shape (1377, 240, 240)
    # The matrix needs to be normalised to allow conversion into optical
    # wavefront errors: influence matrix normalization = defoc meca in rad @ 632 nm
    rad_632_to_nm_opt = 1. / 2. / np.pi * 632 * 2
    IMF = IMF * rad_632_to_nm_opt
    IMF = IMF.reshape(1377, 240*240).T # shape (57600, 1377)
    nframes = Gradients.shape[0]
    # Temporal binning of the slopes.
    if binning == 1:
        slopes = Gradients
    elif binning > 1:
        nbinned = nframes // binning
        slopes = np.ndarray((nbinned, 2480), dtype=float)
        for i in range(nbinned):
            slopes[i,:] = np.mean(Gradients[i*binning:(i+1)*binning,:],axis=0)
        nframes = nbinned
    else:
        raise IOError('The binning factor must be an integer greater or equal to 1.')
    # BUG FIX: removed a duplicated reconstruction of the full unbinned
    # sequence whose result was immediately discarded (wasted time/memory).
    mode = slopes @ S2M.T
    volt = mode @ M2V.T
    res_turbulence = (volt @ IMF.T).reshape((nframes, 240, 240))
    fits.writeto(os.path.join(path,name), res_turbulence, overwrite=True)
    return res_turbulence
def get_mask_areas(masks: np.ndarray) -> np.ndarray:
    """Get mask areas from the compressed mask map.

    The smallest label (0, the background) is skipped.

    :param masks: integer label map where each annotation has a unique id
    :return: float array with the pixel count of each non-background id,
             ordered by ascending id
    """
    # np.unique returns the sorted ids together with their pixel counts in
    # a single pass, replacing the original per-id O(N) scans.
    _, counts = np.unique(masks, return_counts=True)
    # drop the first (background) label, matching the original [1:] slice
    return counts[1:].astype(np.float64)
def s3_analysis(conc=None):
    """
    Long running job where more information is collected.
    Use S3 get_object_list_v2 to get a list of the objects

    Collects per-bucket statistics, pushes them into the S3 gauges and
    commits them. `conc` is accepted but not used here — presumably a
    concurrency hint for callers; verify against the scheduler.
    """
    bucket_stats = s3_bucket_stats()
    update_s3_gauges(bucket_stats)
    commit_s3_gauges()
def run(agent, root_dir, restore_ckpt):
    """Main entrypoint for running and generating visualizations.
    Args:
        agent: str, agent type to use.
        root_dir: str, root directory where files will be stored.
        restore_ckpt: str, path to the checkpoint to reload.
    """
    # Start from a clean TF graph so repeated calls do not accumulate ops.
    tf.compat.v1.reset_default_graph()
    # Gin bindings: fixed environment and a small replay buffer.
    config = """
    atari_lib.create_atari_environment.game_name = 'SpaceInvaders'
    WrappedReplayBuffer.replay_capacity = 300
    """
    base_dir = os.path.join(root_dir, 'results', agent)
    # Apply the bindings above before constructing the runner.
    gin.parse_config(config)
    runner = create_runner(base_dir, restore_ckpt, agent)
    # Images are written under <root_dir>/results/<agent>/images.
    runner.run_full_experiment(os.path.join(base_dir, 'images'))
def cursor_from_image(image):
    """
    Take a valid cursor image and create a mouse cursor.

    Black pixels (0,0,0,255) become "X", white pixels (255,255,255,255)
    become ".", and anything else becomes a space. Returns a list of
    strings, one per pixel row.
    """
    pixel_chars = {(0,0,0,255) : "X",
                   (255,255,255,255) : "."}
    rect = image.get_rect()
    rows = []
    for y in range(rect.height):
        row_chars = [pixel_chars.get(tuple(image.get_at((x, y))), " ")
                     for x in range(rect.width)]
        rows.append("".join(row_chars))
    return rows
def h_matrix(jac, p, lamb, method='kotre', W=None):
    """
    JAC method of dynamic EIT solver:
    H = (J.T*J + lamb*R)^(-1) * J.T

    Parameters
    ----------
    jac: NDArray
        Jacobian
    p, lamb: float
        regularization parameters
    method: str, optional
        regularization method, one of {'kotre', 'lm', 'dgn'}
    W: NDArray, optional
        data weighting matrix; identity weighting if None

    Returns
    -------
    H: NDArray
        pseudo-inverse matrix of JAC
    """
    if W is None:
        j_w_j = np.dot(jac.transpose(), jac)
    else:
        j_w_j = multi_dot([jac.transpose(), W, jac])
    if method == 'kotre':
        # see adler-dai-lionheart-2007
        # p=0 : noise distribute on the boundary ('dgn')
        # p=0.5 : noise distribute on the middle
        # p=1 : noise distribute on the center ('lm')
        # BUG FIX: power the diagonal VECTOR before building the matrix.
        # The old code raised the full diagonal matrix elementwise to p,
        # so for p == 0 every off-diagonal zero became 0**0 == 1.
        r_mat = np.diag(np.diag(j_w_j) ** p)
    elif method == 'lm':
        # Marquardt-Levenberg, 'lm' for short
        # or can be called NOSER, DLS
        r_mat = np.diag(np.diag(j_w_j))
    else:
        # Damped Gauss Newton, 'dgn' for short
        r_mat = np.eye(jac.shape[1])
    # build H
    h_mat = np.dot(la.inv(j_w_j + lamb * r_mat), jac.transpose())
    return h_mat
def _get_flavors_metadata_ui_converters_from_configuration():
    """Get flavor metadata ui converters from flavor mapping config dir.

    Builds {adapter: {flavor: config_mapping}} from the config files, then
    merges each adapter's converters with its parents' recursively.
    """
    converters = {}
    for config in util.load_configs(setting.FLAVOR_MAPPING_DIR):
        adapter_name = config['ADAPTER']
        flavor_name = config['FLAVOR']
        converters.setdefault(adapter_name, {})[flavor_name] = \
            config.get('CONFIG_MAPPING', {})
    adapters = adapter_api.ADAPTERS
    # adapter -> parent adapter (None for roots)
    parents = {
        name: adapter.get('parent', None)
        for name, adapter in adapters.items()
    }
    for adapter_name in adapters:
        converters[adapter_name] = util.recursive_merge_dict(
            adapter_name, converters, parents
        )
    return converters
def outermost_scope_from_subgraph(graph, subgraph, scope_dict=None):
    """
    Returns the outermost scope of a subgraph.
    If the subgraph is not connected, there might be several
    scopes that are locally outermost. In this case, it
    throws an Exception.

    :param graph: graph/state providing ``scope_dict()`` if none is given
    :param subgraph: iterable of nodes whose common outermost scope is sought
    :param scope_dict: optional precomputed node -> parent-scope mapping
    :return: the outermost scope node, or None for the top level
    :raises TypeError: if several locally top-level scopes exist
    """
    if scope_dict is None:
        scope_dict = graph.scope_dict()
    scopes = set()
    # Collect the parent scope of every node in the subgraph.
    for element in subgraph:
        scopes.add(scope_dict[element])
    # usual case: Root of scope tree is in subgraph,
    # return None (toplevel scope)
    if None in scopes:
        return None
    toplevel_candidates = set()
    for scope in scopes:
        # search the one whose parent is not in scopes
        # that must be the top level one
        current_scope = scope_dict[scope]
        while current_scope and current_scope not in scopes:
            current_scope = scope_dict[current_scope]
        # Reached the root without meeting another collected scope:
        # this scope is locally outermost.
        if current_scope is None:
            toplevel_candidates.add(scope)
    if len(toplevel_candidates) != 1:
        raise TypeError("There are several locally top-level nodes. "
                        "Please check your subgraph and see to it "
                        "being connected.")
    else:
        return toplevel_candidates.pop()
def calc_entropy_ew(molecule, temp):
    """
    Exponential well entropy.

    :param molecule: molecule object providing mass and EW parameters
        (ew_a_inv_ang, ew_k_kcal)
    :param temp: temperature in K
    :return: exponential-well entropy (in units of Constants.r)
    """
    mass = molecule.mass / Constants.amu_to_kg * Constants.amu_to_au
    a = molecule.ew_a_inv_ang * Constants.inverse_ang_inverse_au
    k = molecule.ew_k_kcal * Constants.kcal_mol_to_au
    q_t = _q_t_ew(molecule, temp)
    beta = 1.0 / (Constants.kb_au * temp)
    cap_lambda = ((2.0 * mass * np.pi) / (beta * Constants.h_au ** 2)) ** 1.5

    def _integrand(r, beta_, well_k, alpha):
        # r^2 * exp(-beta*k*(exp(a*r)-1) + a*r)
        return r ** 2 * np.exp(-beta_ * well_k * (np.exp(alpha * r) - 1.0) + alpha * r)

    integral = integrate.quad(_integrand, 0.0, 10.0, args=(beta, k, a))[0]
    term_4 = 4.0 * np.pi * (k * beta * cap_lambda / q_t) * integral
    return Constants.r * (1.5 - k * beta + np.log(q_t) + term_4)
def registerCreatorDataCallbackURL():
    """
    Web handler: attach a dataCallbackURL to an Agent.Creator entity.

    params:
        creatorID - UUID of an existing Agent.Creator entity
        dataCallbackURL - the URL to store on that entity

    Responds 200 with a JSON status on success, 500 with the error
    details on any failure.
    """
    try:
        global rmlEngine
        # The JSON payload arrives as the key(s) of the POST dict; the last
        # key wins — presumably there is only one. TODO confirm.
        rawRequest = request.POST.dict
        for rawKey in rawRequest.keys():
            keyVal = rawKey
            jsonPayload = json.loads(keyVal)
        #ownerID
        try:
            creatorID = jsonPayload["creatorID"]
            creatorUUID = uuid.UUID(creatorID)
        except KeyError:
            raise Exceptions.MissingPOSTArgumentError("creatorID parameter missing from POST request.")
        except Exception as e:
            raise e
        try:
            # Resolve the entity type to ensure the UUID refers to a creator.
            ownerEntityType = rmlEngine.api.getEntityMemeType(creatorUUID)
        except Exception as e:
            raise Exceptions.NoSuchEntityError("creatorID parameter value %s does not exist." %creatorID)
        if ownerEntityType != "Agent.Creator":
            raise Exceptions.TemplatePathError("creatorID parameter value %s does not refer to a valid data creator" %creatorID)
        #stimulusCallbackURL
        try:
            dataCallbackURL = jsonPayload["dataCallbackURL"]
        except KeyError:
            raise Exceptions.MissingPOSTArgumentError("dataCallbackURL parameter missing from POST request.")
        except Exception as e:
            raise e
        try:
            rmlEngine.api.setEntityPropertyValue(creatorUUID, "dataCallbackURL", dataCallbackURL)
        except Exception as e:
            raise Exceptions.MismatchedPOSTParametersError("Error while assigning stimulusCallbackURL value %s to entity %s " %(dataCallbackURL, creatorID))
        returnStr = "Assigned dataCallbackURL %s to owner %s " %(dataCallbackURL, creatorID)
        response.body = json.dumps({"status": returnStr})
        response.status = 200
        return response
    except Exception as unusedE:
        # Any failure above is mapped to a 500 with the exception details.
        fullerror = sys.exc_info()
        errorID = str(fullerror[0])
        errorMsg = str(fullerror[1])
        returnStr = "Failed to assign dataCallbackURL to new Agent.Creator Entity. %s, %s" %(errorID, errorMsg)
        response.body = json.dumps({"status": returnStr})
        response.status = 500
        return response
def csm(A, B):
    """
    Calculate Cosine similarity measure of distance between two vectors `A` and `B`.

    Parameters
    -----------
    A : ndarray
        First vector containing values
    B : ndarray
        Second vector containing values

    Returns
    --------
    float
        distance value between two vectors

    Examples
    ---------
    >>> distance = csm(A, B)
    """
    numerator = np.sum(A * B)
    # BUG FIX: the cosine-similarity denominator is the product of the
    # Euclidean norms, i.e. sqrt(sum(A**2)) * sqrt(sum(B**2)); the old
    # code omitted the squares.
    denominator = np.sqrt(np.sum(A ** 2)) * np.sqrt(np.sum(B ** 2))
    if denominator == 0:
        denominator = 1
    return numerator / denominator
def RationalsModP(p):
    """Construct the field Q/p of rationals modulo p.

    Assume p is a prime (this is not verified).
    """
    class RationalModP(_Modular):
        """A rational modulo p

        The rational is stored with numerator and denominator relatively prime.
        This is done to prevent growth in numerator or denominator which causes
        overflow and makes math harder.
        """
        def __init__(self, m, n=1):
            """Constructor for rationals.

            `m` may be a 64-byte big-endian encoding (numerator in the first
            32 bytes, denominator in the last 32), or a numerator with `n`
            as denominator. If the denominator is not specified, set it to 1.
            """
            try:
                # This is awkward constructor overloading
                if isinstance(m, bytes):
                    num = int.from_bytes(m[:32], 'big')
                    den = int.from_bytes(m[32:], 'big')
                else:
                    num = int(m) % RationalModP.p
                    den = int(n) % RationalModP.p
                common = gcd(num, den)
                # Handle case with 0 (gcd(0, 0) == 0; avoid dividing by it)
                if common == 0:
                    self.m, self.n = num, den
                else:
                    self.m = num // common
                    self.n = den // common
            # Narrowed from a bare `except:` so KeyboardInterrupt and the
            # like are no longer swallowed.
            except (TypeError, ValueError):
                raise TypeError("Can't cast type %s to %s in __init__" %
                                (type(n).__name__, type(self).__name__))
            self.field = RationalModP

        @typecheck
        def __add__(self, other):
            num = (self.m * other.n + other.m * self.n) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            common = gcd(num, den)
            return RationalModP(num // common, den // common)

        @typecheck
        def __sub__(self, other):
            num = (self.m * other.n - other.m * self.n) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            common = gcd(num, den)
            return RationalModP(num // common, den // common)

        @typecheck
        def __mul__(self, other):
            num = (self.m * other.m) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            return RationalModP(num, den)

        def __neg__(self):
            return RationalModP(-self.m, self.n)

        @typecheck
        def __eq__(self, other):
            return isinstance(other, RationalModP) and (
                (self.m * other.n) % RationalModP.p == (other.m * self.n) % RationalModP.p)

        @typecheck
        def __ne__(self, other):
            # BUG FIX: this previously isinstance-checked against IntegerModP
            # (copy-pasted from the integer field), so __ne__ disagreed with
            # __eq__ for RationalModP operands.
            return isinstance(other, RationalModP) is False or (
                (self.m * other.n) % RationalModP.p != (other.m * self.n) % RationalModP.p)

        # TODO(rbharath): implement __divmod__ if integer-style division is needed.

        def inverse(self):
            """Multiplicative inverse: swap numerator and denominator."""
            if self.m == 0:
                raise Exception("Cannot invert with numerator 0")
            return RationalModP(self.n, self.m)

        def __str__(self):
            return "%s/%s" % (str(self.m), str(self.n))

        def __repr__(self):
            return '%d/%d (mod %d)' % (self.m, self.n, self.p)

        def to_bytes(self):
            """Inverse of the bytes constructor: 32-byte numerator then 32-byte denominator."""
            return self.m.to_bytes(32, 'big') + self.n.to_bytes(32, 'big')

    RationalModP.p = p
    RationalModP.__name__ = 'Q/%d' % (p)
    RationalModP.englishName = 'RationalsMod%d' % (p)
    return RationalModP
def pytest_sessionfinish(session, exitstatus):
    """Remove data generated for the tests."""
    print("\nRemoving datasets generated for the test session.")
    for name in FILES_TO_GENERATE:
        path = os.path.join(DATA_PATH, name)
        if not os.path.exists(path):
            continue
        os.remove(path)
def rand_email():
    """Random email.

    A letter followed by 4-14 alphanumerics, at a 2-10 letter domain with
    a random TLD from _all_email_kinds.

    Usage Example::

        >>> rand_email()
        Z4Lljcbdw7m@npa.net
    """
    first_char = random.choice(string.ascii_letters)
    local_part = first_char + rand_str(
        string.ascii_letters + string.digits, random.randint(4, 14))
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    tld = random.choice(_all_email_kinds)
    return "%s@%s%s" % (local_part, domain, tld)
def __parse_quic_timing_from_scenario(in_dir: str, scenario_name: str, pep: bool = False) -> pd.DataFrame:
    """
    Parse the quic timing results in the given scenario.

    :param in_dir: The directory containing all measurement results
    :param scenario_name: The name of the scenario to parse
    :param pep: Whether to parse QUIC or QUIC (PEP) files
    :return: A dataframe containing the parsed results of the specified scenario.
    """
    logger.debug("Parsing quic%s timing files in %s", " (pep)" if pep else "", scenario_name)
    # Compile once instead of re-parsing the pattern for every file.
    file_re = re.compile(r"^quic%s_ttfb_(\d+)_client\.txt$" % ("_pep" if pep else "",))
    rows = []
    for file_name in os.listdir(os.path.join(in_dir, scenario_name)):
        file_path = os.path.join(in_dir, scenario_name, file_name)
        if not os.path.isfile(file_path):
            continue
        match = file_re.search(file_name)
        if not match:
            continue
        logger.debug("%s: Parsing '%s'", scenario_name, file_name)
        run = int(match.group(1))
        con_est = None
        ttfb = None
        with open(file_path) as file:
            for line in file:
                if line.startswith('connection establishment time:'):
                    if con_est is not None:
                        logger.warning("Found duplicate value for con_est in '%s', ignoring", file_path)
                    else:
                        # strip the trailing unit suffix (last two chars)
                        con_est = float(line.split(':', 1)[1].strip()[:-2])
                elif line.startswith('time to first byte:'):
                    if ttfb is not None:
                        logger.warning("Found duplicate value for ttfb in '%s', ignoring", file_path)
                    else:
                        ttfb = float(line.split(':', 1)[1].strip()[:-2])
        rows.append({'run': run, 'con_est': con_est, 'ttfb': ttfb})
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collecting rows and building the frame once is also O(n) instead of O(n^2).
    df = pd.DataFrame(rows, columns=['run', 'con_est', 'ttfb'])
    with_na = len(df.index)
    df.dropna(subset=['con_est', 'ttfb'], inplace=True)
    without_na = len(df.index)
    if with_na != without_na:
        logger.warning("%s: Dropped %d lines with NaN values", scenario_name, with_na - without_na)
    if df.empty:
        logger.warning("%s: No quic%s timing data found", scenario_name, " (pep)" if pep else "")
    return df
async def value_to_deep_structure(value, hash_pattern):
    """build deep structure from value

    Decomposes `value` into leaf objects according to `hash_pattern`,
    serializes and checksums each object concurrently, caches the
    buffers, and returns (deep_structure, set of new checksum hex strings).

    Raises DeepStructureError if `value` does not match `hash_pattern`.
    """
    try:
        objects = {}
        deep_structure0 = _value_to_objects(
            value, hash_pattern, objects
        )
    except (TypeError, ValueError):
        raise DeepStructureError(hash_pattern, value) from None
    obj_id_to_checksum = {}
    new_checksums = set()
    async def conv_obj_id_to_checksum(obj_id):
        # Serialize one leaf object, record its checksum, cache the buffer.
        obj = objects[obj_id]
        obj_buffer = await serialize(obj, "mixed")
        obj_checksum = await calculate_checksum(obj_buffer)
        new_checksums.add(obj_checksum.hex())
        buffer_cache.cache_buffer(obj_checksum, obj_buffer)
        obj_id_to_checksum[obj_id] = obj_checksum.hex()
    coros = []
    for obj_id in objects:
        coro = conv_obj_id_to_checksum(obj_id)
        coros.append(coro)
    # Checksum all leaf objects concurrently.
    await asyncio.gather(*coros)
    deep_structure = _build_deep_structure(
        hash_pattern, deep_structure0, obj_id_to_checksum
    )
    return deep_structure, new_checksums
def get_users_run(jobs, d_from, target, d_to='', use_unit='cpu',
                  serialize_running=''):
    """Takes a DataFrame full of job information and
    returns usage for each "user" uniquely based on specified unit.

    This function operates as a stepping stone for plotting usage figures
    and returns various series and frames for several different uses.

    Parameters
    -------
    jobs: DataFrame
        Job DataFrame typically generated by slurm/sacct_jobs
        or the ccmnt package.
    use_unit: str, optional
        Usage unit to examine. One of: {'cpu', 'cpu-eqv', 'gpu', 'gpu-eqv'}.
        Defaults to 'cpu'.
    d_from: date str
        Beginning of the query period, e.g. '2019-04-01T00:00:00'.
    target: int-like
        Typically a cpu allocation or core eqv value for a particular acount.
        Often 50.
    d_to: date str, optional
        End of the query period, e.g. '2020-01-01T00:00:00'.
        Defaults to now if empty.
    serialize_running: str, optional
        Pickle given structure with argument as a name.
        If left empty, pickle procedure is skipped.
        Defaults to empty.

    Returns
    -------
    user_running_cat:
        Frame of running resources for each of the unique
        "users" in the jobs data frame.
    """
    users = jobs.user.unique()
    running_series = []
    for user in users:
        # Bug fix: str.match(user) treated the user name as a regular
        # expression and also matched names that merely START with `user`
        # (e.g. 'bob' matched 'bobby'); '==' compares full names literally.
        user_jobs = jobs[jobs['user'] == user].copy()
        _, _user_queued, user_running, _ = job_use(user_jobs, d_from,
                                                  target, d_to=d_to,
                                                  use_unit=use_unit)
        user_running = user_running[d_from:d_to]
        running_series.append(pd.Series(user_running,
                                        index=user_running.index,
                                        name=user))
    # Concatenate once at the end instead of growing the frame inside the
    # loop (repeated pd.concat is quadratic).
    if not running_series:
        user_running_cat = pd.DataFrame()
    elif len(running_series) == 1:
        user_running_cat = running_series[0].to_frame()
    else:
        user_running_cat = pd.concat(running_series, axis=1)
    if serialize_running != '':
        user_running_cat.to_pickle(serialize_running)
    return user_running_cat
def get_classes_constants(paths):
    """
    Extract the vtk class names and constants from the path.

    :param paths: The path(s) to the Python file(s).
    :return: Dict mapping each file path (str) to the set of VTK names found.
    """
    res = collections.defaultdict(set)
    for path in paths:
        for line in path.read_text().split('\n'):
            # Bug fix: the original `continue` only advanced the INNER
            # skip-pattern loop, so a matching skip pattern never actually
            # skipped the line. Test all skip patterns first, then move on.
            if any(pattern.search(line) for pattern in Patterns.skip_patterns):
                continue
            for pattern in Patterns.vtk_patterns:
                m = pattern.search(line)
                if m:
                    for g in m.groups():
                        res[str(path)].add(g)
    return res
def predict_unfolding_at_temperature(temp, data, PDB_files):
    """
    Predict labels for all trajectories at a given temperature.

    Note: the assumption is that at a given temperature, all snapshots are
    at the same times, i.e. every trajectory has the same length.

    NOTE(review): the original docstring described a filter option
    ('First commit' / 'Last commit' / 'Filter osc') that is not a parameter
    of this function -- presumably handled in ClusterPCA; verify.
    """
    # Normalize the temperature into the fixed-width '0.80'-style string
    # used inside the PDB file names.
    temp=str(temp)
    if len(temp)==1:
        temp='{}.'.format(temp)
    while len(temp)<5: #add zeros so that the temperature is of the form 0.80
        temp='{}0'.format(temp)
    f, trajectories = utils.get_trajectory(data, PDB_files, '{}_'.format(temp) )
    #need to figure out how long are all the trajectories.
    #to figure this out, iterate through the first files until you see a change
    go=True
    i=0
    traj_nums=[]
    while go:
        file=f[i]
        file=file.split('{}_'.format(temp))
        suffix=file[1]
        # Trajectory number is the part of the filename between the
        # temperature prefix and the first '.'.
        traj_num=suffix.split('.')[0]
        traj_nums.append(traj_num)
        # At i == 0 this compares traj_nums[0] against traj_nums[-1] (the
        # same element), so the loop always advances past the first file.
        if traj_nums[i]!=traj_nums[i-1]:
            go=False
        else:
            i+=1
    traj_len=i
    n_trajectories=int(len(f)/traj_len)
    # One row per trajectory, one column per snapshot time.
    sim_labels=np.zeros((n_trajectories, traj_len))
    times=utils.get_times(f[0:traj_len])
    for n in range(n_trajectories):
        traj=trajectories[n*traj_len:n*traj_len+traj_len]
        sim_labels[n,:]=traj
    return times, sim_labels
def test_agricrop():
    """
    https://smart-data-models.github.io/dataModel.Agrifood/AgriCrop/examples/example-normalized.jsonld
    """
    e = Entity(
        "AgriCrop",
        "AgriCrop:df72dc57-1eb9-42a3-88a9-8647ecc954b4",
        ctx=[
            "https://smartdatamodels.org/context.jsonld",
            "https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld",
        ],
    )
    # Declare all properties in one table and apply them in order.
    properties = [
        ("name", "Wheat"),
        ("alternateName", "Triticum aestivum"),
        ("description", "Spring wheat"),
        ("agroVocConcept", "http://aims.fao.org/aos/agrovoc/c_7951"),
        ("wateringFrequency", "daily"),
        (
            "harvestingInterval",
            [
                {"dateRange": "-03-21/-04-01", "description": "Best Season"},
                {"dateRange": "-04-02/-04-15", "description": "Season OK"},
            ],
        ),
        (
            "hasAgriFertiliser",
            [
                "urn:ngsi-ld:AgriFertiliser:1b0d6cf7-320c-4a2b-b2f1-4575ea850c73",
                "urn:ngsi-ld:AgriFertiliser:380973c8-4d3b-4723-a899-0c0c5cc63e7e",
            ],
        ),
        (
            "hasAgriPest",
            [
                "urn:ngsi-ld:AgriPest:1b0d6cf7-320c-4a2b-b2f1-4575ea850c73",
                "urn:ngsi-ld:AgriPest:380973c8-4d3b-4723-a899-0c0c5cc63e7e",
            ],
        ),
        (
            "hasAgriSoil",
            [
                "urn:ngsi-ld:AgriSoil:00411b56-bd1b-4551-96e0-a6e7fde9c840",
                "urn:ngsi-ld:AgriSoil:e8a8389a-edf5-4345-8d2c-b98ac1ce8e2a",
            ],
        ),
        (
            "plantingFrom",
            [
                {"dateRange": "-09-28/-10-12", "description": "Best Season"},
                {"dateRange": "-10-11/-10-18", "description": "Season OK"},
            ],
        ),
        (
            "relatedSource",
            [
                {
                    "application": "urn:ngsi-ld:AgriApp:72d9fb43-53f8-4ec8-a33c-fa931360259a",
                    "applicationEntityId": "app:weat",
                }
            ],
        ),
        (
            "seeAlso",
            ["https://example.org/concept/wheat", "https://datamodel.org/example/wheat"],
        ),
    ]
    for key, value in properties:
        e.prop(key, value)
    assert e.to_dict() == expected_dict("agri_crop")
    assert e.to_dict(kv=True) == expected_dict("agri_crop.kv")
def _frac_scorer(matched_hs_ions_df, all_hyp_ions_df, N_spectra):
    """Fraction ion observed scorer.

    Scores a hypothetical structure by the fraction of its hypothetical
    ions that were actually observed, normalized by the number of spectra.

    Parameters
    ----------
    matched_hs_ions_df : pd.DataFrame
        Observed ions that matched a specific hypothetical structure.
    all_hyp_ions_df : pd.DataFrame
        All possible ions for the hypothetical structure.
    N_spectra : int
        Number of spectra provided.

    Returns
    -------
    float
        Score for the hypothetical structure.
    """
    # len(df) is the row count, equivalent to df.shape[0].
    observed = len(matched_hs_ions_df)
    possible = len(all_hyp_ions_df)
    return observed / (possible * N_spectra)
def animation_import_button(self, context: Context):
    """Draw the Cal3D animation import entry in the menu.

    Args:
        self (): A reference to this bpy dynamic draw function.
        context (Context): The context containing data for the current 3d view.
    """
    layout = self.layout
    layout.operator(CalAnimationImporter.bl_idname, text="Cal3D Animation (.xaf)")
def role_in(roles_allowed):
    """
    Build a permission checker that returns True when any of the user's
    roles appears in *roles_allowed*.
    """
    def _check_with_authuser(authuser):
        for role in roles_allowed:
            if role in authuser.roles:
                return True
        return False
    return _check_with_authuser
def elements_for_model(model: Model) -> List[str]:
    """Creates a list of elements to expect to register.

    Scalar compute elements contribute their plain name; elements with a
    multiplicity contribute one '<name>[i][j]...' entry per index in the
    row-major cartesian product of the dimensions.

    Args:
        model: The model to create a list for.
    """
    import itertools

    def index_to_str(index) -> str:
        # Render an index tuple as '[i][j]...'.
        return ''.join('[{}]'.format(i) for i in index)

    result = list()     # type: List[str]
    for element in model.compute_elements:
        if len(element.multiplicity) == 0:
            result.append(str(element.name))
        else:
            # itertools.product replaces the original hand-rolled
            # n-dimensional counter; it yields indices in the same
            # row-major order (last dimension varies fastest) and, unlike
            # the original, terminates cleanly when a dimension is 0
            # (the original looped forever in that degenerate case).
            for index in itertools.product(
                    *(range(dim) for dim in element.multiplicity)):
                result.append(str(element.name) + index_to_str(index))
    return result
def form_IntegerNoneDefault(request):
    """
    An integer field defaulting to None
    """
    structure = schemaish.Structure()
    structure.add('myIntegerField', schemaish.Integer())
    integer_form = formish.Form(structure, 'form')
    integer_form.defaults = {'myIntegerField': None}
    return integer_form
def tree_checkout(repo, tree, path):
    """Recursively instantiates a tree during checkout into a empty directory"""
    for item in tree.items:
        obj = object_read(repo, item.sha)
        # Absolute path of the working-tree root (parent of the VCS dir).
        rootPath = os.path.realpath(os.path.join(repo.vcsdir, '../'))
        # NOTE(review): assumes item.path is an absolute (bytes) path below
        # rootPath, so stripping rootPath yields the repo-relative part --
        # confirm against the tree object format.
        relativePath = item.path.decode().replace(rootPath, '')
        destRootPath = path
        # destRootPath is expected to be bytes, hence the encode().
        dest = destRootPath + relativePath.encode()
        if obj.fmt == b'tree':
            os.mkdir(dest)
            # recursively call if the object is tree
            # NOTE(review): the recursion passes destRootPath rather than
            # dest; this is only correct if nested items also carry full
            # paths from the repo root -- verify.
            tree_checkout(repo, obj, destRootPath)
        elif obj.fmt == b'blob':
            with open(dest, 'wb') as f:
                f.write(obj.blobdata)
def tokenize(text, stopwords):
    """Tokenize *text* and remove stopwords.

    Punctuation is stripped, the text is word-tokenized, and every token is
    lower-cased before the stopword check.
    """
    without_punctuations = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(without_punctuations)
    # Bug fix: lower-case BEFORE the membership test. The original checked
    # the raw token against the (typically lower-case) stopword list, so
    # capitalized stopwords such as 'The' slipped through.
    return [w.lower() for w in tokens if w.lower() not in stopwords]
def add_subscription_handler(args: argparse.Namespace) -> None:
    """
    Handler for "groups subscription add" subcommand.

    Allocates the next free subscription id from the existing
    "subscription_type" Brood resources and creates a new resource with the
    name/description/stripe fields taken from the CLI arguments.
    """
    new_subscription_id = 0
    params = {"type": "subscription_type"}
    try:
        # resolve index
        try:
            resources: BugoutResources = bc.list_resources(
                token=MOONSTREAM_ADMIN_ACCESS_TOKEN, params=params
            )
            # Next free id = (largest existing id) + 1.
            new_subscription_id = (
                max(
                    [
                        int(resource.resource_data["id"])
                        for resource in resources.resources
                    ]
                )
                + 1
            )
        except BugoutResponseException as e:
            if e.detail != "Resources not found":
                raise BroodResourcesInteractionException(
                    f"status_code={e.status_code}, detail={e.detail}"
                )
            # If Brood returns 404, then we want to continue execution of the outer try block
            # with new_subscription_id as 0. That's why we don't have an "else" condition here.
        except Exception as e:
            print("Unexpected Exception on request to brood")
            raise
        subscription_data = {
            "type": "subscription_type",
            "id": str(new_subscription_id),
            "name": args.name,
            "description": args.description,
            "stripe_product_id": args.stripe_product_id,
            "stripe_price_id": args.stripe_price_id,
            "active": args.active,
        }
        try:
            bc.create_resource(
                token=MOONSTREAM_ADMIN_ACCESS_TOKEN,
                application_id=MOONSTREAM_APPLICATION_ID,
                resource_data=subscription_data,
            )
        except BugoutResponseException as e:
            print(f"status_code={e.status_code}, detail={e.detail}")
            raise BroodResourcesInteractionException(
                f"status_code={e.status_code}, detail={e.detail}"
            )
        except Exception as e:
            print(f"Exception in create brood resource error:{e}")
            raise UnExpectedException("Error in resource creating")
    except Exception as e:
        # NOTE(review): this outer handler prints and swallows every error
        # re-raised above, so the command always exits with status 0 even
        # on failure -- confirm this is intentional for the CLI.
        print(e)
def to_dict(prim: Primitive) -> ObjectData:
    """Serialize a primitive into a plain dictionary."""
    val: BasePrimitive = prim.value
    result: ObjectData = {
        "name": val.name,
        "size": val.size,
        "signed": val.signed,
        "integer": prim in INTEGER_PRIMITIVES,
    }
    # Only emit the value range when it is non-trivial (not both zero).
    if not (val.min == 0 and val.max == 0):
        result["min"] = val.min
        result["max"] = val.max
    return result
def setup_testing_all_data():
    """
    Function to initialize truffe data for testing
    """
    # Users/units come first: the admin user they produce is handed to
    # several of the later fixtures.
    admin_user = setup_testing_users_units()
    setup_testing_main(admin_user)
    setup_testing_vehicles(admin_user)
    setup_testing_logistics()
    setup_testing_communication()
    setup_testing_notifications(admin_user)
    setup_testing_members(admin_user)
    # NOTE(review): accounting is set up in core -> main -> tools order;
    # the ordering appears deliberate -- confirm before reordering.
    setup_testing_accounting_core()
    setup_testing_accounting_main()
    setup_testing_accounting_tools(admin_user)
def sarif_result_to_cso_warning(state, version, result):
    """Report a CodeSonar warning
    The specific warning report will depend on the contents of the sarif
    result as follows:
    The locations list is expected to be a singleton. This will form the
    endbox of the CodeSonar warning.
    If the list of related code locations is empty, then report using one
    of the following options, expressed in terms of the arguments to the report
    function:
    - at a code location: first three arguments are sfileinst, int, str
    - at a code span in a specified file instance: first five arguments are sfileinst, int, int, int, int
    If there are related locations or code flows:
    - with a list of code locations: first argument is a list of locations_node
    If there are no locations:
    - associated with a file instance first argument is sfileinst
    - with no association to a file or procedure there are no location arguments
    """
    sarif_assert(not (result.ruleId is None and result.ruleIndex == -1), "Neither of ruleId or ruleIndex are specified")
    # NOTE(review): this condition enforces that message and messageId are
    # not BOTH present (mutual exclusion), but the failure text below says
    # "Neither ... are specified" -- the message looks copy-pasted from the
    # assert above; confirm and correct the text.
    sarif_assert(not (result.message is not None and result.messageId is not None), "Neither of message or messageId are specified")
    locations = result.locations
    # NOTE(review): relatedLocations and properties are bound here but never
    # used below -- presumably future work.
    relatedLocations = result.relatedLocations
    codeFlows = result.codeFlows
    properties = result.properties
    hostedViewerUri = result.hostedViewerUri
    if result.ruleIndex != -1:
        warning_class = state.sarif_run.wcs[result.ruleIndex]
    else:
        # We have to create a new warning class. There's not much to go on here
        # The warning class may already have been encountered, so look it up
        # first.
        ruleId = result.ruleId.encode('utf-8')
        warning_class_index = state.sarif_run.wcs_map.get(ruleId)
        if warning_class_index is not None:
            # It's been encountered before. Use that.
            warning_class = state.sarif_run.wcs[warning_class_index]
        else:
            # Create a new one and put it in the table with the run
            warning_class = WarningClass(
                ruleId,
                ruleId,
                mk_rank(result.rank, None, result.level),
                [],
                warning_significance.RELIABILITY)
            state.sarif_run.add_warning_class(warning_class)
    warning_class.augment_warning_class_from_result(state, result)
    if result.message is not None:
        warning_message = result.message
    else:
        # Fall back to the rule's messageStrings table keyed by messageId.
        warning_message = warning_class.get_messagestring(result.messageId, state.sarif_run)
        if warning_message is None:
            unhandled_warning("Could not find a messageStrings entry for key '{}' for rule '{}'".format(result.messageId, warning_class.name))
            warning_message = "None"
    # warning_message = warning_class.get_significancestring() + ': ' + warning_class.name + ': '+ warning_message
    message = to_reml(warning_message)
    if len(locations) == 0:
        unhandled_warning("locations list is empty")
        return
    if len(locations) > 1:
        unhandled_warning("locations list is not a singleton; only the first location will be shown as the endbox in the CodeSonar warning")
    coords = location_to_coords(state, version, locations[0])
    if coords is None:
        return
    (endbox_sf, region) = coords
    # No source file at all? Report at project level
    if endbox_sf is None:
        addComment(state, Comment(message, warning_class.rank, warning_class.name, warning_class.get_significancestring(), hostedViewerUri))
        return
    # If no region is available just report it at the file level
    if region is None:
        addComment(state, PositionalComment(message, warning_class.rank, warning_class.name, warning_class.get_significancestring(), hostedViewerUri, endbox_sf, 1))
        return
    extra_locations = codeflows_to_locations(state, codeFlows)
    # If there are no other locations, then we can report the warning at the given region
    if len(extra_locations) == 0:
        # Precondition: we know that region[0] is not None
        # If the endColumn is not specified and if the startLine is the same
        # as the endLine, just report this as a single line warning
        if region[0] == region[1] and region[3] is None:
            addComment(state, PositionalComment(message, warning_class.rank, warning_class.name, warning_class.get_significancestring(), hostedViewerUri, endbox_sf, region[0]))
            return
        # if the endColumn is None, then max it out to 1000. TODO: is this OK?
        # NOTE(review): this branch emits the exact same single-line comment
        # as the branch above despite the differing comments -- the span
        # reporting described in the docstring never happens; verify.
        if region[3] is None:
            addComment(state, PositionalComment(message, warning_class.rank, warning_class.name, warning_class.get_significancestring(), hostedViewerUri, endbox_sf, region[0]))
            return
    # If we get to here, then we have vector of locations nodes
    # And we have to report the warning at all locations
    # NOTE(review): despite the comment above, only a single positional
    # comment at region[0] is emitted; extra_locations is not passed on.
    if len(extra_locations) > 0:
        addComment(state, PositionalComment(message, warning_class.rank, warning_class.name, warning_class.get_significancestring(), hostedViewerUri, endbox_sf, region[0]))
def sync_remote_catalogs(*args, **kwargs):
    """Synchronize the remote catalogues matching the given filters."""
    for catalog_model in RemoteCatalogs:
        for remote in catalog_model.objects.filter(**kwargs):
            logger.info("Start synchronize remote instance %s %d (%s)" % (
                remote.__class__.__qualname__, remote.pk, remote.url))
            try:
                # save() triggers the model's synchronization logic.
                remote.save()
            except Exception as e:
                logger.exception(e)
def plotFitSize(logbook, fitness="min", size="avg"):
    """
    Plot a fitness statistic (left axis) and a size statistic (right axis)
    against generation number.

    Values for fitness and size:
    "min" plots the minimum
    "max" plots the maximum
    "avg" plots the average
    "std" plots the standard deviation
    """
    generations = logbook.select("gen")
    fitness_values = logbook.chapters["fitness"].select(fitness)
    size_values = logbook.chapters["size"].select(size)
    fig, err_axis = plt.subplots()
    err_lines = err_axis.plot(generations, fitness_values, "b-", label=f"{fitness} Error")
    err_axis.set_xlabel("Generation")
    err_axis.set_ylabel("Error", color="b")
    for tick_label in err_axis.get_yticklabels():
        tick_label.set_color("b")
    # Applies to the current axes (the error axis at this point).
    plt.ylim(0, 1)
    size_axis = err_axis.twinx()
    size_lines = size_axis.plot(generations, size_values, "r-", label=f"{size} Size")
    size_axis.set_ylabel("Size", color="r")
    for tick_label in size_axis.get_yticklabels():
        tick_label.set_color("r")
    all_lines = err_lines + size_lines
    err_axis.legend(all_lines, [line.get_label() for line in all_lines], loc="center right")
    plt.show()
def get_raw_img(url):
    """
    Download the input image from *url* into ./imgs/img.png.

    Returns True when the image was downloaded and written, False when the
    server answered with an error status.
    """
    pic = False
    response = requests.get(url, stream=True)
    try:
        # Only treat 2xx responses as success; the original wrote the error
        # body to disk and returned True regardless of the status code.
        if response.ok:
            with open('./imgs/img.png', 'wb') as file:
                # iter_content() with no argument yields one byte at a time;
                # use a sensible chunk size instead.
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)
            pic = True
    finally:
        # Always release the connection, even if the write fails
        # (the original leaked it on exceptions).
        response.close()
    return pic
def set_layer(context, request):
    """Set the calendar layer on the request, so the resources are rendered."""
    # Mark the request with ICalendarLayer in addition to its existing
    # interfaces; the calendar skin's resources are then found by lookup.
    zope.interface.alsoProvides(
        request, icemac.ab.calendar.browser.interfaces.ICalendarLayer)
def get_and_validate_study_id(chunked_download=False):
    """
    Checks for a valid study object id or primary key.
    If neither is given, a 400 (bad request) error is raised.
    Study object id malformed (not 24 characters) causes 400 error.
    Study object id otherwise invalid causes 400 error.
    Study does not exist in our database causes 404 error.
    """
    study = _get_study_or_abort_404(
        request.values.get('study_id', None),
        request.values.get('study_pk', None),
    )
    # You're only allowed to download chunked data from test studies.
    if chunked_download and not study.is_test:
        return abort(404)
    return study
def flirt_registration(input_file,
                       output_file,
                       matrix_file,
                       ref_file,
                       verbose=False):
    """Registration to MNI template using FSL-FLIRT.

    Ubuntu: Create link for flirt
        sudo ln -s /usr/bin/fsl5.0-flirt /usr/bin/flirt

    Args:
        input_file (str/Path): input file (nii.gz)
        output_file (str/Path): output file (nii.gz)
        matrix_file (str/Path): transformation matrix (.mat)
        ref_file (str/Path): path to MNI-152-1mm reference file (nii.gz)
        verbose (bool): print fsl command
    """
    node = fsl.FLIRT(bins=256, cost_func='mutualinfo')
    node.inputs.in_file = str(input_file)
    node.inputs.reference = str(ref_file)
    node.inputs.output_type = "NIFTI_GZ"
    node.inputs.out_file = str(output_file)
    node.inputs.out_matrix_file = str(matrix_file)
    if verbose:
        print(node.cmdline)
    else:
        node.inputs.verbose = 0
    node.run()
async def multiple_database_queries(r, w, c):
    """
    Test type 3: Multiple database queries
    """
    query_count = get_num_queries(r._scope)
    worlds = []
    connection = await pool.acquire()
    try:
        statement = await connection.prepare(READ_ROW_SQL)
        for _ in range(query_count):
            row_id = randint(1, 10000)
            random_number = await statement.fetchval(row_id)
            worlds.append({'id': row_id, 'randomNumber': random_number})
    finally:
        # Always hand the connection back to the pool.
        await pool.release(connection)
    w.headers = CONTENT_TYPE, JSON
    w.body = json_dumps(worlds).encode('utf-8')
def tokenize_char(pinyin: str) -> tuple[str, str, int] | None:
    """
    Split the pinyin spelling of one Chinese character into its initial
    (``str``), final (``str``) and tone (``int; [0-4]``). Returns ``None``
    when the string cannot be properly tokenized.
    """
    import re
    tone = 0
    # Strip the tone mark: replace the first toned vowel with its plain
    # form and remember the tone number (0 = neutral tone, no mark found).
    for char in pinyin:
        if char in __TONED_VOWELS:
            plain, tone = __TONED_VOWELS[char]
            pinyin = pinyin.replace(char, plain)
            break
    for final_pattern in __FINALS:
        match = re.search(final_pattern, pinyin)
        if match is not None:
            final = match[0]
            initial = re.sub(final_pattern, '', pinyin)
            return (initial, final, tone) if final else None
    return None
def test_task_relation_hash_func(pre_code, post_code, expect):
    """Test TaskRelation magic function :func:`__hash__`.

    NOTE(review): arguments are presumably supplied by a pytest
    parametrize decorator defined above this chunk; `expect` is the hash
    expected for the (pre_task_code, post_task_code) pair.
    """
    task_param = TaskRelation(pre_task_code=pre_code, post_task_code=post_code)
    assert hash(task_param) == expect
assert hash(task_param) == expect | 32,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.