| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def update_metric(task, metric, results):
    """
    Update the metric according to the task.
    """
    pred = np.array(results[0])
    label = np.array(results[1])
    loss = np.array(results[2])
    if task == 'pretrain':
        cross_entropy = np.array(results[3])
        metric.update(pred, label, cross_entropy)
    else:
        metric.update(pred, label)
| 5,337,100
|
def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlvalidate with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Validate a HXL dataset.')
parser.add_argument(
'-s',
'--schema',
help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',
metavar='schema',
default=None
)
parser.add_argument(
'-a',
'--all',
help='Include all rows in the output, including those without errors',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-e',
'--error-level',
help='Minimum error level to show (defaults to "info") ',
choices=['info', 'warning', 'error'],
metavar='info|warning|error',
default='info'
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
class Counter:
infos = 0
warnings = 0
errors = 0
def callback(e):
"""Show a validation error message."""
if e.rule.severity == 'info':
if args.error_level != 'info':
return
Counter.infos += 1
elif e.rule.severity == 'warning':
if args.error_level == 'error':
return
Counter.warnings += 1
else:
Counter.errors += 1
message = '[{}] '.format(e.rule.severity)
if e.row:
if e.rule:
message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern)
else:
message += "{}: ".format(e.row.row_number + 1)
elif e.rule:
message += "<dataset>,{}: ".format(e.rule.tag_pattern)
else:
message += "<dataset>: "
if e.value:
message += '"{}" '.format(e.value)
if e.message:
message += e.message
message += "\n"
output.write(message)
output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>"))
source = hxl.io.data(input)
if args.schema:
with make_input(args, None, args.schema) as schema_input:
schema = hxl.schema(schema_input, callback=callback)
else:
schema = hxl.schema(callback=callback)
schema.validate(source)
if args.error_level == 'info':
output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos))
elif args.error_level == 'warning':
output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings))
else:
output.write("{:,} error(s)\n".format(Counter.errors))
if Counter.errors > 0:
output.write("Validation failed.\n")
return EXIT_ERROR
else:
output.write("Validation succeeded.\n")
return EXIT_OK
| 5,337,101
|
def fix_bayes_factor(bayes_factor):
    """
    If one of the bayes factors is 'inf' we get a string instead of a
    tuple back. This is hacky but fixes that.
    """
    # Maximum cut off for Bayes factor value
    max_bf = 1e12
    if type(bayes_factor) == str:
        bayes_factor = bayes_factor.split(",")
        bayes_factor = [min(float(x), max_bf) for x in bayes_factor]
        bayes_factor = tuple(bayes_factor)
    bayes_factor = bayes_factor[0]
    return bayes_factor
| 5,337,102
|
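A quick usage sketch for `fix_bayes_factor` above; the inputs are made up, and only the string case described in the docstring is shown:

```python
# Hypothetical inputs: a comma-separated string comes back when one factor is 'inf'.
print(fix_bayes_factor("inf,3.2"))  # 1000000000000.0 -- 'inf' is clamped to max_bf
print(fix_bayes_factor("2.5,0.4"))  # 2.5 -- the first factor is returned
```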
def save_model(model_dir, cur_time, model, name):
    """save a single model"""
    filename = os.path.join(model_dir, "{}_{}.h5")
    model.save_weights(filename.format(cur_time, name))
| 5,337,103
|
def load_config_vars(target_config, source_config):
"""Loads all attributes from source config into target config
@type target_config: TestRunConfigManager
@param target_config: Config to dump variables into
@type source_config: TestRunConfigManager
@param source_config: The other config
@return: True
"""
# Overwrite all attributes in config with new config
for attr in dir(source_config):
# skip all private class attrs
if attr.startswith('_'):
continue
val = getattr(source_config, attr)
if val is not None:
setattr(target_config, attr, val)
| 5,337,104
|
def test__eq___dict__nested_data(nested_data):
    """ Test equality for stub. """
    actual = nested_data.copy()
    assert actual == nested_data
| 5,337,105
|
def parse_and_save(local_file_location):
"""
Iterate through given local xml file line by line and write to default csv location
:param local_file_location: str
:return: void
"""
context = eTree.iterparse(local_file_location)
for event, element in context:
if event == "end" and element.tag == 'Listings':
for listing in element:
if listing_valid(listing):
listing_fields = get_get_fields(listing)
write_listing_to_csv(listing_fields)
| 5,337,106
|
def fiebelkorn_binning(x_trial, t_trial):
"""
Given accuracy and time-points, find the time-smoothed average accuracy
Parameters
----------
x_trial : np.ndarray
Accuracy (Hit: 1, Miss: 0) of each trial
t_trial : np.ndarray
The time-stamp of each trial
Returns
-------
x_bin : np.ndarray
The average accuracy within each time bin
t_bin : np.ndarray
The centers of each time bin
"""
details = behav_details['fiebelkorn']
# Time-stamps of the center of each bin
t_bin = np.arange(details['t_start'],
details['t_end'] + 1e-10,
details['bin_step'])
# Accuracy within each bin
x_bin = []
for i_bin in range(len(t_bin)):
bin_center = t_bin[i_bin]
bin_start = bin_center - (details['bin_width'] / 2)
bin_end = bin_center + (details['bin_width'] / 2)
bin_sel = (bin_start <= t_trial) & (t_trial <= bin_end)
x_bin_avg = np.mean(x_trial[bin_sel])
x_bin.append(x_bin_avg)
x_bin = np.array(x_bin)
return x_bin, t_bin
| 5,337,107
|
def get_fastest_while_jump(condition:str, jump_tag:str, verdicts: list) -> list:
"""Verdicts like ["while", "a", "<", "10"] """
result = []
jumpables = ("===", ) + tuple(INVERT_TABLE.keys())
if len(verdicts) == 2:
result.append(F"jump-if {jump_tag} {verdicts[1]} != false")
elif verdicts[2] in jumpables:
result.append(F"jump-if {jump_tag} " + (" ".join(verdicts[1:]) ) )
else:
result.append(create_temporary_xlet(condition, verdicts[1:]))
result.append(F"jump-if {jump_tag} {condition} != false")
return result
| 5,337,108
|
def test_run_analysis(noise_dataset):
    """Test that run_analysis runs.
    """
    X, y = noise_dataset
    run_analysis(X, y, [2, 3], [1, 2, 3], [0, 1, 2], 2, 3)
| 5,337,109
|
async def test_misc_upgrade_ledger_with_old_auth_rule(
docker_setup_and_teardown, pool_handler, wallet_handler, get_default_trustee
):
"""
set up 1.1.50 sovrin + 1.9.0 node + 1.9.0 plenum + 1.0.0 plugins stable to fail
(upgrade to 1.1.52 sovrin)
set up 1.9.0~dev1014 node + 1.9.0~dev829 plenum master (no plugins env)
(upgrade to 1.9.2~dev1061 node)
set up 1.1.135 sovrin + 1.9.0~dev1014 node + 1.9.0~dev829 plenum + 1.0.0~dev59 plugins master (prod env)
(upgrade to 1.1.136 sovrin)
"""
# create extra node
new_node = pool_starter(
pool_builder(
DOCKER_BUILD_CTX_PATH, DOCKER_IMAGE_NAME, 'new_node', NETWORK_NAME, 1
)
)[0]
GENESIS_PATH = '/var/lib/indy/sandbox/'
# put both genesis files
print(new_node.exec_run(['mkdir', GENESIS_PATH], user='indy'))
for _, prefix in enumerate(['pool', 'domain']):
bits, stat = client.containers.get('node1'). \
get_archive('{}{}_transactions_genesis'.format(GENESIS_PATH, prefix))
assert new_node.put_archive(GENESIS_PATH, bits)
new_ip = '10.0.0.6'
PORT_1 = '9701'
PORT_2 = '9702'
new_alias = 'Node5'
# initialize
assert new_node.exec_run(
['init_indy_node', new_alias, new_ip, PORT_1, new_ip, PORT_2, '000000000000000000000000000node5'],
user='indy'
).exit_code == 0
# upgrade
plenum_ver = '1.9.2~dev872'
plenum_pkg = 'indy-plenum'
node_ver = '1.9.2~dev1064'
node_pkg = 'indy-node'
sovrin_ver = '1.1.143'
sovrin_pkg = 'sovrin'
plugin_ver = '1.0.2~dev80'
assert new_node.exec_run(
['apt', 'update'],
user='root'
).exit_code == 0
assert new_node.exec_run(
['apt', 'install',
'{}={}'.format(sovrin_pkg, sovrin_ver),
'{}={}'.format(node_pkg, node_ver),
'{}={}'.format(plenum_pkg, plenum_ver),
'{}={}'.format('sovtoken', plugin_ver),
'{}={}'.format('sovtokenfees', plugin_ver),
'-y'],
user='root'
).exit_code == 0
# # node only upgrade
# assert new_node.exec_run(
# ['apt', 'update'],
# user='root'
# ).exit_code == 0
# assert new_node.exec_run(
# ['apt', 'install', '{}={}'.format(node_pkg, node_ver), '-y'],
# user='root'
# ).exit_code == 0
# start
assert new_node.exec_run(
['systemctl', 'start', 'indy-node'],
user='root'
).exit_code == 0
trustee_did, _ = get_default_trustee
steward_did, steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, steward_did, steward_vk, 'Steward5', 'STEWARD'
)
assert res['op'] == 'REPLY'
dests = [
'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv', '8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb',
'DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya', '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA'
]
init_time = 1
name = 'upgrade'+'_'+sovrin_ver+'_'+datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
action = 'start'
_sha256 = hashlib.sha256().hexdigest()
_timeout = 5
docker_4_schedule = json.dumps(
dict(
{dest: datetime.strftime(
datetime.now(tz=timezone.utc) + timedelta(minutes=init_time+i*5), '%Y-%m-%dT%H:%M:%S%z'
) for dest, i in zip(dests, range(len(dests)))}
)
)
reinstall = False
force = False
# set rule for cred def adding
req = await ledger.build_auth_rule_request(trustee_did, '102', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '2',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res1)
assert res1['op'] == 'REPLY'
# schedule pool upgrade
version = '1.9.2.dev1064' # overwrite for upgrade txn (for indy-node only)
req = await ledger.build_pool_upgrade_request(
trustee_did, name, sovrin_ver, action, _sha256, _timeout, docker_4_schedule, None, reinstall, force, sovrin_pkg
)
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# # INDY-2216
# print(client.containers.list())
#
# for node in client.containers.list()[1:]:
# assert node.exec_run(
# ['systemctl', 'stop', 'indy-node'],
# user='root'
# ).exit_code == 0
#
# for node in client.containers.list()[1:]:
# assert node.exec_run(
# ['apt', 'update'],
# user='root'
# ).exit_code == 0
# print(
# node.exec_run(
# ['apt', 'install',
# '{}={}'.format(sovrin_pkg, sovrin_ver),
# '{}={}'.format(node_pkg, node_ver),
# '{}={}'.format(plenum_pkg, plenum_ver),
# '{}={}'.format('sovtoken', plugin_ver),
# '{}={}'.format('sovtokenfees', plugin_ver),
# '-y',
# '--allow-change-held-packages'],
# user='root'
# )
# )
#
# for node in client.containers.list()[1:]:
# assert node.exec_run(
# ['systemctl', 'start', 'indy-node'],
# user='root'
# ).exit_code == 0
# # ------------------------
# wait until upgrade is finished
await asyncio.sleep(4*5*60)
# add 5th node
res3 = await send_node(
pool_handler, wallet_handler, ['VALIDATOR'], steward_did, EXTRA_DESTS[0], new_alias,
EXTRA_BLSKEYS[0], EXTRA_BLSKEY_POPS[0], new_ip, int(PORT_2), new_ip, int(PORT_1)
)
assert res3['op'] == 'REPLY'
await ensure_pool_is_in_sync(nodes_num=5)
# set rule for schema adding with off ledger parameters
req = await ledger.build_auth_rule_request(trustee_did, '101', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'OR',
'auth_constraints': [
{
'constraint_id': 'ROLE',
'role': '0',
'sig_count': 1,
'need_to_be_owner': False,
'off_ledger_signature': False,
'metadata': {}
},
{
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 0,
'need_to_be_owner': False,
'off_ledger_signature': True,
'metadata': {}
}
]
}))
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# set rule for revoc reg def adding
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '2',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res5)
assert res5['op'] == 'REPLY'
await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=25)
await ensure_pool_is_in_sync(nodes_num=5)
await ensure_state_root_hashes_are_in_sync(pool_handler, wallet_handler, trustee_did)
| 5,337,110
|
def ChangeUserPath(args):
"""Function to change or create the user repository path. This is where all the user's data is
stored."""
global user_datapath
if user_datapath:
sys.stdout.write("Current user_datapath is: %s\n" % user_datapath)
elif savedpath:
sys.stdout.write("Saved user_datapath: %s was not found\n" % savedpath)
p = input("Please provide a path to place a user repository (s to skip):\n")
if p.lower() == "s":
return False
newpath = os.path.abspath(os.path.join(p, "ABangleData/"))
# Handle some potential errors - this may not be completely robust.
if not os.path.exists(newpath):
try:
os.mkdir(newpath)
except OSError as exe:
if str(exe).startswith("[Errno 13]"):
sys.stderr.write("No write privelages for %s.\n" % os.path.abspath(p))
else:
sys.stderr.write(
"Path %s does not exist. Please provide an existing path to create a repository\n"
% os.path.abspath(p)
)
return False
elif not (os.access(newpath, os.R_OK) and os.access(newpath, os.W_OK)):
sys.stderr.write("No read/write privelages for %s.\n" % newpath)
return False
if not os.path.exists(os.path.join(newpath, "user_fvs")):
try:
os.mkdir(os.path.join(newpath, "user_fvs"))
except OSError as exe:
if str(exe).startswith("[Errno 13]"):
sys.stderr.write("No write privelages for %s.\n" % os.path.abspath(p))
return False
elif not (os.access(newpath, os.R_OK) and os.access(newpath, os.W_OK)):
sys.stderr.write("No read/write privelages for %s.\n" % newpath)
return False
user_datapath = newpath
ufname = open(os.path.join(path, "config/userdatapath.txt"), "w")
ufname.write(user_datapath)
ufname.close()
# Create the data store files.
CreateStore()
return True
| 5,337,111
|
def absolute_path_without_git(directory):
    """
    return the absolute path of local git repo
    """
    return os.path.abspath(directory + "/..")
| 5,337,112
|
def find_template(fname):
    """Find absolute path to template.
    """
    for dirname in tuple(settings.TEMPLATE_DIRS) + get_app_template_dirs('templates'):
        tmpl_path = os.path.join(dirname, fname)
        # print "TRYING:", tmpl_path
        if os.path.exists(tmpl_path):
            return tmpl_path
    raise IOError(fname + " not found.")
| 5,337,113
|
def create_hcp_sets(skeleton, side, directory, batch_size, handedness=0):
"""
Creates datasets from HCP data
IN: skeleton: boolean, True if input is skeleton, False otherwise,
side: str, 'right' or 'left'
handedness: int, 0 if mixed ind, 1 if right handed, 2 if left handed
directory: str, folder in which save the results
batch_size: int, size of training batches
weights: list, list of weights to apply to skeleton values
OUT: root_dir: created directory where results will be stored
dataset_train_loader, dataset_val_loader, dataset_test_loader: loaders
that will be used for training and testing
"""
print(torch.cuda.current_device())
date_exp = date.today().strftime("%d%m%y")
if skeleton == True:
skel = 'skeleton'
loss_type = 'CrossEnt'
root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) + '_2classes/'
else:
skel = 'norm_spm'
loss_type = 'L2'
root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) +'/'
#print("Parameters : skeleton: {}, side: {}, weights: {}, loss_type: {}".format(skeleton, side, weights, loss_type))
print(root_dir)
save_results.create_folder(root_dir)
if skeleton:
data_dir = '/neurospin/dico/lguillon/skeleton/sts_crop/'
#data_dir = '/home_local/lg261972/data/'
if handedness == 0:
input_data = 'sts_crop_skeleton_' + side
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = list(tmp.columns)
tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
else:
if handedness == 1:
input_data = side + '_hemi_rightH_sts_crop_skeleton'
else:
input_data = side + '_hemi_leftH_sts_crop_skeleton'
print(input_data)
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = tmp.Subject.values
print(len(filenames))
tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
else:
data_dir = '/neurospin/dico/lguillon/hcp_cs_crop/sts_crop/'+ side + '_hemi/'
data_dir = '/home_local/lg261972/data/'
if handedness == 0:
input_data = 'sts_crop_' + side
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = list(tmp.columns)
tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
else:
if handedness == 1:
input_data = side + '_hemi_rightH_sts_crop'
else:
input_data = side + '_hemi_leftH_sts_crop'
print(input_data)
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = tmp.Subject.values
print(len(filenames))
tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
tmp = tmp.to('cuda')
hcp_dataset = TensorDataset(filenames=filenames, data_tensor=tmp,
skeleton=skeleton, vae=False)
# Split training set into train, val and test
partition = [0.7, 0.2, 0.1]
print([round(i*(len(hcp_dataset))) for i in partition])
train_set, val_set, test_set = torch.utils.data.random_split(hcp_dataset,
[round(i*(len(hcp_dataset))) for i in partition])
#train_set = AugDatasetTransformer(train_set)
#val_set = AugDatasetTransformer(val_set)
#test_set = AugDatasetTransformer(test_set)
dataset_train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
shuffle=True, num_workers=0)
dataset_val_loader = torch.utils.data.DataLoader(val_set, shuffle=True,
num_workers=0)
dataset_test_loader = torch.utils.data.DataLoader(test_set, shuffle=True,
num_workers=0)
print("Dataset generated \n Size of training dataset :", len(dataset_train_loader))
return root_dir, dataset_train_loader, dataset_val_loader, dataset_test_loader
| 5,337,114
|
def rtrim(n):
    """Returns a transform that removes the rightmost n points
    """
    def t(xarr, yarr, *args):
        return (xarr[:-n], yarr[:-n]) + args
    t.__name__ = 'rtrim({})'.format(n)
    return t
| 5,337,115
|
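A minimal usage sketch for `rtrim` above, on plain Python lists (any sliceable sequence works the same way):

```python
t = rtrim(2)
xs, ys = [0, 1, 2, 3, 4], [10, 11, 12, 13, 14]
trimmed_x, trimmed_y = t(xs, ys)  # extra positional args would be passed through untouched
print(trimmed_x, trimmed_y)       # [0, 1, 2] [10, 11, 12]
print(t.__name__)                 # rtrim(2)
```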
def analyze_avg_prof_quality_by_department(dict_cursor, departmentID, campus):
    """
    >>> analyze_avg_prof_quality_by_department(dict_cursor, 'CSC', 'St. George')
    CSC
    enthusiasm 3.95
    course_atmosphere 3.90
    ...
    (This is not complete)
    """
    return __analyze_data_by_DB_GETMETHOD_WITH_TWO_ARGS(DEPARTMENT_QUALITY_BY_DID, dict_cursor, departmentID, campus)
| 5,337,116
|
def audit(environ):
"""Check a wmt-exe environment.
Parameters
----------
environ : dict
Environment variables.
Returns
-------
str
Warnings/errors.
"""
from os import linesep
messages = []
for command in ['TAIL', 'CURL', 'BASH']:
messages.append(check_is_executable(environ[command]))
for path_var in ['PYTHONPATH', 'LD_LIBRARY_PATH', 'PATH', 'CLASSPATH']:
for item in environ[path_var].split(pathsep):
messages.append(check_is_dir(item))
for path_var in ['SIDL_DLL_PATH']:
for item in environ[path_var].split(';'):
messages.append(check_is_dir(item))
for module in ['csdms', 'csdms.model']:
messages.append(check_is_module(module, env=environ))
for component in find_components(env=environ):
module = '.'.join(['csdms.model', component])
messages.append(check_is_module(module, env=environ))
for component in find_components(env=environ):
module = '.'.join(['csdms.model', component])
messages.append(check_is_component(module, component,
env=environ))
return linesep.join(messages)
| 5,337,117
|
def parse_to_json(data_str):
    """
    Convert string to a valid json object
    """
    json_obj_list = []
    obj = data_str.split('%')
    for record in obj:
        attributes = re.split(',', record)
        data = json.dumps(attributes)
        data = re.sub(r':', '":"', data)
        data = re.sub(r'\[', '{', data)
        data = re.sub(r']', '}', data)
        json_obj_list.append(data)
    return json_obj_list
| 5,337,118
|
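To make the chain of regex substitutions in `parse_to_json` concrete, here is what it does to a simple `%`-separated, comma-and-colon record string. The sample input is made up, and the approach assumes values contain no extra colons, commas, brackets, or quotes:

```python
sample = "name:alice,age:30%name:bob,age:25"
print(parse_to_json(sample))
# ['{"name":"alice", "age":"30"}', '{"name":"bob", "age":"25"}']
```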
def plot_ROC(
        y_test: pd.Series,
        y_prob: pd.Series,
        model_name: str,
        output_folder: str = '/mnt/data/figures',
        save_plot: bool = True):
    """Plot one ROC curve"""
    # Instantiate
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    # Calculate x and y for ROC-curve
    fpr, tpr, _ = roc_curve(y_test, y_prob)
    roc_auc = auc(fpr, tpr)
    # print(fpr.shape, tpr.shape, roc_auc)  # DEBUG
    # Plot of a ROC curve for a specific class
    plt.figure()
    plt.plot(fpr, tpr, color='#336699',
             label='AUC: {:0.2f}'.format(roc_auc))  # lw=2,
    plt.plot([0, 1], [0, 1], color='grey', linestyle='--')  # lw=2,
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('1 - Specificity')
    plt.ylabel('Sensitivity')
    # plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    if len(output_folder) < 1:
        raise ValueError(f"Invalid output folder given to save ROC plot: {output_folder}.")
    if save_plot:
        output_fpath = str(get_output_filepath(output_folder, 'roc', f'roc_{model_name}'))
        plt.savefig(output_fpath, bbox_inches='tight', dpi=300)
        logging.info(f"Saving ROC-AUC Model Comparison Plot to:\n{output_fpath}")
    plt.show()
| 5,337,119
|
def calculate_elbo(model, X, recon_X):
    """
    Compute the ELBO of the model from the reconstruction error and the KL divergence.
    """
    rec_loss = - np.sum(X * np.log(1e-8 + recon_X)
                        + (1 - X) * np.log(1e-8 + 1 - recon_X), 1)
    mu, logvar = model.transform(X)
    kl = -0.5 * np.sum(1 + logvar - mu ** 2 - np.exp(logvar), 1)
    elbo = np.mean(rec_loss + kl)
    return elbo
| 5,337,120
|
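A small runnable sketch of how `calculate_elbo` above is meant to be called. The stub model and the data are fabricated purely to show the expected shapes; the real `model.transform` would return the encoder's mean and log-variance:

```python
import numpy as np

class _StubModel:
    """Stand-in with a transform() returning (mu, logvar) of shape (n, latent_dim)."""
    def transform(self, X):
        n, latent_dim = X.shape[0], 2
        return np.zeros((n, latent_dim)), np.zeros((n, latent_dim))

X = np.random.randint(0, 2, size=(4, 5)).astype(float)  # binary observations
recon_X = np.clip(X * 0.9 + 0.05, 1e-6, 1 - 1e-6)        # fake reconstructions in (0, 1)
print(calculate_elbo(_StubModel(), X, recon_X))          # scalar objective value
```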
def dice_loss(pred, target):
    """
    Dice Loss based on Dice Similarity Coefficient (DSC)
    @param pred: torch.tensor, model prediction
    @param target: torch.tensor, ground truth label
    """
    return 1 - dice_coeff(pred, target)
| 5,337,121
|
def parse(transaction):
""" Parses Bitcoin Transaction into it's component parts"""
byteStringLength = 2
# Version
version = struct.unpack('<L', transaction[0:4*byteStringLength].decode("hex"))[0]
offset = 4*byteStringLength
# print "Version is: " + str(version)
# Inputs
varLength, inputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
# print "Input Count is: " + str(inputCount)
offset += varLength*byteStringLength
inputs = []
for i in range(0, inputCount):
# Hash of input (previous output) transaction
inHash = (transaction[offset:offset+64].decode("hex"))[::-1].encode("hex")
offset += 64
# Index of reference within input (previous output) transaction
inIndex = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
offset += 4*byteStringLength
# Script signature length
varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
offset += varLength*byteStringLength
# Script
script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
offset += scriptLen*byteStringLength
# Sequence
sequence = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
offset += 4*byteStringLength
# Append
# print "Input {0} is: {1}, {2}, {3}, {4}".format(i, inHash, inIndex, script, sequence)
inputs.append([inHash, inIndex, script, sequence])
# Outputs
varLength, outputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
# print "Output Count is: {0}".format(outputCount)
offset += varLength*byteStringLength
outputs = []
for i in range(0, outputCount):
# Index of reference within input (previous output) transaction
value = struct.unpack('<Q', transaction[offset:offset+8*byteStringLength].decode("hex"))[0]
offset += 8*byteStringLength
# Script signature length
varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
offset += varLength*2
# Script
script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
offset += scriptLen*byteStringLength
# Append
# print "Output {0} is: {1}, {2}".format(i, value, script)
outputs.append([value, script])
# Block Lock Time
blockLockTime = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
# print "Block Lock Time is: " + str(blockLockTime)
return (version, inputs, outputs, blockLockTime)
| 5,337,122
|
def get_version():
    """Get project version
    """
    version_file_path = os.path.join(
        os.path.dirname(__file__),
        'spowtd',
        'VERSION.txt')
    with open(version_file_path) as version_file:
        version_string = version_file.read().strip()
    version_string_re = re.compile('[0-9.]+')
    match = version_string_re.match(version_string)
    if match is None:
        raise ValueError(
            'version string "{}" does not match regexp "{}"'
            .format(version_string, version_string_re.pattern))
    return match.group(0)
| 5,337,123
|
def get_related_items_by_type(parser, token):
"""Gets list of relations from object identified by a content type.
Syntax::
{% get_related_items_by_type [content_type_app_label.content_type_model] for [object] as [varname] [direction] %}
"""
tokens = token.contents.split()
if len(tokens) not in (6, 7):
raise template.TemplateSyntaxError(
"%r tag requires 6 arguments" % tokens[0]
)
if tokens[2] != 'for':
raise template.TemplateSyntaxError(
"Third argument in %r tag must be 'for'" % tokens[0]
)
if tokens[4] != 'as':
raise template.TemplateSyntaxError(
"Fifth argument in %r tag must be 'as'" % tokens[0]
)
direction = 'forward'
if len(tokens) == 7:
direction = tokens[6]
return GetRelatedItemsByTypeNode(
name=tokens[1], obj=tokens[3], as_var=tokens[5], direction=direction
)
| 5,337,124
|
def stitch_valleys(valley_list):
    """Returns a stitched list of valleys to extract seq from."""
    valley_collection = utils.LocusCollection(valley_list, 1)
    stitched_valley_collection = valley_collection.stitch_collection()
    loci = []
    regions = []
    for valley in stitched_valley_collection.get_loci():
        if [valley.chr, valley.start, valley.end] not in regions:
            loci.append(valley)
            regions.append([valley.chr, valley.start, valley.end])
    return loci
| 5,337,125
|
def kepoutlier(infile, outfile=None, datacol='SAP_FLUX', nsig=3.0, stepsize=1.0,
npoly=3, niter=1, operation='remove', ranges='0,0', plot=False,
plotfit=False, overwrite=False, verbose=False,
logfile='kepoutlier.log'):
"""
kepoutlier -- Remove or replace statistical outliers from time series data
kepoutlier identifies data outliers relative to piecemeal best-fit
polynomials. Outliers are either removed from the output time series or
replaced by a noise-treated value defined by the polynomial fit. Identified
outliers and the best fit functions are optionally plotted for inspection
purposes.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file. ``outfile`` will be direct copy of
infile with either data outliers removed (i.e. the table will have
fewer rows) or the outliers will be corrected according to a best-fit
function and a noise model.
datacol : str
The column name containing data stored within extension 1 of infile.
This data will be searched for outliers. Typically this name is
SAP_FLUX (Simple Aperture Photometry fluxes) or PDCSAP_FLUX (Pre-search
Data Conditioning fluxes).
nsig : float
The sigma clipping threshold. Data deviating from a best fit function
by more than the threshold will be either removed or corrected
according to the user selection of operation.
stepsize : float
The data within datacol is unlikely to be well represented by a single
polynomial function. stepsize splits the data up into a series of time
blocks, each is fit independently by a separate function. The user can
provide an informed choice of stepsize after inspecting the data with
the kepdraw tool. Units are days.
npoly : int
The polynomial order of each best-fit function.
niter : int
If outliers are found in a particular data section, that data will be
removed temporarily and the time series fit again. This will be
iterated niter times before freezing upon the best available fit.
operation : str
* ``remove`` throws away outliers. The output data table will be smaller
than or equal in size to the input table.
* ``replace`` replaces outliers with a value that is consistent with
the best-fit polynomial function and a random component defined by the
rms of the data relative to the fit and calculated using the inverse
normal cumulative function and a random number generator.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon. An example
containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
``ranges = '0,0'`` will tell the task to operate on the whole time series.
plot : bool
Plot the data and outliers?
plotfit : bool
Overlay the polynomial fits upon the plot?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepoutlier kplr002437329-2010355172524_llc.fits --datacol SAP_FLUX
--nsig 4 --stepsize 5 --npoly 2 --niter 10 --operation replace
--verbose --plot --plotfit
.. image:: ../_static/images/api/kepoutlier.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPOUTLIER -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' nsig={}'.format(nsig)
+ ' stepsize={}'.format(stepsize)
+ ' npoly={}'.format(npoly)
+ ' niter={}'.format(niter)
+ ' operation={}'.format(operation)
+ ' ranges={}'.format(ranges)
+ ' plot={}'.format(plot)
+ ' plotfit={}'.format(plotfit)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPOUTLIER started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPOUTLIER: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
try:
work = instr[0].header['FILEVER']
cadenom = 1.0
except:
cadenom = cadence
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# read table structure
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
# filter input data table
try:
nanclean = instr[1].header['NANCLEAN']
except:
time = kepio.readtimecol(infile, table, logfile, verbose)
flux = kepio.readfitscol(infile, table, datacol, logfile, verbose)
finite_data_mask = np.isfinite(time) & np.isfinite(flux) & (flux != 0)
table = table[finite_data_mask]
instr[1].data = table
comment = 'NaN cadences removed from data'
kepkey.new('NANCLEAN', True, comment, instr[1], outfile, logfile,
verbose)
# read table columns
try:
intime = instr[1].data.field('barytime') + 2.4e6
except:
intime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose)
indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
verbose)
intime = intime + bjdref
indata = indata / cadenom
# time ranges for region to be corrected
t1, t2 = kepio.timeranges(ranges, logfile, verbose)
cadencelis = kepstat.filterOnRange(intime, t1, t2)
# find limits of each time step
tstep1, tstep2 = [], []
work = intime[0]
while work < intime[-1]:
tstep1.append(work)
tstep2.append(np.array([work + stepsize, intime[-1]],
dtype='float64').min())
work += stepsize
# find cadence limits of each time step
cstep1, cstep2 = [], []
work1 = 0
work2 = 0
for i in range(len(intime)):
if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
work2 = i
else:
cstep1.append(work1)
cstep2.append(work2)
work1 = i
work2 = i
cstep1.append(work1)
cstep2.append(work2)
outdata = indata * 1.0
# comment keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# clean up x-axis unit
intime0 = (tstart // 100) * 100.0
ptime = intime - intime0
xlab = 'BJD $-$ {}'.format(intime0)
# clean up y-axis units
pout = indata * 1.0
nrm = len(str(int(pout.max())))-1
pout = pout / 10**nrm
ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm
# data limits
xmin = ptime.min()
xmax = ptime.max()
ymin = pout.min()
ymax = pout.max()
xr = xmax - xmin
yr = ymax - ymin
ptime = np.insert(ptime, [0], [ptime[0]])
ptime = np.append(ptime, [ptime[-1]])
pout = np.insert(pout, [0], [0.0])
pout = np.append(pout, 0.0)
# plot light curve
if plot:
plt.figure()
plt.clf()
# plot data
ax = plt.axes([0.06, 0.1, 0.93, 0.87])
# force tick labels to be absolute rather than relative
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.plot(ptime, pout, color='#0000ff', linestyle='-', linewidth=1.0)
plt.fill(ptime, pout, color='#ffff00', linewidth=0.0, alpha=0.2)
plt.xlabel(xlab, {'color' : 'k'})
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# loop over each time step, fit data, determine rms
masterfit = indata * 0.0
mastersigma = np.zeros(len(masterfit))
functype = getattr(kepfunc, 'poly' + str(npoly))
for i in range(len(cstep1)):
pinit = [indata[cstep1[i]:cstep2[i]+1].mean()]
if npoly > 0:
for j in range(npoly):
pinit.append(0.0)
pinit = np.array(pinit, dtype='float32')
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty = \
kepfit.lsqclip(functype, pinit,
intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]],
indata[cstep1[i]:cstep2[i]+1], None, nsig,
nsig, niter, logfile, verbose)
for j in range(len(coeffs)):
masterfit[cstep1[i]: cstep2[i] + 1] += (coeffs[j]
* (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]]) ** j)
for j in range(cstep1[i], cstep2[i] + 1):
mastersigma[j] = sigma
if plotfit:
plt.plot(plotx + intime[cstep1[i]] - intime0, ploty / 10 ** nrm,
'g', lw=3)
except:
for j in range(cstep1[i], cstep2[i] + 1):
masterfit[j] = indata[j]
mastersigma[j] = 1.0e10
message = ('WARNING -- KEPOUTLIER: could not fit range '
+ str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]))
kepmsg.warn(logfile, message, verbose)
# reject outliers
rejtime, rejdata = [], []
naxis2 = 0
for i in tqdm(range(len(masterfit))):
if (abs(indata[i] - masterfit[i]) > nsig * mastersigma[i]
and i in cadencelis):
rejtime.append(intime[i])
rejdata.append(indata[i])
if operation == 'replace':
[rnd] = kepstat.randarray([masterfit[i]], [mastersigma[i]])
table[naxis2] = table[i]
table.field(datacol)[naxis2] = rnd
naxis2 += 1
else:
table[naxis2] = table[i]
naxis2 += 1
instr[1].data = table[:naxis2]
if plot:
rejtime = np.array(rejtime, dtype='float64')
rejdata = np.array(rejdata, dtype='float32')
plt.plot(rejtime - intime0, rejdata / 10 ** nrm, 'ro')
# plot ranges
plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
if ymin >= 0.0:
plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
else:
plt.ylim(1.0e-10, ymax + yr * 0.01)
# render plot
plt.show()
# write output file
print("Writing output file {}...".format(outfile))
instr.writeto(outfile)
# close input file
instr.close()
kepmsg.clock('KEPOUTLIER completed at', logfile, verbose)
| 5,337,126
|
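A Python-level call mirroring the command-line example given in the `kepoutlier` docstring above (the file name is taken from that example; whether the file is available locally is an assumption):

```python
kepoutlier("kplr002437329-2010355172524_llc.fits", datacol="SAP_FLUX",
           nsig=4, stepsize=5, npoly=2, niter=10, operation="replace",
           verbose=True, plot=True, plotfit=True)
```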
def no_gcab_namespace(name, *args):
    """
    Mock gi.require_version() to raise a ValueError to
    simulate that GCab bindings are not available.
    We mock importing the whole 'gi', so that this test
    can be run even when the 'gi' package is not available.
    """
    if name.startswith("gi"):
        m = mock.Mock()
        m.require_version.side_effect = ValueError
        return m
    return orig_import(name, *args)
| 5,337,127
|
def _start_beamtime(
PI_last, saf_num, experimenters=[], wavelength=None, test=False
):
"""function for start a beamtime"""
# check status first
active_beamtime = glbl.get('_active_beamtime')
if active_beamtime is False:
raise xpdAcqError("It appears that end_beamtime may have been "
"run.\nIf you wish to start a new beamtime, "
"please open a new terminal and proceed "
"with the standard starting sequence.")
# check directory
home_dir = glbl_dict['home']
if not os.path.exists(home_dir):
raise RuntimeError(
"WARNING: fundamental directory {} does not "
"exist.\nPlease contact beamline staff immediately".format(
home_dir
)
)
f_list = os.listdir(home_dir)
if len(f_list) != 0:
raise FileExistsError(
"There are more than one files/directories:\n"
"{}\n"
"under {}.\n"
"have you run '_end_beamtime()' yet?".format(f_list, home_dir)
)
elif len(f_list) == 0:
_make_clean_env()
print("INFO: initiated requried directories for experiment")
bt = Beamtime(PI_last, saf_num, experimenters, wavelength=wavelength)
os.chdir(home_dir)
print(
"INFO: to link newly created beamtime object to xrun, "
"please do\n"
">>> xrun.beamtime = bt"
)
# copy default Ni24.D to xpdUser/user_analysis
src = os.path.join(DATA_DIR, "Ni24.D")
dst = os.path.join(glbl_dict["usrAnalysis_dir"], "Ni24.D")
shutil.copy(src, dst)
_load_beamline_config(
glbl["blconfig_path"], test=test
)
# pre-populated scan plan
for expo in EXPO_LIST:
ScanPlan(bt, ct, expo)
# inject beamtime state
glbl['_active_beamtime'] = True
return bt
| 5,337,128
|
def logger(module_name: str):
    """Initialize and configure the logger."""
    logging.basicConfig(
        level=logging.INFO,
        format='[%(levelname)s][%(asctime)s] %(name)s: %(message)s'
    )
    return logging.getLogger(module_name)
| 5,337,129
|
def cast(op_name: str, expr: Expr, in_xlayers: List[XLayer]) -> XLayer:
"""
Conversion of Relay 'clip' layer
Relay
-----
Type: tvm.relay.op.clip
Ref: https://docs.tvm.ai/langref/relay_op.html
Parameters:
- a (relay.Expr)
The input tensor.
- a_min (float)
The clip minimum.
- a_max (float)
The clip maximum.
"""
a_min = float(expr.attrs.a_min)
a_max = float(expr.attrs.a_max)
logger.debug("clip: {}".format(op_name))
X = px.ops.clip(op_name, in_xlayers[0], a_min, a_max, relay_id=[hash(expr)])
logger.debug("-- outshape: {}".format(list(X.shapes)))
return X
| 5,337,130
|
def __generation_dec(n: int, m: int, x_min: np.array, x_max: np.array) -> np.matrix:
    """
    :param n: num rows in returned matrix
    :param m: num cols in returned matrix
    :param x_min: float array, min possible nums in cols of returned matrix
    :param x_max: float array, max possible nums in cols of returned matrix
    :return: n times m float matrix with nums in col number i in [x_min[i], x_max[i])
    """
    assert n > 0, "n should be positive"
    assert m > 0, "m should be positive"
    assert x_min.shape == (m, ), "x_min should be of shape (m, )"
    assert x_max.shape == (m, ), "x_max should be of shape (m, )"
    return np.random.uniform(low=x_min, high=x_max, size=(n, m))
| 5,337,131
|
def create_queue(domains, main_domains):
    """
    Create the task queue for the first round of probing.
    """
    for i, d in enumerate(domains):
        queue.put((d, main_domains[i]))
| 5,337,132
|
def apply_tariff(kwh, hour):
    """Calculates cost of electricity for given hour."""
    if 0 <= hour < 7:
        rate = 12
    elif 7 <= hour < 17:
        rate = 20
    elif 17 <= hour < 24:
        rate = 28
    else:
        raise ValueError(f'Invalid hour: {hour}')
    return rate * kwh
| 5,337,133
|
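For example, 3.5 kWh consumed at 18:00 falls in the 17–24 band, so `apply_tariff` charges it at the rate of 28:

```python
print(apply_tariff(3.5, 18))  # 98.0
print(apply_tariff(3.5, 3))   # 42.0 -- overnight band, rate 12
```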
def _compound_smiles(compound: reaction_pb2.Compound) -> str:
    """Returns the compound SMILES, if defined."""
    for identifier in compound.identifiers:
        if identifier.type == identifier.SMILES:
            return identifier.value
    return ""
| 5,337,134
|
def convert_pdf_to_txt(pdf, startpage=None):
"""Convert a pdf file to text and return the text.
This method requires pdftotext to be installed.
Parameters
----------
pdf : str
path to pdf file
startpage : int, optional
the first page we try to convert
Returns
-------
str
the converted text
"""
if startpage is not None:
startpageargs = ['-f', str(startpage)]
else:
startpageargs = []
stdout = subprocess.Popen(["pdftotext", "-q"] + startpageargs + [pdf, "-"],
stdout=subprocess.PIPE).communicate()[0]
# python2 and 3
if not isinstance(stdout, str):
stdout = stdout.decode()
return stdout
| 5,337,135
|
def cross_correlizer(sample_rate, max_itd, max_frequency):
    """
    Convenience function for creating a CrossCorrelizer with appropriate
    parameters.
    sample_rate : the sample rate of the wav files to expect.
    max_itd : the maximum interaural time difference to test.
    max_frequency : the highest frequency to test.
    """
    shift_max = int(np.ceil(max_itd * sample_rate))
    shift_steps = int(float(sample_rate) / max_frequency / 2.)
    return CrossCorrelizer(sample_rate, shift_max, shift_steps)
| 5,337,136
|
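A worked example of the two derived parameters in `cross_correlizer` (the `CrossCorrelizer` class itself is assumed to come from the surrounding module; the numbers below are illustrative):

```python
import numpy as np

sample_rate, max_itd, max_frequency = 44100, 0.0007, 1500.0
shift_max = int(np.ceil(max_itd * sample_rate))             # 31 samples of maximum shift
shift_steps = int(float(sample_rate) / max_frequency / 2.)  # 14 samples, half a period at 1.5 kHz
print(shift_max, shift_steps)
```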
def log_angle_distributions(args, pred_ang, src_seq):
""" Logs a histogram of predicted angles to wandb. """
# Remove batch-level masking
batch_mask = src_seq.ne(VOCAB.pad_id)
pred_ang = pred_ang[batch_mask]
inv_ang = inverse_trig_transform(pred_ang.view(1, pred_ang.shape[0], -1)).cpu().detach().numpy()
pred_ang = pred_ang.cpu().detach().numpy()
wandb.log({"Predicted Angles (sin cos)": wandb.Histogram(np_histogram=np.histogram(pred_ang)),
"Predicted Angles (radians)": wandb.Histogram(np_histogram=np.histogram(inv_ang))}, commit=False)
for sincos_idx in range(pred_ang.shape[-1]):
wandb.log({f"Predicted Angles (sin cos) - {sincos_idx:02}":
wandb.Histogram(np_histogram=np.histogram(pred_ang[:,sincos_idx]))}, commit=False)
for rad_idx in range(inv_ang.shape[-1]):
wandb.log({f"Predicted Angles (radians) - {rad_idx:02}":
wandb.Histogram(np_histogram=np.histogram(inv_ang[0,:,rad_idx]))}, commit=False)
| 5,337,137
|
def save_token(token: str) -> None:
    """Store the token in the cache."""
    cachedir = Path(platformdirs.user_cache_dir(APP_NAME))
    tokencache = cachedir / "token"
    tokencache.parent.mkdir(exist_ok=True)
    tokencache.write_text(token)
| 5,337,138
|
def batchmark(avatar_list_path: str, getchu_data_path: str, list_output_path: str, avatar_output_path: str) -> None:
"""
:return:
"""
years = utils.get_release_years_hot_map(avatar_list_path, getchu_data_path)
with open(avatar_list_path) as fin:
avatar_path = fin.readlines()
avatar_path = list(map(lambda each: each.split(' '), avatar_path))
avatar_path = list(map(lambda each: [each[0], each[-1].strip('\n')], avatar_path))
fz = len(str(len(avatar_path)))
new_id = 0
with open(list_output_path, 'w') as fout:
for each in avatar_path:
file_path = each[-1][3:]
id = each[0]
img = Image.open(file_path)
feat = get_main_tag(img)
if len(feat) == 0:
continue
feat = reduce(lambda x, y: x + ';' + y, feat)
save_path = os.path.join(avatar_output_path, str(new_id).zfill(fz)+'.jpg')
print('{},{},{},{}'.format(str(new_id).zfill(fz), years[int(id)], feat, save_path))
fout.write('{},{},{},{}\n'.format(str(new_id).zfill(fz), years[int(id)], feat, save_path))
shutil.copyfile(file_path, save_path)
new_id += 1
| 5,337,139
|
def close_sessions(tsm: SMContext):
    """
    Resets and closes all NI-SCOPE instrument sessions from the pin map file associated
    with the Semiconductor Module Context.
    Args:
        tsm (SMContext): TestStand semiconductor module context
    """
    sessions = tsm.get_all_niscope_sessions()
    for session in sessions:
        session.reset()
        session.close()
| 5,337,140
|
def plot_precision_recall_at_k(
predicate_df, idx_flip, max_k=100, give_random=True, give_ensemble=True
):
"""
Plots precision/recall at `k` values for flipped label experiments.
Returns an interactive altair visualisation. Make sure it is installed beforehand.
Arguments:
predicate_df: the dataframe with predicates from `ensemble.get_predicates`
idx_flip: array that indicates if labels are wrong
max_k: the maximum value for `k` to consider
give_random: plot the "at k" statistics for the randomly selected lower bound
give_ensemble: plot the "at k" statistics from the reason ensemble
"""
import altair as alt
alt.data_transformers.disable_max_rows()
# We combine the results in dataframes
plot_df = calculate_precision_recall_at_k(
predicate_df=predicate_df,
idx_flip=idx_flip,
max_k=max_k,
give_random=give_random,
give_ensemble=give_ensemble,
)
# So that we may plot it.
return (
alt.Chart(plot_df)
.mark_line()
.encode(x="k", y="value", color="variable", strokeDash="setting")
.interactive()
)
| 5,337,141
|
def has_permissions(
    permissions: int, required: List[Union[int, BasePermission]]
) -> bool:
    """Returns `True` if `permissions` has all required permissions"""
    if permissions & Administrator().value:
        return True
    all_perms = 0
    for perm in required:
        if isinstance(perm, int):
            all_perms |= perm
        else:
            all_perms |= perm.value
    return permissions & all_perms == all_perms
| 5,337,142
|
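A self-contained illustration of the bitmask check at the heart of `has_permissions` above, using plain integer flags (the `Administrator` short-circuit in the real function comes from the surrounding permissions module):

```python
READ, WRITE, DELETE = 1 << 0, 1 << 1, 1 << 2

granted = READ | WRITE
required = READ | WRITE
print((granted & required) == required)                 # True: every required bit is set
print((granted & (READ | DELETE)) == (READ | DELETE))   # False: the DELETE bit is missing
```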
def input_to_stations(inout_folder,file_in,file_out,forced_stations=[],v=0):
"""
Convert DiFX/.input into CorrelX/stations.ini.
Parameters
----------
inout_folder : str
path to folder containing .input file, and where newly created stations.ini file will be placed.
file_in : str
.input filename.
file_out : str
stations.ini filename.
forced_files : list of str,optional
list of str with station names (for overriding values from .input).
v : int
verbose if 1.
Returns
-------
None
"""
clock_line = ""
lines_out=[]
not_first=0
full_input_file=inout_folder+"/"+file_in
full_output_file=inout_folder+"/"+file_out
summary_file=inout_folder+"/"+file_out+"_report"
if forced_stations!=[]:
print(" Forcing station names: "+','.join(list(map(str,set(forced_stations)))))
print(" Processing "+full_input_file+" ...")
# ### --- Process .input and prepare stations.ini ---
with open(full_input_file, 'r') as f:
lines = f.readlines()
for line in lines:
if C_DIFX_INPUT_TELESCOPE_NAME in line: # Station name
st_id = get_last_num(line)
st_name = get_value_im(line)
if forced_stations!=[]:
st_name=forced_stations[int(st_id)]
if not_first:
lines_out.append("")
lines_out.append(INI_HF+st_name+INI_HL)
lines_out.append(C_INI_ST_ID+INI_SEP+st_id)
not_first=1
elif C_DIFX_INPUT_CLOCK_REF_MJD in line: # Station clock epoch
clock_ref = get_value_im(line)
lines_out.append(C_INI_ST_CLOCK_REF+INI_SEP+clock_ref)
elif C_DIFX_INPUT_CLOCK_COEFF in line: # Station clock polynomial
[st,order,value]=get_coeff(line)
if order=="0":
value0=value
else:
clock_line = C_INI_ST_CLOCK_POLY+INI_SEP+value0+INI_VEC+value
lines_out.append(clock_line)
write_lines_to_f(lines_out,full_output_file)
if v==1:
print("File contents:")
for ii in lines_out:
print(" "+ii)
print("Done.")
| 5,337,143
|
def mat_to_xyz(mat: NDArrayFloat) -> NDArrayFloat:
    """Convert a 3D rotation matrix to a sequence of _extrinsic_ rotations.
    In other words, this takes a 3D rotation matrix and returns a sequence of
    Tait-Bryan angles representing the transformation.
    Reference: https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
    Reference: https://en.wikipedia.org/wiki/Euler_angles#Tait%E2%80%93Bryan_angles_2
    Args:
        mat: (...,3,3) Rotation matrix.
    Returns:
        (...,3) Tait-Bryan angles (in radians) formulated for a sequence of extrinsic rotations.
    """
    xyz_rad: NDArrayFloat = Rotation.from_matrix(mat).as_euler("xyz", degrees=False)
    return xyz_rad
| 5,337,144
|
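A round-trip sketch for `mat_to_xyz`: build a rotation from known angles with SciPy, then recover them (assumes `mat_to_xyz` and its `Rotation` import are on the path):

```python
import numpy as np
from scipy.spatial.transform import Rotation

angles = np.array([0.1, -0.2, 0.3])                   # extrinsic x, y, z angles in radians
mat = Rotation.from_euler("xyz", angles).as_matrix()  # (3, 3) rotation matrix
print(mat_to_xyz(mat))                                # approximately [ 0.1 -0.2  0.3]
```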
def get_user_gravatar(user_id):
    """
    Gets link to user's gravatar from serializer.
    Usage::
        {% get_user_gravatar user_id %}
    Examples::
        {% get_user_gravatar 1 %}
        {% get_user_gravatar user.id %}
    """
    try:
        user = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        return static('img/anonymous.png')
    if not user.email:
        return static('img/anonymous.png')
    url_base = 'https://www.gravatar.com/avatar/{}?d=mp'
    user_hash = hashlib.md5(user.email.lower().encode('utf-8')).hexdigest()
    return url_base.format(user_hash)
| 5,337,145
|
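The Gravatar URL scheme used by `get_user_gravatar` can be shown without Django (the email address below is made up):

```python
import hashlib

email = "jane.doe@example.com"
user_hash = hashlib.md5(email.lower().encode("utf-8")).hexdigest()
print("https://www.gravatar.com/avatar/{}?d=mp".format(user_hash))
```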
def natural_key(s):
    """Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
    ``name5`` will come before ``name10`` and ``1`` will come before ``A``).
    This function is designed to be used as the ``key`` argument to sorting
    functions.
    :param s: the str/unicode string to convert.
    :rtype: tuple
    """
    # Use _nkre to split the input string into a sequence of
    # digit runs and non-digit runs. Then use _nkconv() to convert
    # the digit runs into ints and the non-digit runs to lowercase.
    return tuple(_nkconv(m) for m in _nkre.findall(s))
| 5,337,146
|
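`natural_key` relies on the module-level helpers `_nkre` and `_nkconv`, which are not shown in this snippet. A minimal sketch of plausible definitions, plus a usage example, is below; the real helpers in the source module may differ:

```python
import re

_nkre = re.compile(r"\d+|\D+")          # split into digit runs and non-digit runs

def _nkconv(chunk):
    # digit runs become ints, everything else is lowercased
    return int(chunk) if chunk.isdigit() else chunk.lower()

names = ["name10", "name5", "Name2"]
print(sorted(names, key=natural_key))   # ['Name2', 'name5', 'name10']
```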
def read_ValidationSets_Sources():
    """Read and return ValidationSets_Sources.csv file"""
    df = pd.read_csv(data_dir + 'ValidationSets_Sources.csv', header=0,
                     dtype={"Year": "str"})
    return df
| 5,337,147
|
def igcd_lehmer(a, b):
"""Computes greatest common divisor of two integers.
Euclid's algorithm for the computation of the greatest
common divisor gcd(a, b) of two (positive) integers
a and b is based on the division identity
a = q*b + r,
where the quotient q and the remainder r are integers
and 0 <= r < b. Then each common divisor of a and b
divides r, and it follows that gcd(a, b) == gcd(b, r).
The algorithm works by constructing the sequence
r0, r1, r2, ..., where r0 = a, r1 = b, and each rn
is the remainder from the division of the two preceding
elements.
In Python, q = a // b and r = a % b are obtained by the
floor division and the remainder operations, respectively.
These are the most expensive arithmetic operations, especially
for large a and b.
Lehmer's algorithm is based on the observation that the quotients
qn = r(n-1) // rn are in general small integers even
when a and b are very large. Hence the quotients can be
usually determined from a relatively small number of most
significant bits.
The efficiency of the algorithm is further enhanced by not
computing each long remainder in Euclid's sequence. The remainders
are linear combinations of a and b with integer coefficients
derived from the quotients. The coefficients can be computed
as far as the quotients can be determined from the chosen
most significant parts of a and b. Only then a new pair of
consecutive remainders is computed and the algorithm starts
anew with this pair.
References
==========
.. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm
"""
a, b = abs(as_int(a)), abs(as_int(b))
if a < b:
a, b = b, a
# The algorithm works by using one or two digit division
# whenever possible. The outer loop will replace the
# pair (a, b) with a pair of shorter consecutive elements
# of the Euclidean gcd sequence until a and b
# fit into two Python (long) int digits.
nbits = 2*int_info.bits_per_digit
while a.bit_length() > nbits and b != 0:
# Quotients are mostly small integers that can
# be determined from most significant bits.
n = a.bit_length() - nbits
x, y = int(a >> n), int(b >> n) # most significant bits
# Elements of the Euclidean gcd sequence are linear
# combinations of a and b with integer coefficients.
# Compute the coefficients of consecutive pairs
# a' = A*a + B*b, b' = C*a + D*b
# using small integer arithmetic as far as possible.
A, B, C, D = 1, 0, 0, 1 # initial values
while True:
# The coefficients alternate in sign while looping.
# The inner loop combines two steps to keep track
# of the signs.
# At this point we have
# A > 0, B <= 0, C <= 0, D > 0,
# x' = x + B <= x < x" = x + A,
# y' = y + C <= y < y" = y + D,
# and
# x'*N <= a' < x"*N, y'*N <= b' < y"*N,
# where N = 2**n.
# Now, if y' > 0, and x"//y' and x'//y" agree,
# then their common value is equal to q = a'//b'.
# In addition,
# x'%y" = x' - q*y" < x" - q*y' = x"%y',
# and
# (x'%y")*N < a'%b' < (x"%y')*N.
# On the other hand, we also have x//y == q,
# and therefore
# x'%y" = x + B - q*(y + D) = x%y + B',
# x"%y' = x + A - q*(y + C) = x%y + A',
# where
# B' = B - q*D < 0, A' = A - q*C > 0.
if y + C <= 0:
break
q = (x + A) // (y + C)
# Now x'//y" <= q, and equality holds if
# x' - q*y" = (x - q*y) + (B - q*D) >= 0.
# This is a minor optimization to avoid division.
x_qy, B_qD = x - q*y, B - q*D
if x_qy + B_qD < 0:
break
# Next step in the Euclidean sequence.
x, y = y, x_qy
A, B, C, D = C, D, A - q*C, B_qD
# At this point the signs of the coefficients
# change and their roles are interchanged.
# A <= 0, B > 0, C > 0, D < 0,
# x' = x + A <= x < x" = x + B,
# y' = y + D < y < y" = y + C.
if y + D <= 0:
break
q = (x + B) // (y + D)
x_qy, A_qC = x - q*y, A - q*C
if x_qy + A_qC < 0:
break
x, y = y, x_qy
A, B, C, D = C, D, A_qC, B - q*D
# Now the conditions on top of the loop
# are again satisfied.
# A > 0, B < 0, C < 0, D > 0.
if B == 0:
# This can only happen when y == 0 in the beginning
# and the inner loop does nothing.
# Long division is forced.
a, b = b, a % b
continue
# Compute new long arguments using the coefficients.
a, b = A*a + B*b, C*a + D*b
# Small divisors. Finish with the standard algorithm.
while b:
a, b = b, a % b
return a
| 5,337,148
|
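A quick sanity check of `igcd_lehmer` against `math.gcd` (note that the function uses SymPy internals such as `as_int` and `int_info`, so this assumes it is run inside that environment):

```python
import math

a = 2**200 + 12345
b = 2**150 + 6789
assert igcd_lehmer(a, b) == math.gcd(a, b)
print(igcd_lehmer(a, b))
```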
def init(start):
    """Initializes n."""
    global n
    n = start
    print("Input is", n)
| 5,337,149
|
def delete_records(connection):
    """Delete comment record."""
    execute_query(connection, delete_comment)
| 5,337,150
|
def regular_polygon_area_equivalent_radius(n, radius=1.0):
    """ Compute equivalent radius to obtain same surface as circle.
    \theta = \frac{2 \pi}{n}
    r_{eqs} = \sqrt{\frac{\theta r^2}{\sin{\theta}}}
    :param radius: circle radius
    :param n: number of regular polygon segments
    :return: equivalent circumradius of the regular polygon (same area as the circle)
    """
    theta = 2 * np.pi / n
    r = np.sqrt((theta * radius ** 2) / np.sin(theta))
    return r
| 5,337,151
|
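A numerical check of the formula in `regular_polygon_area_equivalent_radius`: a hexagon drawn with the returned circumradius has the same area as the unit circle:

```python
import numpy as np

n = 6
r_eq = regular_polygon_area_equivalent_radius(n, radius=1.0)
polygon_area = 0.5 * n * r_eq**2 * np.sin(2 * np.pi / n)
print(np.isclose(polygon_area, np.pi))  # True
```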
def get_dashboard_list(project_id=None, page=1, page_size=25, token_info=None, user=None):
    """Get a list of dashboards
    :param project_id: Filter dashboards by project ID
    :type project_id: str
    :param page: Page number to return
    :type page: int
    :param page_size: Number of dashboards per page
    :type page_size: int
    :rtype: DashboardList
    """
    query = Dashboard.query
    project = None
    if "project_id" in connexion.request.args:
        project = Project.query.get(connexion.request.args["project_id"])
        if project:
            if not project_has_user(project, user):
                return "Forbidden", 403
            query = query.filter(Dashboard.project_id == project_id)
    offset = (page * page_size) - page_size
    total_items = query.count()
    total_pages = (total_items // page_size) + (1 if total_items % page_size > 0 else 0)
    dashboards = query.offset(offset).limit(page_size).all()
    return {
        "dashboards": [dashboard.to_dict() for dashboard in dashboards],
        "pagination": {
            "page": page,
            "pageSize": page_size,
            "totalItems": total_items,
            "totalPages": total_pages,
        },
    }
| 5,337,152
|
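The offset and page-count arithmetic used in `get_dashboard_list`, shown on concrete numbers (the values are illustrative):

```python
page, page_size, total_items = 3, 25, 130
offset = (page * page_size) - page_size   # 50: skip the first two pages
total_pages = (total_items // page_size) + (1 if total_items % page_size > 0 else 0)  # 6
print(offset, total_pages)
```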
def test_non_iterable_relations():
    """Test that ValueError is raised if link data has a non-iterable object for relations.
    1. Create a link parser for a dictionary with non-iterable relations.
    2. Try to call parse_relations method.
    3. Check that ValueError is raised.
    4. Check the error message.
    """
    with pytest.raises(ValueError) as error_info:
        LinkParser(data={"rel": None}).parse_relations()
    assert error_info.value.args[0] == "Failed to iterate over relations from link data", (
        "Wrong error"
    )
| 5,337,153
|
def get_download_link(url, quality_type=2, get_dlink_only=True, is_merge=False, is_remain=True):
"""
Get video download links.
:param url: source page URL
:param quality_type: resolution type (1: lowChapters 2: chapters 3: chapters2 4: chapters3 5: chapters4)
:param get_dlink_only: whether to only fetch the download links
:param is_merge: whether to merge segmented videos
:param is_remain: whether to keep the temporary directory
:return:
"""
pid = get_pid_by_url(url)
if not pid:
return
target_url = const.API_URL + '?pid=' + pid
data = json.loads(get_html(target_url, const.USER_AGENT, const.REFER_URL))
result = list()
temp_list = list()
if data['ack'] == 'no':
return
title = data['title']
video = data['video']
valid_chapter_num = video['validChapterNum']
chapters = [x for x in video.keys() if 'hapters' in x]
chapters[1:] = sorted(chapters[1:])
if quality_type < 1:
quality_type = 1
if quality_type > valid_chapter_num:
quality_type = valid_chapter_num
video_list = video[chapters[quality_type - 1]]
for x in video_list:
url = x['url']
if isinstance(url, unicode):
url = url.encode('utf8')
result.append(url)
temp_list.append('file\t' + '\'' + url.split('/')[-1] + '\'')
if not result:
return
save_to_file(result, title + '.txt', const.BASE_VIDEO_DIR)
save_to_file(temp_list, const.TMP_FILE, const.TMP_DIR)
if not get_dlink_only:
ext = r1(r'\.([^.]+)$', result[0])
assert ext in ('flv', 'mp4')
download_videos(title + '.%s' % ext, dlinks=result, is_merge=is_merge, is_remain=is_remain)
| 5,337,154
|
def create_document(Content=None, Requires=None, Attachments=None, Name=None, VersionName=None, DocumentType=None, DocumentFormat=None, TargetType=None, Tags=None):
"""
Creates a Systems Manager (SSM) document. An SSM document defines the actions that Systems Manager performs on your managed instances. For more information about SSM documents, including information about supported schemas, features, and syntax, see AWS Systems Manager Documents in the AWS Systems Manager User Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.create_document(
Content='string',
Requires=[
{
'Name': 'string',
'Version': 'string'
},
],
Attachments=[
{
'Key': 'SourceUrl'|'S3FileUrl'|'AttachmentReference',
'Values': [
'string',
],
'Name': 'string'
},
],
Name='string',
VersionName='string',
DocumentType='Command'|'Policy'|'Automation'|'Session'|'Package'|'ApplicationConfiguration'|'ApplicationConfigurationSchema'|'DeploymentStrategy'|'ChangeCalendar',
DocumentFormat='YAML'|'JSON'|'TEXT',
TargetType='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type Content: string
:param Content: [REQUIRED]\nThe content for the new SSM document in JSON or YAML format. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command.\nFor examples, see the following topics in the AWS Systems Manager User Guide .\n\nCreate an SSM document (AWS API)\nCreate an SSM document (AWS CLI)\nCreate an SSM document (API)\n\n
:type Requires: list
:param Requires: A list of SSM documents required by a document. This parameter is used exclusively by AWS AppConfig. When a user creates an AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document for validation purposes. For more information, see AWS AppConfig in the AWS Systems Manager User Guide .\n\n(dict) --An SSM document required by the current document.\n\nName (string) -- [REQUIRED]The name of the required SSM document. The name can be an Amazon Resource Name (ARN).\n\nVersion (string) --The document version required by the current document.\n\n\n\n\n
:type Attachments: list
:param Attachments: A list of key and value pairs that describe attachments to a version of a document.\n\n(dict) --Identifying information about a document attachment, including the file name and a key-value pair that identifies the location of an attachment to a document.\n\nKey (string) --The key of a key-value pair that identifies the location of an attachment to a document.\n\nValues (list) --The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify.\n\nFor the key SourceUrl , the value is an S3 bucket location. For example: 'Values': [ 's3://my-bucket/my-folder' ]\nFor the key S3FileUrl , the value is a file in an S3 bucket. For example: 'Values': [ 's3://my-bucket/my-folder/my-file.py' ]\nFor the key AttachmentReference , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example: 'Values': [ 'MyOtherDocument/3/my-other-file.py' ] However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example: 'Values': [ 'arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py' ]\n\n\n(string) --\n\n\nName (string) --The name of the document attachment file.\n\n\n\n\n
:type Name: string
:param Name: [REQUIRED]\nA name for the Systems Manager document.\n\nWarning\nYou can\'t use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n\naws-\namazon\namzn\n\n\n
:type VersionName: string
:param VersionName: An optional field specifying the version of the artifact you are creating with the document. For example, 'Release 12, Update 6'. This value is unique across all versions of a document, and cannot be changed.
:type DocumentType: string
:param DocumentType: The type of document to create.
:type DocumentFormat: string
:param DocumentFormat: Specify the document format for the request. The document format can be JSON, YAML, or TEXT. JSON is the default format.
:type TargetType: string
:param TargetType: Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of \'/\' the document can run on all types of resources. If you don\'t specify a value, the document can\'t run on any resources. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide .
:type Tags: list
:param Tags: Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an SSM document to identify the types of targets or the environment where it will run. In this case, you could specify the following key name/value pairs:\n\nKey=OS,Value=Windows\nKey=Environment,Value=Production\n\n\nNote\nTo add tags to an existing SSM document, use the AddTagsToResource action.\n\n\n(dict) --Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines.\n\nKey (string) -- [REQUIRED]The name of the tag.\n\nValue (string) -- [REQUIRED]The value of the tag.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DocumentDescription': {
'Sha1': 'string',
'Hash': 'string',
'HashType': 'Sha256'|'Sha1',
'Name': 'string',
'VersionName': 'string',
'Owner': 'string',
'CreatedDate': datetime(2015, 1, 1),
'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',
'StatusInformation': 'string',
'DocumentVersion': 'string',
'Description': 'string',
'Parameters': [
{
'Name': 'string',
'Type': 'String'|'StringList',
'Description': 'string',
'DefaultValue': 'string'
},
],
'PlatformTypes': [
'Windows'|'Linux',
],
'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package'|'ApplicationConfiguration'|'ApplicationConfigurationSchema'|'DeploymentStrategy'|'ChangeCalendar',
'SchemaVersion': 'string',
'LatestVersion': 'string',
'DefaultVersion': 'string',
'DocumentFormat': 'YAML'|'JSON'|'TEXT',
'TargetType': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'AttachmentsInformation': [
{
'Name': 'string'
},
],
'Requires': [
{
'Name': 'string',
'Version': 'string'
},
]
}
}
Response Structure
(dict) --
DocumentDescription (dict) --
Information about the Systems Manager document.
Sha1 (string) --
The SHA1 hash of the document, which you can use for verification.
Hash (string) --
The Sha256 or Sha1 hash created by the system when the document was created.
Note
Sha1 hashes have been deprecated.
HashType (string) --
The hash type of the document. Valid values include Sha256 or Sha1 .
Note
Sha1 hashes have been deprecated.
Name (string) --
The name of the Systems Manager document.
VersionName (string) --
The version of the artifact associated with the document.
Owner (string) --
The AWS user account that created the document.
CreatedDate (datetime) --
The date when the document was created.
Status (string) --
The status of the Systems Manager document.
StatusInformation (string) --
A message returned by AWS Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, "The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct."
DocumentVersion (string) --
The document version.
Description (string) --
A description of the document.
Parameters (list) --
A description of the parameters for a document.
(dict) --
Parameters specified in a System Manager document that run on the server when the command is run.
Name (string) --
The name of the parameter.
Type (string) --
The type of parameter. The type can be either String or StringList.
Description (string) --
A description of what the parameter does, how to use it, the default value, and whether or not the parameter is optional.
DefaultValue (string) --
If specified, the default values for the parameters. Parameters without a default value are required. Parameters with a default value are optional.
PlatformTypes (list) --
The list of OS platforms compatible with this Systems Manager document.
(string) --
DocumentType (string) --
The type of document.
SchemaVersion (string) --
The schema version.
LatestVersion (string) --
The latest version of the document.
DefaultVersion (string) --
The default version.
DocumentFormat (string) --
The document format, either JSON or YAML.
TargetType (string) --
The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide .
Tags (list) --
The tags, or metadata, that have been applied to the document.
(dict) --
Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines.
Key (string) --
The name of the tag.
Value (string) --
The value of the tag.
AttachmentsInformation (list) --
Details about the document attachments, including names, locations, sizes, and so on.
(dict) --
An attribute of an attachment, such as the attachment name.
Name (string) --
The name of the attachment.
Requires (list) --
A list of SSM documents required by a document. For example, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document.
(dict) --
An SSM document required by the current document.
Name (string) --
The name of the required SSM document. The name can be an Amazon Resource Name (ARN).
Version (string) --
The document version required by the current document.
Exceptions
SSM.Client.exceptions.DocumentAlreadyExists
SSM.Client.exceptions.MaxDocumentSizeExceeded
SSM.Client.exceptions.InternalServerError
SSM.Client.exceptions.InvalidDocumentContent
SSM.Client.exceptions.DocumentLimitExceeded
SSM.Client.exceptions.InvalidDocumentSchemaVersion
:return: {
'DocumentDescription': {
'Sha1': 'string',
'Hash': 'string',
'HashType': 'Sha256'|'Sha1',
'Name': 'string',
'VersionName': 'string',
'Owner': 'string',
'CreatedDate': datetime(2015, 1, 1),
'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',
'StatusInformation': 'string',
'DocumentVersion': 'string',
'Description': 'string',
'Parameters': [
{
'Name': 'string',
'Type': 'String'|'StringList',
'Description': 'string',
'DefaultValue': 'string'
},
],
'PlatformTypes': [
'Windows'|'Linux',
],
'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package'|'ApplicationConfiguration'|'ApplicationConfigurationSchema'|'DeploymentStrategy'|'ChangeCalendar',
'SchemaVersion': 'string',
'LatestVersion': 'string',
'DefaultVersion': 'string',
'DocumentFormat': 'YAML'|'JSON'|'TEXT',
'TargetType': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'AttachmentsInformation': [
{
'Name': 'string'
},
],
'Requires': [
{
'Name': 'string',
'Version': 'string'
},
]
}
}
:returns:
(string) --
"""
pass
| 5,337,155
|
def set(launcher, command):
"""Set a launcher"""
config.launchers.writable[launcher] = command
config.launchers.write()
| 5,337,156
|
def _ssim(X, Y, filter, K=(0.01, 0.03)):
""" Calculate ssim index for X and Y"""
K1, K2 = K
# batch, channel, [depth,] height, width = X.shape
C1 = K1 ** 2
C2 = K2 ** 2
filter = filter.to(X.device, dtype=X.dtype)
mu_x = gaussian_filter(X, filter)
mu_y = gaussian_filter(Y, filter)
mu_x_sq = mu_x.pow(2)
mu_y_sq = mu_y.pow(2)
mu_x_mu_y = mu_x * mu_y
sigma_x_sq = (gaussian_filter(X * X, filter) - mu_x_sq)
sigma_y_sq = (gaussian_filter(Y * Y, filter) - mu_y_sq)
sigma_xy = (gaussian_filter(X * Y, filter) - mu_x_mu_y)
cs_map = (2 * sigma_xy + C2) / (sigma_x_sq + sigma_y_sq + C2) # set alpha=beta=gamma=1
ssim_map = ((2 * mu_x_mu_y + C1) / (mu_x_sq + mu_y_sq + C1))
ssim_map *= cs_map
ssim_per_channel = torch.flatten(ssim_map, 2).mean(-1)
cs = torch.flatten(cs_map, 2).mean(-1)
return ssim_per_channel, cs
| 5,337,157
|
def get_all_messages(notification_queue, **kwargs):
"""
Get all messages on the specified notification queue
Variables:
complete_queue => Queue to get the message from
Arguments:
None
Data Block:
None
Result example:
[] # List of messages
"""
resp_list = []
u = NamedQueue("nq-%s" % notification_queue,
host=config.core.redis.persistent.host,
port=config.core.redis.persistent.port,
db=config.core.redis.persistent.db)
while True:
msg = u.pop(blocking=False)
if msg is None:
break
resp_list.append(msg)
return make_api_response(resp_list)
| 5,337,158
|
def ssa_reconstruct(pc, v, k):
"""
from Vimal
Series reconstruction for given SSA decomposition using vector of components
:param pc: matrix with the principal components from SSA
:param v: matrix of the singular vectors from SSA
:param k: vector with the indices of the components to be reconstructed
:return: the reconstructed time series
"""
if np.isscalar(k):
k = [k]
if pc.ndim != 2:
raise ValueError('pc must be a 2-dimensional matrix')
if v.ndim != 2:
raise ValueError('v must be a 2-dimensional matrix')
t, dim = pc.shape
n_points = t + (dim - 1)
    if any(dim <= x or x < 0 for x in k):
        raise ValueError('k must be a vector of indices in the range 0..%d' % (dim - 1))
pc_comp = np.asarray(np.matrix(pc[:, k]) * np.matrix(v[:, k]).T)
xr = np.zeros(n_points)
times = np.zeros(n_points)
# reconstruction loop
for i in range(dim):
xr[i: t + i] = xr[i: t + i] + pc_comp[:, i]
times[i: t + i] = times[i: t + i] + 1
xr = (xr / times) * np.sqrt(t)
return xr
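# Illustrative usage (added sketch, not part of the original source) with synthetic
# SSA-like inputs. Assumed shapes: pc is (t, dim) principal components and v is a
# (dim, dim) matrix of singular vectors, so pc[:, k] @ v[:, k].T is (t, dim).
_rng = np.random.default_rng(0)
_pc = _rng.standard_normal((100, 5))
_v, _ = np.linalg.qr(_rng.standard_normal((5, 5)))
_xr = ssa_reconstruct(_pc, _v, [0, 1])   # rebuild from components 0 and 1
print(_xr.shape)                         # (104,) == t + dim - 1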
| 5,337,159
|
def loadImtoolrc(imtoolrc=None):
"""
Locates, then reads in IMTOOLRC configuration file from
system or user-specified location, and returns the
dictionary for reference.
"""
# Find the IMTOOLRC file. Except as noted below, this order
# matches what ximtool and ds9 use.
_home = os.getenv("HOME")
# Look for path to directory where this module is installed
# This will be last-resort location for IMTOOLRC that was
# distributed with this module.
_module_path = os.path.split(__file__)[0]
####
# list of file names to look for; ok to have None to skip an entry
_name_list = []
# There are two environment variables that might set the location
# of imtoolrc:
# getenv('imtoolrc')
_name_list.append(os.getenv(_default_imtoolrc_env[0]))
# getenv('IMTOOLRC')
_name_list.append(os.getenv(_default_imtoolrc_env[1]))
# ~/.imtoolrc
if 'HOME' in os.environ :
_name_list.append( os.path.join(os.environ['HOME'], ".imtoolrc") )
_name_list.append(sys.prefix+os.sep+_default_local_imtoolrc)
# /usr/local/lib/imtoolrc
_name_list.append(_default_system_imtoolrc)
# $iraf/dev/imtoolrc - this is not in ds9 or NOAO's ximtool,
# but it is in the AURA Unified Release ximtool. This is the
# one place on your system where you can be certain that
# imtoolrc is really there. Eventually, we will make a patch
# to add this to ds9 and to IRAF.
if 'iraf' in os.environ :
_name_list.append( os.path.join( os.environ['iraf'], 'dev', 'imtoolrc') )
# special to numdisplay: use imtoolrc that is in the package directory.
# Basically, this is our way of having a built-in default table.
_name_list.append(_module_path+os.sep+'imtoolrc')
####
# Search all possible IMTOOLRC names in list
# and open the first one found...
    _fdin = None
    for name in _name_list:
        if not name:
            continue
        try:
            _fdin = open(name)
            break
        except OSError:
            continue
    if _fdin is None:
        raise OSError("No IMTOOLRC file could be found in any of the default locations.")
#Parse the file, line by line and populate the dictionary
_lines = _fdin.readlines()
_fdin.close()
# Build a dictionary for the entire IMTOOL table
# It will be indexed by configno.
fbdict = {}
for line in _lines:
# Strip out any blanks/tabs, Python 3 compat
line = line.strip()
# Ignore empty lines
if len(line) > 1:
_lsp = line.split()
# Also, ignore comment lines starting with '#'
if _lsp[0] != '#':
configno = int(_lsp[0])
_dict = {'nframes':int(_lsp[1]),'width':int(_lsp[2]),'height':int(_lsp[3]),'name':_lsp[5]}
fbdict[configno] = _dict
return fbdict
| 5,337,160
|
def get_theo_joints_pm(W, b, beta):
"""calculate the theoretical state distribution for a Boltzmann
machine
"""
N = len(b)
joints = []
states = get_states(N)
for s in states:
joints.append(np.exp(-1. * get_energy(W, b, (2. * s - 1.), beta)))
joints /= np.sum(joints)
return joints
| 5,337,161
|
def tail(the_file: BinaryIO, lines_2find: int = 20) -> list[bytes]:
    """
    From http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
    Reads at least lines_2find + 1 line breaks from the bottom, block by block
    for speed; the extra line break ensures we don't return a half line.
    """
    lines_found: int = 0
    total_bytes_scanned: int = 0
    the_file.seek(0, 2)
    bytes_in_file: int = the_file.tell()
    while lines_2find + 1 > lines_found and bytes_in_file > total_bytes_scanned:
        byte_block: int = min(1024, bytes_in_file - total_bytes_scanned)
        the_file.seek(-(byte_block + total_bytes_scanned), 2)
        total_bytes_scanned += byte_block
        lines_found += the_file.read(byte_block).count(b"\n")
    the_file.seek(-total_bytes_scanned, 2)
    line_list: list[bytes] = list(the_file.readlines())
    return line_list[-lines_2find:]
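# Illustrative usage (added sketch, not part of the original source): the file must
# be opened in binary mode because tail() seeks from the end and counts b"\n".
import tempfile

with tempfile.TemporaryFile() as _fh:
    _fh.write(b"line1\nline2\nline3\nline4\n")
    print(tail(_fh, 2))  # [b'line3\n', b'line4\n']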
| 5,337,162
|
def add_device_tag_command(client, args):
""" Command to add tag to an existing admin devices entry """
site, concentrator, map = get_site_params()
transmitter_id = args.get('transmitter_id')
tag = args.get('tag')
result = client.add_device_tag(site=site, concentrator=concentrator, map=map,
transmitter_id=transmitter_id, tag=tag)
if 'status' not in result:
return_error('Failed to add device tag')
return result['status'], {}, result
| 5,337,163
|
def CommaSeparatedFloats(sFloatsCSV):
"""Read comma-separated floats from string.
[sFloatsCSV]: string, contains comma-separated floats.
<retval>: list, floats parsed from string.
"""
return [float(sFloat) for sFloat in sFloatsCSV.replace(" ","").split(",")]
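# Illustrative usage (added sketch, not part of the original source): whitespace
# around the commas is tolerated.
print(CommaSeparatedFloats("1.5, 2, -0.25"))  # [1.5, 2.0, -0.25]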
| 5,337,164
|
def merge_channels(image_list):
"""
Merge channels of multiple scalar ANTsImage types into one
multi-channel ANTsImage
ANTsR function: `mergeChannels`
Arguments
---------
image_list : list/tuple of ANTsImage types
scalar images to merge
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image3 = ants.merge_channels([image,image2])
>>> image3.components == 2
"""
inpixeltype = image_list[0].pixeltype
dimension = image_list[0].dimension
components = len(image_list)
for image in image_list:
if not isinstance(image, iio.ANTsImage):
raise ValueError('list may only contain ANTsImage objects')
if image.pixeltype != inpixeltype:
raise ValueError('all images must have the same pixeltype')
libfn = utils.get_lib_fn('mergeChannels%s' % image_list[0]._libsuffix)
image_ptr = libfn([image.pointer for image in image_list])
return iio.ANTsImage(pixeltype=inpixeltype,
dimension=dimension,
components=components,
pointer=image_ptr)
| 5,337,165
|
def get_var(name: str, options: dict) -> str:
"""
Returns the value from the given dict with key 'INPUT_$key',
or if this does not exist, key 'key'.
"""
return options.get('INPUT_{}'.format(name)) or options.get(name)
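# Illustrative usage (added sketch, not part of the original source): the
# 'INPUT_'-prefixed key (GitHub Actions style) takes precedence over the bare key.
_opts = {"INPUT_token": "abc", "token": "fallback"}
print(get_var("token", _opts))  # 'abc'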
| 5,337,166
|
def get_filenames(is_training, data_dir, num_files=1014):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, "train-%05d-of-01014" % i) for i in range(num_files)
]
else:
return [
os.path.join(data_dir, "validation-%05d-of-00128" % i) for i in range(128)
]
| 5,337,167
|
def clear_reaction_role_message(message: discord.Message, emoji: typing.Union[discord.Emoji, str]):
"""
Removes the message for a reaction role
:param message: Message that has the reaction role
:param emoji: Emoji for the reaction role
"""
global guilds, raw_settings
combo_id = str(message.channel.id) + str(message.id)
guilds[message.guild]["reaction_roles"][message]["emojis"][emoji]["message"] = None
if isinstance(emoji, str):
raw_settings["guilds"][str(message.guild.id)]["reaction_roles"][combo_id]["emojis"][emoji]["message"] = None
else:
raw_settings["guilds"][str(message.guild.id)]["reaction_roles"][combo_id]["emojis"][str(emoji.id)]["message"] \
= None
save_json(os.path.join("config", "settings.json"), raw_settings)
| 5,337,168
|
def create_txt_response(name, txt_records):
"""
Returns an RRSet containing the 'txt_records' as the result of a DNS
query for 'name'.
This takes advantage of the fact that an Answer object mostly behaves
like an RRset.
"""
return dns.rrset.from_text_list(name, 60, "IN", "TXT", txt_records)
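# Illustrative usage (added sketch, not part of the original source): a fake answer
# to a TXT lookup, e.g. for an ACME-style challenge record.
_rrset = create_txt_response("_acme-challenge.example.com.", ["some-validation-token"])
print(_rrset)  # _acme-challenge.example.com. 60 IN TXT "some-validation-token"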
| 5,337,169
|
def test_crps_gaussian_on_test_set(supply_test_set):
"""Test CRPS on the test set for some dummy values."""
assert np.abs(crps_gaussian(*supply_test_set) - 0.59080610693) < 1e-6
| 5,337,170
|
def FatalTimeout(max_run_time):
"""ContextManager that exits the program if code is run for too long.
This implementation is fairly simple, thus multiple timeouts
cannot be active at the same time.
Additionally, if the timeout has elapsed, it'll trigger a SystemExit
exception within the invoking code, ultimately propagating that past
itself. If the underlying code tries to suppress the SystemExit, once
a minute it'll retrigger SystemExit until control is returned to this
manager.
Args:
max_run_time: a positive integer.
"""
max_run_time = int(max_run_time)
if max_run_time <= 0:
raise ValueError("max_run_time must be greater than zero")
# pylint: disable=W0613
def kill_us(sig_num, frame):
# While this SystemExit *should* crash it's way back up the
# stack to our exit handler, we do have live/production code
# that uses blanket except statements which could suppress this.
# As such, keep scheduling alarms until our exit handler runs.
# Note that there is a potential conflict via this code, and
# RunCommand's kill_timeout; thus we set the alarming interval
# fairly high.
signal.alarm(60)
raise SystemExit("Timeout occurred- waited %i seconds, failing."
% max_run_time)
original_handler = signal.signal(signal.SIGALRM, kill_us)
remaining_timeout = signal.alarm(max_run_time)
if remaining_timeout:
# Restore things to the way they were.
signal.signal(signal.SIGALRM, original_handler)
signal.alarm(remaining_timeout)
# ... and now complain. Unfortunately we can't easily detect this
# upfront, thus the reset dance above.
raise Exception("_Timeout cannot be used in parallel to other alarm "
"handling code; failing")
try:
yield
finally:
# Cancel the alarm request and restore the original handler.
signal.alarm(0)
signal.signal(signal.SIGALRM, original_handler)
| 5,337,171
|
def bit_xor(*arguments):
"""
Bitwise XOR function.
"""
return ast.BitXor(*arguments)
| 5,337,172
|
def create_compile_order_file(project_file, compile_order_file, vivado_path=None):
"""
Create compile file from Vivado project
"""
print(
"Generating Vivado project compile order into %s ..."
% abspath(compile_order_file)
)
if not exists(dirname(compile_order_file)):
makedirs(dirname(compile_order_file))
print("Extracting compile order ...")
run_vivado(
join(dirname(__file__), "tcl", "extract_compile_order.tcl"),
tcl_args=[project_file, compile_order_file],
vivado_path=vivado_path,
)
| 5,337,173
|
def test_uncollectable_incref(testdir):
"""
Test with the Yagot plugin enabled for uncollectable objects and
uncollectable object produced with increased reference count.
"""
test_code = """
import sys
import gc
import yagot
import test_leaky
def test_leak():
l1 = [1, 2]
assert gc.is_tracked(l1)
assert sys.getrefcount(l1) == 2
test_leaky.incref(l1)
assert sys.getrefcount(l1) == 3
"""
testdir.makepyfile(test_code)
result = testdir.runpytest('--yagot', '--yagot-leaks-only')
result.stdout.fnmatch_lines([
'*There were 1 uncollectable object(s) '
'caused by function test_leak.py::test_leak*',
])
assert result.ret == 1
| 5,337,174
|
def remove_r(file_new, file_old, file_save):
"""从file_new文件中去除file_old中的重复行,并将结果保存在file_save文件中。
注意文件读取时的编码转换。
"""
file_dir = _temp_file_path
file_new = os.path.join(_temp_file_path, file_new)
file_old = os.path.join(_temp_file_path, file_old)
file_save = os.path.join(_temp_file_path, file_save)
with codecs.open(file_old, 'r', 'utf-8') as f_o:
stock_id = re.findall(r'\["(\d+)"', str(f_o.read()))
with codecs.open(file_new, 'r', 'utf-8') as f_n:
for line in f_n:
stock_id_new = re.findall(r'\["(\d+)"', str(line))
if stock_id_new[0] not in stock_id:
write_stock_file(line, file_save)
else:
                print('Line {0} is a duplicate and has been removed!\n'.format(stock_id_new[0]))
    print('Task finished; the deduplicated data was saved to {0}\n'.format(file_save))
| 5,337,175
|
def get_chromiumdir(platform, release):
"""
    Args:
        platform (str): a sys.platform str
        release (str): platform release string (e.g. 'XP' on Windows)
    Returns:
        list: single-element list containing the path to the Chromium User Data Directory
http://www.chromium.org/user-experience/user-data-directory
"""
if platform == 'darwin':
chromedir = os.path.expanduser(
'~/Library/Application Support/Chromium')
elif platform.startswith('linux'):
chromedir = os.path.expanduser(
'~/.config/chromium')
elif platform == 'win32':
        if release == 'XP':
            chromedir = os.path.expanduser(
                r'~\Local Settings\Application Data\Chromium\User Data')
        else:
            chromedir = os.path.expanduser(
                r'~\AppData\Local\Chromium\User Data')
else:
raise NotImplementedError("Unknown platform: %r" % platform)
return [chromedir]
| 5,337,176
|
def make_ts_scorer(
score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs,
):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in `~sklearn.model_selection.GridSearchCV`
and `~sklearn.model_selection.cross_validate`. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean
Not yet implemented, kept only to be compatible with the scikit-learn API
needs_threshold : boolean
Not yet implemented, kept only to be compatible with the scikit-learn API
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
callable
scorer object that returns a scalar score
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True," " but not both.")
if needs_proba:
raise NotImplementedError("Usage/evaluation of prediction probabilities are not yet implemented.")
elif needs_threshold:
raise NotImplementedError("Evaluation of decision function output is not yet implemented.")
else:
cls = _TSPredictScorer
return cls(score_func, sign, kwargs)
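# Illustrative usage (added sketch, not part of the original source): wrap a loss so
# that model selection maximizes its negation, mirroring sklearn's make_scorer.
from sklearn.metrics import mean_squared_error

neg_mse_ts_scorer = make_ts_scorer(mean_squared_error, greater_is_better=False)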
| 5,337,177
|
def pfas(x):
"""Parse a JSON array of PFA expressions as a PFA abstract syntax trees.
:type x: open JSON file, JSON string, or Pythonized JSON
:param x: PFA expressions in a JSON array
:rtype: list of titus.pfaast.Expression
:return: parsed expressions as a list of abstract syntax trees
"""
return jsonToAst.exprs(x)
| 5,337,178
|
def overlap(x, y, a, b):
"""Finds the overlap of (x, y) and (a, b).
Assumes an overlap exists, i.e. y >= a and b >= x.
"""
c = clamp(x, a, b)
d = clamp(y, a, b)
return c, d
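# Illustrative usage (added sketch, not part of the original source); relies on the
# module's clamp(v, lo, hi) helper used above.
print(overlap(2, 8, 5, 12))  # (5, 8)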
| 5,337,179
|
def topological_sort(g):
"""
Returns a list of vertices in directed acyclic graph g in topological
order.
"""
ready = []
topo = []
in_count = {}
for v in g.vertices():
in_count[v] = g.degree(v, outgoing=False)
if in_count[v] == 0: # v has no constraints, i.e no incoming edges
ready.append(v)
while len(ready) > 0:
u = ready.pop()
topo.append(u)
for e in g.incident_edges(u):
v = e.opposite(u)
in_count[v] -= 1 # v now no longer has u as a constraint
if in_count[v] == 0:
ready.append(v)
return topo
| 5,337,180
|
def probability_of_failure_in_any_period(p, n):
"""
Returns the probability that a failure (of probability p in one period)
happens once or more in n periods.
The probability of failure in one period is p, so the probability
of not failing is (1 - p). So the probability of not
failing over n periods is (1 - p) ** n, and the probability
of one or more failures in n periods is:
1 - (1 - p) ** n
    Doing the math without losing precision is tricky.
    After the binomial expansion, you get (for even n):
    a = 1 - (1 - choose(n, 1) * p + choose(n, 2) * p**2 - choose(n, 3) * p**3 + ... + choose(n, n) * p**n)
    For odd n, the last term is negative.
    To avoid precision loss, we don't want to compute (1 - p) if p is
    really tiny, so we cancel out the 1 and get:
    a = choose(n, 1) * p - choose(n, 2) * p**2 + choose(n, 3) * p**3 - ...
"""
if p < 0.01:
# For tiny numbers, (1 - p) can lose precision.
# First, compute the result for the integer part
n_int = int(n)
result = 0.0
sign = 1
for i in range(1, n_int + 1):
p_exp_i = p ** i
if p_exp_i != 0:
result += sign * choose(n_int, i) * (p ** i)
sign = -sign
# Adjust the result to include the fractional part
# What we want is: 1.0 - (1.0 - result) * ((1.0 - p) ** (n - n_int))
# Which gives this when refactored:
result = 1.0 - ((1.0 - p) ** (n - n_int)) + result * ((1.0 - p) ** (n - n_int))
return result
else:
# For high probabilities of loss, the powers of p don't
# get small faster than the coefficients get big, and weird
# things happen
return 1.0 - (1.0 - p) ** n
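# Illustrative usage (added sketch, not part of the original source). The small-p
# branch relies on the module's choose(n, k) binomial helper; the call below uses
# p >= 0.01, so only the closed-form branch runs.
print(probability_of_failure_in_any_period(0.2, 3))   # 1 - 0.8**3 ~ 0.488
# For tiny p the expansion branch stays close to the naive formula, e.g.
# probability_of_failure_in_any_period(1e-9, 365) ~= 3.65e-07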
| 5,337,181
|
def stage_1(transformed_token_list):
"""Checks tokens against ngram to unigram dictionary"""
dict_data = pd.read_excel(v.stage_1_input_path, sheet_name=v.input_file_sheet_name)
selected_correct_token_data = pd.DataFrame(dict_data, columns=v.stage_1_input_file_columns)
transformed_state_1 = []
for sentence in transformed_token_list:
for row in selected_correct_token_data.itertuples():
b = list(literal_eval(row.ngram))
ngram = ''
for word in b: ngram += (' ' + word)
split_bigram = ngram.strip().split(' ')
split_sentence = sentence.strip().split(' ')
if ngram.strip() in sentence and split_bigram[0] in split_sentence and split_bigram[1] in split_sentence:
sentence = sentence.replace(ngram.strip(), row.unigram)
transformed_state_1.append(sentence)
print_to_file(v.stage_1_output_path, transformed_state_1, v.input_file_columns)
return transformed_state_1
| 5,337,182
|
def _perform_sanity_checks(config, extra_metadata):
"""
Method to perform sanity checks on current classification run.
:param config: dirbs config instance
:param extra_metadata: job extra metadata dict obj
:return: bool (true/false)
"""
curr_conditions = [c.as_dict() for c in config.conditions]
curr_operators = [op.as_dict() for op in config.region_config.operators]
curr_amnesty = config.amnesty_config.as_dict()
if curr_conditions == extra_metadata['conditions'] and \
curr_operators == extra_metadata['operators'] and \
curr_amnesty == extra_metadata['amnesty']:
return True
return False
| 5,337,183
|
def get_key_from_id(id : str) -> str:
"""
Gets the key from an id.
    :param id: identifier to look up in KEYMAP
    :return: the key mapped to the given id
"""
assert id in KEYMAP, "ID not found"
return KEYMAP[id]
| 5,337,184
|
def CreateFromDict(registration_dict):
"""Returns the content of the header file."""
template = string.Template("""\
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// base/android/jni_generator/jni_registration_generator.py
// Please do not change its content.
#ifndef HEADER_GUARD
#define HEADER_GUARD
#include <jni.h>
#include "base/android/jni_generator/jni_generator_helper.h"
#include "base/android/jni_int_wrapper.h"
// Step 1: Forward declaration.
${FORWARD_DECLARATIONS}
// Step 2: Main dex and non-main dex registration functions.
bool RegisterMainDexNatives(JNIEnv* env) {
${REGISTER_MAIN_DEX_NATIVES}
return true;
}
bool RegisterNonMainDexNatives(JNIEnv* env) {
${REGISTER_NON_MAIN_DEX_NATIVES}
return true;
}
#endif // HEADER_GUARD
""")
if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
return ''
return jni_generator.WrapOutput(template.substitute(registration_dict))
| 5,337,185
|
def cmu_indic(corpus_dir: Pathlike, output_dir: Pathlike):
"""CMU Indic data preparation."""
prepare_cmu_indic(corpus_dir, output_dir=output_dir)
| 5,337,186
|
def controller_add_raw_commands_not_privileged():
"""
    This view allows a client to send a raw command to a CISCO device in non-privileged EXEC mode
:return: <dict> result of the operation. check documentation for details
"""
# TODO: convert print screens to app.logger.debug("message")
print("OUTPUT - Entering function: controller_add_raw_commands_not_privileged")
# START COUNTING TIME FOR LOGGING PURPOSES
start_time = datetime.now()
# GETTING CLIENT INFO, FOR LOGGING
client_info = get_http_request_info(request)
# OUTPUT MESSAGES IN DEBUG MODE- ( WE CAN CREATE A DEBUG MODE FOR LOGGING )
message = "OUTPUT - WEBSERVICE URI: \t'{}'".format(client_info["REQUEST_URI"])
print(message)
message = ("OUTPUT - REQUEST_INFORMATION " + str(client_info))
    # The line below is just a reminder that we could create a debug-mode log with messages like this one.
# logger_engine.debug(message)
print("OUTPUT - starting time: {}".format(start_time))
print(message)
print("OUTPUT - Let´s request data from client - CHECK IF DATA IS VALID")
data = request_data(client_info)
print("OUTPUT - data: ", data)
if isinstance(data[0], dict):
if data[0]["STATUS"] == "Failure":
print("OUTPUT - WE HAVE FOUND AN ERROR......")
end_time = datetime.now()
total_time = end_time - start_time
if data[0]["ERROR"] == "1":
print("OUTPUT - ERROR 1. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging= None)
# CALL METHOD FOR ERROR 1 ( CHECK ERROR-CATALOG.txt for details )
logger_obj.error_1_json_data(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
if data[0]["ERROR"] == "2":
print("OUTPUT - ERROR 2. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=data[1])
# CALL METHOD FOR ERROR 2 ( CHECK ERROR-CATALOG.txt for details
logger_obj.error_2_fundamental_data_required(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
if data[0]["ERROR"] == "3":
print("OUTPUT - ERROR 3. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
#EXAMPLE HOW DATA SHOULD BE :OUTPUT - \
# data ({'STATUS': 'Failure', 'ERROR': '3', 'TYPE': 'WEBSERVICE DATA FAILURE', 'MESSAGE':
# 'Please, send an ip key in your dictionary'}, {'ips': '192.168.7.1'}) ------ ------- is a tuple
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=data[1])
# CALL METHOD FOR ERROR 3 ( CHECK ERROR-CATALOG.txt for details
logger_obj.error_webservice_data_failure(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
print("=" * 79)
print("OUTPUT - data is OK! ")
print("OUTPUT - data[0]", data[0])
print("OUTPUT - data[1]", data[1])
print("=" * 79)
# CHECK IF IS PORT
# MAKE A COPY OF CLIENT DATA for logging purposes
user_data_for_logging = dict((k, v) for k, v in data[2].items())
print("OUTPUT - user_data_for_logging:", user_data_for_logging)
    # LET'S REMOVE THE command_to_send KEY AND STORE ITS VALUE IN A NEW VARIABLE, so that data[1] only keeps the
    # connection parameters that are passed on to the device driver.
command_to_send = data[1].pop("command_to_send")
print("OUTPUT - command_list_to_send: {}".format(command_to_send))
# CHECK IF CLIENT WANTS A SPECIFIC PORT FOR CONNECTION to device. SET TO NONE if not
if isinstance(data[1], dict):
if 'port' in data[1].keys():
port = data[1].pop("port")
else:
port = None
if data[0]["CONNECTIONS"] == "both":
# ------- first try a telnet connection ---------------
connection = ConnectToDevice(data[1], connection_type="TELNET", port=port)
# LETS START CONFIGURING
result = connection.configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
# result = connection.get_show_run()
print("OUTPUT - result of telnet connection: ", result)
print("OUTPUT - type(result): ", type(result))
print("OUTPUT - result is a list with one dictionary unstructured data")
result_telnet = result
end_time = datetime.now()
total_time = end_time - start_time
        # ------- At this point we should check if telnet was successful ---------------------
if isinstance(result_telnet, dict):
if result_telnet["STATUS"] == "Failure":
del connection
print("OUTPUT - Perform a ssh connection because telnet failed ")
# -- ------ Perform a ssh connection because telnet failed ----------
connection_new = ConnectToDevice(data[1], connection_type="SSH", port=port)
# LETS START CONFIGURING
result = connection_new .configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
print("OUTPUT - result of ssh connection: ", result)
print("OUTPUT - type(result): ", type(result))
print("OUTPUT - result is a list with one dictionary unstructured data")
result_ssh = result
if isinstance(result, dict):
# ---- Check if ssh connection was successful. if not, inform client of both fails and log
if result["STATUS"] == "Failure":
# Expecting here to appear error 4 -------- HANDLE ERROR
# first handle error 4
if result["ERROR"] == "4":
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info,
current_app.logger,
user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR BOTH CONNECTION ERROR
logger_obj.error_both_connection_fails("Failed connection to device", result_ssh, result_telnet)
# CREATE A JSON LOG
logger_obj.create_json_log()
print("OUTPUT - result: {}".format(result))
raise InvalidUsage("Bad request!", status_code=513, payload=logger_obj.error_dict)
                        # Error 8: FOR NOW THIS ERROR DOESN'T EXIST YET - LATER WE MAY NEED IT
# if result["ERROR"] == "8":
# CREATE ERROR OBJECT
# logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, logger_engine,
# user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR ERROR
# logger_obj.error_operation_error(result)
# CREATE A JSON LOG
# logger_obj.create_json_log()
# raise InvalidUsage("Bad request!", status_code=513, payload=logger_obj.error_dict)
# ----- ----- connection to device Successful. ------ Build log and return info to client ----
# Connection to device:Successful. OPERATION SUCCESSFUL ------ Build log and return interface config to client ----
# --------- CREATE success LOG -------
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
success_dict = {"STATUS": "Success",
"MESSAGE": "Check results. Whenever possible, we will try to output Structured and Unstructured "
"data "
"captured from de Network Device CLI. We have to be careful on what commands we are "
"trying to send, because we are not in privileged mode, and sometimes we are not "
"authorized to run them on the device and the output will be something like: e.g.( "
" ^\n% Invalid input detected at '^' marker.\n )",
"STRUCTURED_RESULT": result[0],
"UNSTRUCTURED_RESULT": result[1]
}
final_dict = {"NETWORK AUTOMATE RESPONSE": success_dict}
# CALL METHOD FOR success messages
logger_obj.sucess_add_raw_commands(success_dict)
# CREATE A JSON LOG
logger_obj.create_json_log()
# GIVE RESPONSE TO VIEW, for client
return final_dict
    # What differs between an SSH and a telnet connection is only the device driver used by netmiko, so the first
    # thing we should do is find out which connection type the client wants us to perform.
    # ------ We will pass this choice to the class "ConnectToDevice". ---------------
if data[0]["CONNECTIONS"] == "telnet":
connection = ConnectToDevice(data=data[1], connection_type="TELNET", port=port)
if data[0]["CONNECTIONS"] == "ssh":
connection = ConnectToDevice(data=data[1], connection_type="SSH", port=port)
# LETS START CONFIGURING
result = connection.configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
print("OUTPUT - configure_add_raw_commands ended ....")
print("="*79)
print("OUTPUT - result: ", result)
print("OUTPUT - type(result): ", type(result))
# "OUTPUT - result[0] is a list with one dictionary with structured data and a dictionary with unstructured data")
# TIME FOR LOGGING PURPOSES
end_time = datetime.now()
total_time = end_time - start_time
    # ---- At this point, if the connection object returned an error ( like a connection error or other ) we should
    # report it and inform the client
if isinstance(result, dict):
if result["STATUS"] == "Failure":
# if status is failure , we are EXPECTING HERE ERROR 4, 3 or 6
# first Error 4
if result["ERROR"] == "4":
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR ERROR
logger_obj. error_netmiko(result)
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=512, payload=logger_obj.error_dict)
# Connection to device: Successful. OPERATION SUCCESSFUL ------ Build log and return info to client ----
# --------- CREATE success LOG -------
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
success_dict = {"STATUS": "Success",
"MESSAGE": "Check results. Whenever possible, we will try to output Structured and Unstructured data"
" captured from de Network Device CLI. We have to be careful on what commands we are "
"trying to send, because we are not in privileged mode, and sometimes we are not "
"authorized to run them on the device and the output will be something like: e.g.( "
" ^\n% Invalid input detected at '^' marker.\n )",
"STRUCTURED_RESULT": result[0],
"UNSTRUCTURED_RESULT": result[1]
}
final_dict = {"NETWORK AUTOMATE RESPONSE": success_dict}
# CALL METHOD FOR success messages
logger_obj.sucess_add_raw_commands(success_dict)
# CREATE A JSON LOG
logger_obj.create_json_log()
# GIVE RESPONSE TO Client
return final_dict
| 5,337,187
|
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
"""A fixture which provides a static message."""
filename = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
file_contents = "{0}{1}{2}".format(prefix, message, suffix)
with open(filename, 'w') as f:
f.write(file_contents)
return filename
| 5,337,188
|
def entropy_image(filename,bins=30):
"""
    Extracts the Rényi (order-2, collision) entropy of the image stored under filename.
"""
img = cv2.imread(filename,0)/255.0 # gray images
p,_ = np.histogram( img, range=[0.0,1.0],bins=bins )
return -np.log(np.dot(p,p)/(np.sum(p)**2.0))
| 5,337,189
|
def satisfies_constraint(kel: dict, constraint: dict) -> bool:
"""Determine whether knowledge graph element satisfies constraint.
If the constrained attribute is missing, returns False.
"""
try:
attribute = next(
attribute
for attribute in kel.get("attributes", None) or []
if attribute["attribute_type_id"] == constraint["id"]
)
except StopIteration:
return False
return constraint.get("not", False) != operator_map[constraint["operator"]](
attribute["value"],
constraint["value"],
)
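# Illustrative usage (added sketch, not part of the original source). Assumes the
# module-level operator_map maps "==" to an equality check; the node below carries
# a matching attribute, so the equality constraint is satisfied.
_node = {"attributes": [{"attribute_type_id": "biolink:category",
                         "value": "biolink:Gene"}]}
_constraint = {"id": "biolink:category", "operator": "==",
               "value": "biolink:Gene", "not": False}
print(satisfies_constraint(_node, _constraint))  # True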
| 5,337,190
|
def static_unroll(core, input_sequence, initial_state, time_major=True):
"""Performs a static unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *static* unroll replaces a loop with its body repeated multiple
times when executed inside :func:`jax.jit`::
state = initial_state
outputs0, state = core(input_sequence[0], state)
outputs1, state = core(input_sequence[1], state)
outputs2, state = core(input_sequence[2], state)
...
See :func:`dynamic_unroll` for a loop-preserving unroll function.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **final_state** - Core state at time step ``T``.
"""
output_sequence = []
time_axis = 0 if time_major else 1
num_steps = jax.tree_leaves(input_sequence)[0].shape[time_axis]
state = initial_state
for t in range(num_steps):
if time_major:
inputs = jax.tree_map(lambda x, _t=t: x[_t], input_sequence)
else:
inputs = jax.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
outputs, state = core(inputs, state)
output_sequence.append(outputs)
# Stack outputs along the time axis.
output_sequence = jax.tree_multimap(
lambda *args: jnp.stack(args, axis=time_axis),
*output_sequence)
return output_sequence, state
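# Illustrative usage (added sketch, not part of the original source): unroll a toy
# "running sum" core over four time steps. Any callable with the
# core(inputs, state) -> (outputs, new_state) signature works here; a real Haiku
# RNNCore would normally be used inside hk.transform. Requires a JAX version that
# still provides jax.tree_leaves / jax.tree_multimap, as the function above does.
def _running_sum_core(inputs, state):
    new_state = state + inputs
    return new_state, new_state

_xs = jnp.arange(4.0).reshape(4, 1)                      # [T=4, B=1], time-major
_ys, _final = static_unroll(_running_sum_core, _xs, jnp.zeros((1,)))
print(_ys[:, 0], _final)                                 # [0. 1. 3. 6.] [6.]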
| 5,337,191
|
def setup_argparse(parser: argparse.ArgumentParser) -> None:
"""Main entry point for subcommand."""
subparsers = parser.add_subparsers(dest="case_cmd")
setup_argparse_list(subparsers.add_parser("list", help="List cases."))
setup_argparse_list_import(
subparsers.add_parser("list-import-info", help="List case import infos.")
)
setup_argparse_create_import(
subparsers.add_parser("create-import-info", help="Create case import infos.")
)
| 5,337,192
|
def getElementByClass(className: str, fileName: str) -> List[Tuple[int, str]]:
"""Returns first matching tag from an HTML/XML document"""
nonN: List[str] = []
with open(fileName, "r+") as f:
html: List[str] = f.readlines()
for line in html:
nonN.append(line.replace("\n", ""))
pattern: str = f'class="{className}"'
patternAlt: str = f"class='{className}'"
matches: List[Tuple[int, str]] = []
    for lineNo, line in enumerate(nonN, start=1):
        if pattern in line or patternAlt in line:
            matches.append((lineNo, line))
            break
return matches
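# Illustrative usage (added sketch, not part of the original source): write a tiny
# HTML file and locate the first line using the "hero" class.
import os
import tempfile

_tmp = tempfile.NamedTemporaryFile("w+", suffix=".html", delete=False)
_tmp.write('<html>\n<div class="hero">Hi</div>\n</html>\n')
_tmp.close()
print(getElementByClass("hero", _tmp.name))  # [(2, '<div class="hero">Hi</div>')]
os.unlink(_tmp.name)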
| 5,337,193
|
def sms_outbound_gateway():
""" SMS Outbound Gateway selection for the messaging framework """
# CRUD Strings
s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
label_create = T("Create SMS Outbound Gateway"),
title_display = T("SMS Outbound Gateway Details"),
title_list = T("SMS Outbound Gateways"),
title_update = T("Edit SMS Outbound Gateway"),
label_list_button = T("List SMS Outbound Gateways"),
label_delete_button = T("Delete SMS Outbound Gateway"),
msg_record_created = T("SMS Outbound Gateway added"),
msg_record_modified = T("SMS Outbound Gateway updated"),
msg_record_deleted = T("SMS Outbound Gateway deleted"),
msg_list_empty = T("No SMS Outbound Gateways currently registered"),
)
return s3_rest_controller()
| 5,337,194
|
def write_graph(g, json_file):
"""Write networkx graph to JSON.
:param g: networkx graph
:type networkx.Graph
:param json_file: path to dump JSON graph
:type: string
"""
d = nx.readwrite.json_graph.node_link_data(g)
with open(json_file, 'w') as f:
json.dump(d, f)
return
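# Illustrative usage (added sketch, not part of the original source): dump a tiny
# two-node graph to node-link JSON.
_g = nx.Graph()
_g.add_edge("a", "b", weight=1.0)
write_graph(_g, "demo_graph.json")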
| 5,337,195
|
def _insert_volume(_migration, volume_number, volume_obj):
"""Find or create the corresponding volume, and insert the attribute."""
volumes = _migration["volumes"]
volume_obj = deepcopy(volume_obj)
volume_obj["volume"] = volume_number
volumes.append(volume_obj)
return volume_obj
| 5,337,196
|
def create_vnet(credentials, subscription_id, **kwargs):
"""
    Create a virtual network and subnet
:param credentials: msrestazure.azure_active_directory.AdalAuthentication
:param subscription_id: str
:param **resource_group: str
:param **virtual_network_name: str
:param **subnet_name: str
:param **region: str
"""
network_client = NetworkManagementClient(credentials, subscription_id)
resource_group_name = kwargs.get("resource_group", DefaultSettings.resource_group)
virtual_network_name = kwargs.get("virtual_network_name", DefaultSettings.virtual_network_name)
subnet_name = kwargs.get("subnet_name", DefaultSettings.subnet_name)
# get vnet, and subnet if they exist
virtual_network = subnet = None
try:
virtual_network = network_client.virtual_networks.get(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
)
    except CloudError:
        pass
if virtual_network:
confirmation_prompt = "A virtual network with the same name ({}) was found. \n"\
"Please note that the existing address space and subnets may be changed or destroyed. \n"\
"Do you want to use this virtual network? (y/n): ".format(virtual_network_name)
deny_error = AccountSetupError("Virtual network already exists, not recreating.")
unrecognized_input_error = AccountSetupError("Input not recognized.")
prompt_for_confirmation(confirmation_prompt, deny_error, unrecognized_input_error)
virtual_network = network_client.virtual_networks.create_or_update(
resource_group_name=resource_group_name,
virtual_network_name=kwargs.get("virtual_network_name", DefaultSettings.virtual_network_name),
parameters=VirtualNetwork(
location=kwargs.get("region", DefaultSettings.region),
address_space=AddressSpace(["10.0.0.0/24"])
)
)
virtual_network = virtual_network.result()
subnet = network_client.subnets.create_or_update(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=Subnet(
address_prefix='10.0.0.0/24'
)
)
return subnet.result().id
| 5,337,197
|
def to_inorder_iterative(root: dict, allow_none_value: bool = False) -> list:
"""
Convert a binary tree node to depth-first in-order list (iteratively).
"""
node = root
node_list = []
stack = []
while node or len(stack) > 0:
if node:
stack.append(node) # push a node into the stack
node = node.get('left')
else:
node = stack[-1]
del stack[-1] # pop the node from stack
node_value = node.get('value')
if node_value is not None or allow_none_value:
node_list.append(node_value)
node = node.get('right')
return node_list
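# Illustrative usage (added sketch, not part of the original source): a three-node
# tree expressed as nested dicts traverses left, root, right.
_tree = {
    "value": 2,
    "left": {"value": 1},
    "right": {"value": 3},
}
print(to_inorder_iterative(_tree))  # [1, 2, 3]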
| 5,337,198
|
def test_plaintext_and_anoncrypt_raises_error(alice):
"""Test specifying both plaintext and anoncrypt raises an error."""
with pytest.raises(ValueError):
alice.pack({"test": "test"}, plaintext=True, anoncrypt=True)
| 5,337,199
|