content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def get_test_subprocess(cmd=None, **kwds):
    """Return a subprocess.Popen object to use in tests.

    By default stdin and stdout are redirected to /dev/null and the
    Python interpreter is used as the test process.  Also attempts to
    make sure the process is in a reasonably initialized state before
    returning it.
    """
    kwds.setdefault("stdin", DEVNULL)
    kwds.setdefault("stdout", DEVNULL)
    if cmd is not None:
        proc = subprocess.Popen(cmd, **kwds)
        wait_for_pid(proc.pid)
    else:
        # Default child: a one-liner that signals readiness by touching
        # _TESTFN, then sleeps so the process stays alive for the test.
        safe_rmpath(_TESTFN)
        code = "from time import sleep;" \
               "open(r'%s', 'w').close();" % _TESTFN + \
               "sleep(60)"
        proc = subprocess.Popen([PYTHON, "-c", code], **kwds)
        wait_for_file(_TESTFN, delete_file=True, empty=True)
    _subprocesses_started.add(proc)
    return proc
| 5,343,400
|
def make_response(code: int, body: Union[Dict, List]) -> Dict[str, Any]:
    """Build an AWS Lambda Proxy Integration response.

    Args:
        code: HTTP response code.
        body: Python dictionary or list to jsonify.

    Returns:
        Response object compatible with AWS Lambda Proxy Integration.
    """
    headers = {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Credentials": "true",
    }
    return {
        "statusCode": code,
        "headers": headers,
        "body": json.dumps(body, default=json_custom),
    }
| 5,343,401
|
def show_release_details(rel):
    """Print some details about a release dictionary to stdout."""
    # "artist-credit-phrase" is a flat string of the credited artists
    # joined with " + " or whatever separator the server supplies.
    # The raw "artist-credit" list is also available for manual use.
    print("{}, by {}".format(rel["title"], rel["artist-credit-phrase"]))
    if "date" in rel:
        print("Released {} ({})".format(rel["date"], rel["status"]))
    print("MusicBrainz ID: {}".format(rel["id"]))
| 5,343,402
|
def log_matches(path_log, levels=("Error", "Warning")):
    """Log all matches found in a gfortran compilation log.

    Parameters
    ----------
    path_log : str or path-like
        Path to a log file with gfortran compilation output
    levels : iterable of str
        Should be a subset of ``{"Error", "Warning"}``
    """
    text = Path(path_log).read_text()
    for match in expr.finditer(text):
        # Bug fix: forward the caller's ``levels`` instead of the
        # hard-coded ["Error"], which silently dropped warnings.
        log_match(match, levels=levels)
| 5,343,403
|
def sql2dict(queryset):
    """Convert a SQLAlchemy-style query result into a list of dicts.

    Args:
        queryset (object): The SQLAlchemy result, or None.

    Returns:
        result (list): One ``__dict__`` per record; empty for None.
    """
    if queryset is None:
        return []
    return [rec.__dict__ for rec in queryset]
| 5,343,404
|
def build_DNN(input_dim, hidden_dim, num_hidden, embedding_dim=1, vocab_size=20, output_dim=1, activation_func=nn.Sigmoid):
    """Build a feedforward DNN as an ``nn.Sequential`` template.

    Parameters
    ----------
    input_dim : int
        Number of dimensions of input vector
    hidden_dim : int
        Number of dimensions for each hidden layer
    num_hidden : int
        Number of hidden layers to construct
    embedding_dim : int, default=1
        Size of each embedding vector
    vocab_size : int, default=20
        Number of rows in the embedding table
    output_dim : int, default=1
        Number of output (label) dimensions
    activation_func : nn.Module class, default=nn.Sigmoid
        Activation applied after the input layer and each hidden layer
        (none after the final output layer)

    Returns
    -------
    nn.Sequential
        The feedforward network as a PyTorch model
    """
    layers = OrderedDict()
    layers["Embedding"] = nn.Embedding(vocab_size, embedding_dim)
    layers["Input"] = nn.Linear(input_dim, hidden_dim)
    layers["Sig1"] = activation_func()
    for i in range(1, num_hidden + 1):
        layers["Hidden{}".format(i)] = nn.Linear(hidden_dim, hidden_dim)
        # Bug fix: hidden activations previously hard-coded nn.Sigmoid,
        # silently ignoring the activation_func argument.
        layers["Sig{}".format(i + 1)] = activation_func()
    layers["Output"] = nn.Linear(hidden_dim, output_dim)
    return nn.Sequential(layers)
| 5,343,405
|
def float_to_bin(x, m_digits: int):
    """Convert x in [0, 1) to a binary string of length m_digits.

    The result is the fractional binary expansion after "0.", rounded
    to m_digits bits; values that round all the way up to 1.0 wrap to
    all zeros.

    arguments:
        x: float in [0, 1)
        m_digits: number of binary digits to produce
    return:
        x_bin: string of '0'/'1' characters of length m_digits
    Ex:
        0.75 has binary representation 0.11, so this returns '11'
    """
    if x < 0 or x >= 1:
        raise ValueError("x must be in interval [0,1)")
    scale = 2 ** m_digits
    quantized = round(x * scale)
    if quantized == scale:
        # rounded up to exactly 1.0: wrap around to zero
        quantized = 0
    return format(quantized, "b").zfill(m_digits)
| 5,343,406
|
def apply(func: Callable, args: List):
    """Invoke ``func`` with ``args`` unpacked as positional arguments.

    Example:
        >>> def add(a, b):
        >>>     return a + b
        >>> apply(add, [1, 2])
        3
    """
    positional = args
    return func(*positional)
| 5,343,407
|
def random_energy_model_create(db: Session) -> EnergyModelCreate:
    """
    Generate a random energy model create request.
    """
    dataset = fixed_existing_dataset(db)
    source = fixed_existing_energy_source(db)
    parameter = EnergyModelParameterCreate(
        component=source.component.name,
        attribute=EnergyModelParameterAttribute.yearly_limit,
        operation=EnergyModelParameterOperation.set,
        value=366.6,
    )
    return EnergyModelCreate(
        name=f"EnergyModel-{dataset.id}-" + random_lower_string(),
        ref_dataset=dataset.id,
        description="EnergyModel description",
        parameters=[parameter],
    )
| 5,343,408
|
def example_one():
    """Exercise fibonacci_one(3) — presumably a traced/memoized wrapper.

    The expected trace output below shows each (args, kwargs) -> result
    call made while computing fibonacci_one(3); confirm against the
    decorator that wraps fibonacci_one.
    >>>
    fibonacci_one((1,), {}) -> 1
    fibonacci_one((0,), {}) -> 0
    fibonacci_one((1,), {}) -> 1
    fibonacci_one((2,), {}) -> 1
    fibonacci_one((3,), {}) -> 2
    """
    fibonacci_one(3)
| 5,343,409
|
def remove(name: str):
    """
    Remove a task by name.

    Args:
        name (str): The name of the task to remove.
    """
    for task in repository.list():
        if task.name == name:
            repository.remove(task)
            # Bug fix: report success only after the task was actually
            # removed; previously the "removed" message was printed
            # unconditionally, even when the task was not found.
            click.echo(f"Task removed '{name}'")
            return
    click.echo(f"Task '{name}' not found")
| 5,343,410
|
def sbatch_set_cores(params: AllocationParameters,
                     args: Dict[str, str]):
    """Set the core count by setting `cpus-per-task`.

    A single task is run per node, which makes this parameter in
    control of the core count.

    :param params: Allocation params.
    :param args: Arguments to modify.
    """
    core_count = str(params.cores)
    args['--cpus-per-task'] = core_count
| 5,343,411
|
def _indices_3d(f, y, x, py, px, t, nt, interp=True):
"""Compute time and space indices of parametric line in ``f`` function
Parameters
----------
f : :obj:`func`
Function computing values of parametric line for stacking
y : :obj:`np.ndarray`
Slow spatial axis (must be symmetrical around 0 and with sampling 1)
x : :obj:`np.ndarray`
Fast spatial axis (must be symmetrical around 0 and with sampling 1)
py : :obj:`float`
Slowness/curvature in slow axis
px : :obj:`float`
Slowness/curvature in fast axis
t : :obj:`int`
Time sample (time axis is assumed to have sampling 1)
nt : :obj:`int`
Size scaof time axis
interp : :obj:`bool`, optional
Apply linear interpolation (``True``) or nearest interpolation
(``False``) during stacking/spreading along parametric curve
Returns
-------
sscan : :obj:`np.ndarray`
Spatial indices
tscan : :obj:`np.ndarray`
Time indices
dtscan : :obj:`np.ndarray`
Decimal time variations for interpolation
"""
tdecscan = f(y, x, t, py, px)
if not interp:
sscan = (tdecscan >= 0) & (tdecscan < nt)
else:
sscan = (tdecscan >= 0) & (tdecscan < nt - 1)
tscan = tdecscan[sscan].astype(np.int)
if interp:
dtscan = tdecscan[sscan] - tscan
else:
dtscan = None
return sscan, tscan, dtscan
| 5,343,412
|
def indicators_listing(request, option=None):
    """
    Generate Indicator Listing template.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param option: Whether or not we should generate a CSV (yes if option is "csv")
    :type option: str
    :returns: :class:`django.http.HttpResponse`
    """
    wants_csv = option == "csv"
    if wants_csv:
        return generate_indicator_csv(request)
    return generate_indicator_jtable(request, option)
| 5,343,413
|
def parse_sensor(csv):
    """
    Parse one semicolon-separated sensor record into a result value.

    Ideally, the output from the sensors would be standardized and a
    simple list to dict conversion would be possible.  However, there
    are differences between the sensors that need to be accommodated,
    handled via the per-sensor SENSORS dispatch table.
    """
    fields = csv.split(";")
    parser = SENSORS.get(fields[SENSOR_QUANTITY], parse_generic_sensor)
    return parser(fields)
| 5,343,414
|
def samiljeol(year=None):
    """
    :param year: int; falls back to the module-level ``_year`` when falsy
    :return: Independence Movement Day of Korea (March 1st)
    """
    chosen_year = year if year else _year
    return datetime.date(int(chosen_year), 3, 1)
| 5,343,415
|
def test_tester_message_output(qtbot, _send_test_msg, _main_ui):
    """Send a test message and check output log text."""
    def assert_transfer_logged():
        """Wait for the status to register the transfer."""
        assert "Hello from Test Client" in _main_ui.log_widgets.output_text

    qtbot.waitUntil(assert_transfer_logged)
| 5,343,416
|
def get_entries(xml_file):
    """Get every entry from a given XML file: the words, their roots
    and their definitions.
    """
    tree = get_tree(xml_file)
    # each <drv> is one entry
    entries = []
    for drv_node in tree.iter('drv'):
        node_words = get_words_from_kap(drv_node.find('kap'))
        root = get_word_root(drv_node)
        try:
            definitions = get_all_definitions(drv_node)
        except AssertionError:
            # Bug fix: Python 2 print statement converted to the
            # print() function used elsewhere in this codebase.
            print("Error whilst processing %s: %r" % (xml_file, node_words))
            raise
        for word in node_words:
            entries.append(Entry(word, root, definitions))
    return entries
| 5,343,417
|
def delete_image(filename):
    """Delete an item image file from the filesystem.

    Args:
        filename (str): Name of file to be deleted.
    """
    try:
        os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    except OSError:
        # Bug fix: the original wrote ``print(...) % filename`` which
        # applies ``%`` to print()'s return value (None) and raises a
        # TypeError; format inside the call instead.
        print("Error deleting image file %s" % filename)
| 5,343,418
|
def comoving_radial_distance(cosmo, a, status):
    """comoving_radial_distance(cosmology cosmo, double a, int * status) -> double

    Thin wrapper delegating to the compiled ``_ccllib`` extension.
    NOTE(review): ``a`` is presumably the scale factor and ``status`` an
    in/out error flag — confirm against the CCL C API documentation.
    """
    return _ccllib.comoving_radial_distance(cosmo, a, status)
| 5,343,419
|
def make_legacy_date(date_str):
    """
    Converts a date from the UTC format (used in api v3) to the form in api v2.

    :param date_str: date string parseable by ``dateutil.parser.parse``
    :return: 'YYYYMMDD' string, or None if the date cannot be formatted
    """
    date_obj = dateutil.parser.parse(date_str)
    try:
        return date_obj.strftime('%Y%m%d')
    except ValueError:
        # Bug fix: narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit and hid real bugs) to the error
        # strftime actually raises for unformattable dates.
        return None
| 5,343,420
|
def l2_mat(b1, b2):
    """Pairwise L2 (Euclidean) distance matrix between two point sets.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2, then
    clamps to float32 eps before the in-place sqrt for numerical
    stability.

    Args:
        b1: points, last dim is the feature dim D.  NOTE(review): the
            original docstring claims batched shapes (B x M x D /
            B x N x D -> "P x M x N"), but ``torch.addmm`` only accepts
            2-D matrices — confirm whether inputs are really M x D /
            N x D, or whether ``baddbmm`` was intended for 3-D input.
        b2: points, same feature dim as ``b1``.

    Returns:
        Matrix of pairwise distances between rows of ``b1`` and ``b2``.
    """
    b1_norm = b1.pow(2).sum(dim=-1, keepdim=True)
    b2_norm = b2.pow(2).sum(dim=-1, keepdim=True)
    # addmm computes: b2_norm^T + alpha * (b1 @ b2^T); adding b1_norm
    # afterwards completes the squared-distance expansion.
    res = torch.addmm(b2_norm.transpose(-2, -1), b1, b2.transpose(-2, -1),
                      alpha=-2).add_(b1_norm)
    # mask = 1.0 - torch.ones(res.shape[0]).diag().to(res.device)
    res = res.clamp_min_(torch.finfo(torch.float32).eps).sqrt_()
    # res = res * mask
    return res
| 5,343,421
|
def find_files(base, pattern):
    """Return list of files matching pattern in base folder."""
    candidates = fnmatch.filter(os.listdir(base), pattern)
    return [name for name in candidates
            if os.path.isfile(os.path.join(base, name))]
| 5,343,422
|
def SyncDirectory(dir_path):
    """Flush and sync directory on file system.

    Python 2.7 does not support os.sync() so fsync-ing a directory file
    descriptor is the closest way to flush file system meta data changes.
    Failures are logged and swallowed (best effort).
    """
    dir_fd = None  # Bug fix: ensure dir_fd is bound even if os.open fails.
    try:
        dir_fd = os.open(dir_path, os.O_DIRECTORY)
        os.fsync(dir_fd)
    except Exception:
        logging.exception('Failed syncing in directory: %s', dir_path)
    finally:
        # Only close a descriptor that was actually opened; the original
        # referenced a possibly-unbound name here (masked by the inner
        # except).
        if dir_fd is not None:
            try:
                os.close(dir_fd)
            except Exception:
                pass
| 5,343,423
|
def take_rich(frame, n, offset=0, columns=None):
    """
    A take operation which also returns the schema, offset and count of the data.
    Not part of the "public" API, but used by other operations like inspect
    """
    if n is None:
        rows = frame.collect(columns)
    else:
        rows = frame.take(n, offset, columns)
    if columns:
        schema = sparktk.frame.schema.get_schema_for_columns(frame.schema, columns)
    else:
        schema = frame.schema
    return TakeRichResult(data=rows, n=n, offset=offset, schema=schema)
| 5,343,424
|
async def event_response_callback(ven_id, event_id, opt_type):
    """
    Callback that receives the response from a VEN to an Event.
    """
    message = f"VEN {ven_id} responded to Event {event_id} with: {opt_type}"
    print(message)
| 5,343,425
|
def default_name(class_or_fn):
    """Default name for a class or function.

    This is the naming function by default for registries expecting classes or
    functions.

    Args:
        class_or_fn: class or function to be named.

    Returns:
        Default name for registration.
    """
    raw_name = class_or_fn.__name__
    return camelcase_to_snakecase(raw_name)
| 5,343,426
|
def main(config_file: str, log_level: int) -> int:
    """Run the penguin heat simulation described by a config file.

    Parameters
    ----------
    config_file : str
        Path to the simulation config file parsed by ``parse_config``.
    log_level : int
        Verbosity knob; multiplied by 10 to map onto stdlib logging
        levels (1 -> DEBUG, 2 -> INFO, ...).

    Returns
    -------
    int
        0 on success, 1 if the config file could not be read.
    """
    coloredlogs.install(
        level=log_level * 10,  # stdlib levels are spaced by 10
        logger=LOG,
        milliseconds=True,
    )
    # Parse config file
    config_file = pathlib.Path(config_file).resolve()
    config = parse_config(config_file)
    if config is None:
        LOG.error("Could not read config file")
        return 1
    # env_size is stored as "W, H" in the config
    env_size = tuple(map(int, config["env"]["env_size"].split(", ")))
    # Set up image dir: wipe any previous run's images for this config
    image_dir = PROJ_DIR.joinpath(config["paths"]["image_dir"])
    image_dir.mkdir(mode=0o775, exist_ok=True)
    image_dir = image_dir.joinpath(config_file.stem)
    shutil.rmtree(image_dir, ignore_errors=True)
    image_dir.mkdir(mode=0o775, exist_ok=True)
    # Create environment
    env = Environment(
        log_level,
        config["general"]["name"],
        image_dir,
        env_size,
        float(config["env"]["grid_size"]),
        float(config["env"]["time_step_size"]),
        int(config["env"]["epochs"]),
        float(config["env"]["air_conductivity"]),
        float(config["env"]["initial_temp"]),
        float(config["env"]["ambient_temp"]),
        (config["general"]["make_gif"] == "True"),
    )
    # Add agents to environment at random positions; cap the number of
    # placement attempts at 10x the target count so overlapping
    # placements (rejected by env.add_agent) cannot loop forever.
    added_penguins = 0
    max_penguins = int(config["penguin"]["count"])
    max_iterations = max_penguins * 10
    for _ in range(max_iterations):
        penguin = Penguin(
            random.randrange(env_size[0]),
            random.randrange(env_size[1]),
            int(config["penguin"]["body_radius"]),
            int(config["penguin"]["sense_radius"]),
            float(config["penguin"]["body_temp"]),
            float(config["penguin"]["low_death_threshold"]),
            float(config["penguin"]["high_death_threshold"]),
            float(config["penguin"]["low_move_threshold"]),
            float(config["penguin"]["high_move_threshold"]),
            float(config["penguin"]["internal_conductivity"]),
            float(config["penguin"]["external_conductivity"]),
            float(config["penguin"]["insulation_thickness"]),
            float(config["penguin"]["density"]),
            config["penguin"]["movement_policy"],
            int(config["penguin"]["movement_speed"]),
            float(config["penguin"]["metabolism"]),
        )
        if env.add_agent(penguin):
            added_penguins += 1
        if added_penguins >= max_penguins:
            break
    LOG.info(f"Added {added_penguins} agents.")
    # Run the simulation
    env.run()
    LOG.info("Done.")
    logging.shutdown()
    return 0
| 5,343,427
|
def get_ip_result_by_input_method(
    set_input_method,
    module_input_method,
    var_ip_selector,
    username,
    bk_biz_id,
    bk_supplier_account,
    filter_set,
    filter_service_template,
    produce_method,
    var_module_name="",
):
    """
    @summary: Resolve IPs (or module attributes) according to the input method.
    @param var_module_name: module attribute name to return instead of IPs
    @param set_input_method: tag code of the set (cluster) input method
    @param module_input_method: tag code of the module input method
    @param var_ip_selector: form data
    @param username: user name
    @param bk_biz_id: business id
    @param bk_supplier_account: supplier account
    @param filter_set: set filter
    @param filter_service_template: service template (module) filter
    @param produce_method: input method
    @return: comma-separated IP string (or list of module attribute values
        when var_module_name names an attribute other than "ip")
    """
    produce_method = "var_ip_{}_value".format(produce_method)
    select_method = var_ip_selector[produce_method]
    # Fetch the full set (cluster) list
    set_list = get_set_list(username, bk_biz_id, bk_supplier_account)
    # Unless "select all" was chosen, keep only the sets whose names were
    # explicitly selected
    if ALL_SELECTED_STR not in select_method[set_input_method]:
        selected_set_names = select_method[set_input_method]
        # Resolve the selected set names against the full set list
        set_list = get_list_by_selected_names(selected_set_names, set_list)
    # Fetch the full service template list
    service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account)
    # Unless "select all" was chosen, keep only the selected/typed
    # service templates
    if ALL_SELECTED_STR not in select_method[module_input_method]:
        selected_service_template_names = select_method[module_input_method]
        # Resolve the selected or entered template names to template objects
        service_template_list = get_service_template_list_by_names(
            selected_service_template_names, service_template_list
        )
    # Append the business's built-in (e.g. idle machine) module ids
    service_template_list.extend(
        get_biz_inner_module_list(
            var_ip_selector,
            username,
            bk_biz_id,
            bk_supplier_account,
            produce_method,
            set_input_method=set_input_method,
            module_input_method=module_input_method,
        )
    )
    # Resolve the module id list from sets + templates + filters
    module_ids = get_module_id_list(
        bk_biz_id, username, set_list, service_template_list, filter_set, filter_service_template, bk_supplier_account
    )
    if not var_module_name or var_module_name == "ip":
        # Return the IPs belonging to the resolved modules
        data = get_ip_list_by_module_id(username, bk_biz_id, bk_supplier_account, module_ids)
    else:
        # Return the requested module attribute(s) instead of IPs
        kwargs = {"bk_ids": module_ids, "fields": var_module_name.split(",")}
        data = [module_attr[var_module_name] for module_attr in get_module_list(username, bk_biz_id, kwargs=kwargs)]
    return data
| 5,343,428
|
def function_size(container: Result) -> Result:
    """
    The size() function applied to a Value. Delegate to Python's :py:func:`len`.

    (string) -> int string length
    (bytes) -> int bytes length
    (list(A)) -> int list size
    (map(A, B)) -> int map size

    For other types, this will raise a Python :exc:`TypeError`.
    (This is captured and becomes an :exc:`CELEvalError` Result.)

    .. todo:: check container type for celpy.celtypes.StringType, celpy.celtypes.BytesType,
        celpy.celtypes.ListType and celpy.celtypes.MapType
    """
    if container is None:
        return celpy.celtypes.IntType(0)
    length = len(cast(Sized, container))
    result = celpy.celtypes.IntType(length)
    logger.debug(f"function_size({container!r}) = {result!r}")
    return result
| 5,343,429
|
def cluster_from_metis_config(config):
    """
    Construct a Cluster from a metis-flavored object.

    Builds a CurieSettings.Cluster proto from the flat config keys
    (manager, out-of-band management, per-node, and cluster-software
    sections), then wraps it in the concrete Cluster class matching the
    hypervisor/software combination.

    Args:
        config (dict): Metis data.

    Returns:
        Cluster

    Raises:
        ValueError: for unsupported manager.type / oob.type /
            ipmi.vendor / hypervisor+software combinations, or a Nutanix
            cluster missing its virtual IP.
    """
    curie_settings = curie_server_state_pb2.CurieSettings()
    cluster = curie_settings.Cluster()
    cluster.cluster_name = config["cluster.name"]
    log.info("Using cluster %s", cluster.cluster_name)
    # Manager.
    if config["manager.type"].lower() == "prism":
        cluster.cluster_hypervisor_info.ahv_info.SetInParent()
        prism_info = cluster.cluster_management_server_info.prism_info
        prism_info.prism_host = config["prism.address"]
        prism_info.prism_username = config["prism.username"]
        prism_info.prism_password = config["prism.password"]
        prism_info.prism_cluster_id = config["prism.cluster"]
        prism_info.prism_container_id = config["prism.container"]
        prism_info.prism_network_id = config["prism.network"]
    elif config["manager.type"].lower() == "vcenter":
        cluster.cluster_hypervisor_info.esx_info.SetInParent()
        vcenter_info = cluster.cluster_management_server_info.vcenter_info
        vcenter_info.vcenter_host = config["vcenter.address"]
        vcenter_info.vcenter_user = config["vcenter.username"]
        vcenter_info.vcenter_password = config["vcenter.password"]
        vcenter_info.vcenter_datacenter_name = config["vcenter.datacenter"]
        vcenter_info.vcenter_cluster_name = config["vcenter.cluster"]
        vcenter_info.vcenter_datastore_name = config["vcenter.datastore"]
        vcenter_info.vcenter_network_name = config["vcenter.network"]
    elif config["manager.type"].lower() == "scvmm":
        cluster.cluster_hypervisor_info.hyperv_info.SetInParent()
        vmm_info = cluster.cluster_management_server_info.vmm_info
        vmm_info.vmm_server = config["scvmm.address"]
        vmm_info.vmm_user = config["scvmm.username"]
        vmm_info.vmm_password = config["scvmm.password"]
        vmm_info.vmm_library_server = config["scvmm.library_server_address"]
        vmm_info.vmm_library_server_share_path = config["scvmm.library_server_share_path"]
        vmm_info.vmm_cluster_name = config["scvmm.cluster"]
        vmm_info.vmm_share_path = config["scvmm.share_path"]
        vmm_info.vmm_network_name = config["scvmm.network"]
    else:
        raise ValueError("Unsupported manager.type '%s'" % config["manager.type"])
    # OoB.
    oob_management_info = curie_settings.ClusterNode.NodeOutOfBandManagementInfo
    oob_interface_types = dict(oob_management_info.InterfaceType.items())
    oob_vendors = dict(oob_management_info.Vendor.items())
    # Proto enum names are "k" + TitleCase (e.g. kIpmi).
    if "k" + config["oob.type"].title() not in oob_interface_types:
        raise ValueError("Unsupported oob.type '%s'" % config["oob.type"])
    if "k" + config["ipmi.vendor"].title() not in oob_vendors:
        raise ValueError("Unsupported ipmi.vendor '%s'" % config["ipmi.vendor"])
    # Nodes.
    for node_config in config["nodes"]:
        cluster_node = cluster.cluster_nodes.add()
        cluster_node.id = node_config["hypervisor_addr"]
        try:
            cluster_node.svm_addr = node_config["svm_addr"]
        except KeyError as err:
            # Bug fix: exceptions have no ``.message`` attribute in
            # Python 3; log the exception object itself.
            log.warning("Error parsing JSON: %s", err)
        cluster_node.node_out_of_band_management_info.SetInParent()
        oob = cluster_node.node_out_of_band_management_info
        oob.interface_type = oob_interface_types["k" + config["oob.type"].title()]
        oob.vendor = oob_vendors["k" + config["ipmi.vendor"].title()]
        oob.username = config["ipmi.username"]
        oob.password = config["ipmi.password"]
        oob.ip_address = node_config["ipmi_addr"]
    # Cluster.
    cluster_software_info = cluster.cluster_software_info
    if config["cluster.type"].lower() == "nutanix":
        cluster_software_info.nutanix_info.SetInParent()
        nutanix_info = cluster_software_info.nutanix_info
        # TODO (jklein): Remove once all of the CI configs have been updated.
        if not config.get("prism.address"):
            raise ValueError("Nutanix cluster is missing required virtual IP")
        nutanix_info.prism_host = config["prism.address"]
        nutanix_info.prism_user = config["prism.username"]
        nutanix_info.prism_password = config["prism.password"]
    elif config["cluster.type"].lower() == "vsan":
        cluster_software_info.vsan_info.SetInParent()
    else:
        cluster_software_info.generic_info.SetInParent()
    # Dispatch to the concrete Cluster wrapper for the combination.
    if cluster.cluster_hypervisor_info.HasField("esx_info"):
        if cluster.cluster_software_info.HasField("nutanix_info"):
            return NutanixVsphereCluster(cluster)
        else:
            return GenericVsphereCluster(cluster)
    elif cluster.cluster_hypervisor_info.HasField("hyperv_info"):
        if cluster.cluster_software_info.HasField("nutanix_info"):
            return NutanixHypervCluster(cluster)
        else:
            return HyperVCluster(cluster)
    elif cluster.cluster_hypervisor_info.HasField("ahv_info"):
        return AcropolisCluster(cluster)
    else:
        raise ValueError("Unsupported set of hypervisor and cluster type")
| 5,343,430
|
def test_md041_bad_configuration_front_matter_title_bad():
    """
    Test to verify that a configuration error is thrown when supplying the
    front_matter_title value with a bad string.
    """
    # Arrange
    scanner = MarkdownScanner()
    arguments = [
        "--set",
        "plugins.md041.front_matter_title=",
        "--strict-config",
        "scan",
        "test/resources/rules/md041/good_heading_top_level_atx.md",
    ]

    # Act
    results = scanner.invoke_main(
        arguments=arguments, suppress_first_line_heading_rule=False
    )

    # Assert: clean run — empty stdout/stderr, return code 0
    results.assert_results("", "", 0)
| 5,343,431
|
def _single_style_loss(a, g):
    """ Calculate the style loss at a certain layer

    Inputs:
        a is the feature representation of the real image
        g is the feature representation of the generated image
    Output:
        the style loss at a certain layer (which is E_l in the paper)

    NOTE(review): the indexing assumes NHWC feature maps (shape[3] =
    channels, shape[1:3] = spatial dims) — confirm the upstream layout.
    """
    N = a.shape[3]  # number of filters (channels)
    M = a.shape[1] * a.shape[2]  # height times width of the feature map
    A = _gram_matrix(a, N, M)
    G = _gram_matrix(g, N, M)
    # Frobenius distance between Gram matrices, with the 1/(2NM)^2
    # normalization from the style-transfer formulation.
    return tf.reduce_sum((G - A) ** 2 / ((2 * N * M) ** 2))
| 5,343,432
|
def hyp_dist_o(x):
    """
    Computes hyperbolic distance between x and the origin.
    """
    norm_last_dim = x.norm(dim=-1, p=2, keepdim=True)
    return 2 * arctanh(norm_last_dim)
| 5,343,433
|
def is_alive(img, dt):
    """
    Check whether the server is alive and update the given image widget.

    :param img: Image widget updated according to status of server communication.
    :param dt: Callback input (required by the scheduler signature; unused).
    :return: None
    """
    try:
        if database_api.testConnection():
            img.source = "data/img/ico_connection_success.png"
        else:
            img.source = "data/img/ico_connection_fail.png"
    except Exception:
        # Any failure talking to the server shows the "waiting" icon.
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate.
        img.source = "data/img/ico_connection_wait.png"
    finally:
        img.reload()
| 5,343,434
|
def check_image(url):
    """A little wrapper for the :func:`get_image_info` function.

    If the image doesn't match the ``flaskbb_config`` settings it will
    return a tuple whose first value is the custom error message and
    whose second value is ``False`` for the failed check.  On success it
    returns ``None`` for the error message and ``True`` for the passed
    check.

    :param url: The image url to be checked.
    """
    img_info = get_image_info(url)

    if img_info["content-type"] not in flaskbb_config["AVATAR_TYPES"]:
        allowed = ", ".join(flaskbb_config["AVATAR_TYPES"])
        return "Image type is not allowed. Allowed types are: {}".format(allowed), False

    if img_info["width"] > flaskbb_config["AVATAR_WIDTH"]:
        return "Image is too wide! {}px width is allowed.".format(
            flaskbb_config["AVATAR_WIDTH"]
        ), False

    if img_info["height"] > flaskbb_config["AVATAR_HEIGHT"]:
        return "Image is too high! {}px height is allowed.".format(
            flaskbb_config["AVATAR_HEIGHT"]
        ), False

    if img_info["size"] > flaskbb_config["AVATAR_SIZE"]:
        return "Image is too big! {}kb are allowed.".format(
            flaskbb_config["AVATAR_SIZE"]
        ), False

    return None, True
| 5,343,435
|
def run(x_train, y_train, x_test, y_test, clf):
    """Fit ``clf`` on the training split, then evaluate and print metrics."""
    start = time.time()  # kept for parity with the original timing hook
    clf.fit(x_train, y_train)
    tallies = tally_predictions(clf, x_test, y_test)
    basic = basic_metrics(tallies)
    advanced = advanced_metrics(tallies, basic)
    pp(tallies, basic, advanced)
| 5,343,436
|
def preprocess(dataset_file_path, len_bound, num_examples=None, reverse=False):
    """
    Read a tab-separated parallel corpus and create input/output pairs.

    :param dataset_file_path: path to the dataset file ("input\\toutput" lines)
    :param len_bound: (min_sentence_length, max_sentence_length) — both
        exclusive bounds on the whitespace token count of either side
    :param num_examples: if given, only the first num_examples lines are used
    :param reverse: if True return (input, output) pairs, else (output, input)
    """
    min_sentence_length, max_sentence_length = len_bound
    # Bug fix: the original left the file handle open; use a context
    # manager so it is always closed.
    with open(str(dataset_file_path), encoding='utf-8', errors='ignore') as fh:
        lines = fh.read().strip().split('\n')
    if num_examples is not None:
        lines = lines[:num_examples]  # This takes only some lines
    input_lang = []
    output_lang = []
    seen = set()
    for line in lines:
        _line = line.split('\t')  # separate the input line and output line
        if (len(_line[0].split(" ")) > min_sentence_length and len(_line[0].split(" ")) < max_sentence_length
                and len(_line[1].split(" ")) > min_sentence_length and len(_line[1].split(" ")) < max_sentence_length):
            inp = clean_text(_line[0])
            if inp in seen:
                # skip duplicate inputs
                continue
            seen.add(inp)
            input_lang.append(inp)
            output_lang.append(clean_text(_line[1]))
    assert len(input_lang) == len(output_lang)  # both sides stay aligned
    print("Read %s sentence pairs" % len(input_lang))
    if reverse:
        return (input_lang, output_lang)
    else:
        return (output_lang, input_lang)
| 5,343,437
|
def validate_password_changed(editable):
    """Get the new password value and validate it.

    Stores the entry text in the module-level PASSWORD, then updates the
    password and confirm-password entry icons to reflect validity.
    """
    stager.pages.username_input.PASSWORD = editable.get_text()
    # Empty password: clear both validation icons and bail out early.
    if (
        stager.pages.username_input.PASSWORD is None
        or stager.pages.username_input.PASSWORD == ""
    ):
        stager.utils.BUILDER.get_object(PASSWORD_ENTRY_ICON_ID).set_from_icon_name(
            BLANK_ICON, INPUT_VALIDATOR_ICON_SIZE
        )
        stager.utils.BUILDER.get_object(PASSWORD_ENTRY_CONFIRM_ICON_ID).set_from_icon_name(
            BLANK_ICON, INPUT_VALIDATOR_ICON_SIZE
        )
        return
    # Side effect: sets PASSWORD_VALID based on the stored PASSWORD.
    stager.utils.validate.password()
    if stager.pages.username_input.PASSWORD_VALID:
        stager.pages.username_input.PASSWORD_VALID = True
        stager.utils.BUILDER.get_object(PASSWORD_ENTRY_ICON_ID).set_from_icon_name(
            stager.utils.CONFIG.input_valid, INPUT_VALIDATOR_ICON_SIZE
        )
        logger.info("Password meets requirements")
    else:
        stager.pages.username_input.PASSWORD_VALID = False
        stager.utils.BUILDER.get_object(PASSWORD_ENTRY_ICON_ID).set_from_icon_name(
            stager.utils.CONFIG.input_invalid, INPUT_VALIDATOR_ICON_SIZE
        )
        # Hide match validation
        stager.utils.BUILDER.get_object(
            PASSWORD_ENTRY_CONFIRM_ICON_ID
        ).set_from_icon_name(BLANK_ICON, INPUT_VALIDATOR_ICON_SIZE)
        logger.debug("Password does not meet requirements")
| 5,343,438
|
def read_transcriptome(transcriptome):
    """
    Parse a FASTA transcriptome into a {sequence name: sequence} dict.
    """
    return {record.name: record.seq
            for record in SeqIO.parse(transcriptome, 'fasta')}
| 5,343,439
|
async def test_validate_tags(hass, mock_nextbus, mock_nextbus_lists):
    """Test that additional validation against the API is successful."""
    # Valid agency/route/stop combination passes validation.
    assert nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, VALID_ROUTE, VALID_STOP)
    # Invalid agency is rejected.
    assert not nextbus.validate_tags(
        mock_nextbus(), "not-valid", VALID_ROUTE, VALID_STOP
    )
    # Invalid route is rejected.
    assert not nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, "0", VALID_STOP)
    # Invalid stop is rejected.
    assert not nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, VALID_ROUTE, 0)
| 5,343,440
|
def test_zoom2():
    """Test the zoom function with invalid values. Zoom should generate an exception"""
    # create a plane
    plane = jp.JuliaPlane(100, 200, -100, 0)
    try:
        plane.zoom("one", 100, -1, 3)
    except TypeError:
        # Test succeeds: the invalid parameter type was rejected.
        pass
    else:
        assert False, 'Test Failed, zoom did not catch use of an invalid parameter'
| 5,343,441
|
def magnus(w, n):
    """
    The 'Magnus' map.

    Substitutes x -> 1 + eps*X and y -> 1 + eps*Y into ``w``, subtracts
    1, and returns the limit of (expr / eps**n) as eps -> 0, i.e. the
    leading eps**n coefficient of the expansion.
    NOTE(review): relies on module-level symbols x, y, X, Y, eps and a
    ``limit`` function — presumably sympy; confirm.
    """
    expr = w.subs(x,1+eps*X).subs(y,1+eps*Y) - 1
    return limit(expr / eps**n, eps, 0)
| 5,343,442
|
def initCmdLineParser():
    """
    Initiate the optparse object, add all the groups and general command line flags
    and returns the optparse object
    """
    # Init parser and all general flags
    logging.debug("initiating command line option parser")
    parser = OptionParser("usage: %prog [options]")
    parser.add_option("--gen-answer-file", help="Generate a template of an answer file, using this option excludes all other option")
    parser.add_option("--answer-file", help="Runs the configuration in none-interactive mode, extracting all information from the configuration file. using this option excludes all other option")
    # One option group per configuration group
    for group in controller.getAllGroups():
        group_parser = OptionGroup(parser, group.getKey("DESCRIPTION"))
        for param in group.getAllParams():
            cmd_option = param.getKey("CMD_OPTION")
            param_usage = param.getKey("USAGE")
            option_list = param.getKey("OPTION_LIST")
            use_default = param.getKey("USE_DEFAULT")
            if use_default:
                continue  # defaulted params get no CLI flag
            if option_list:
                group_parser.add_option("--%s" % cmd_option, metavar=option_list, help=param_usage, choices=option_list)
            else:
                group_parser.add_option("--%s" % cmd_option, help=param_usage)
        # Add group parser to main parser
        parser.add_option_group(group_parser)
    return parser
| 5,343,443
|
def check_regions_and_camera_pairs(regions_dict, camera_pairs_dict):
    """
    Check the regions and camera pairs dictionaries.

    Raises:
        ValueError: if the two dicts do not share the same region names.
    """
    if not regions_dict.keys() == camera_pairs_dict.keys():
        # Bug fix: Python 2 ``raise ValueError, msg`` syntax converted to
        # Python 3; also fixed the "save"/"same" typo in the message.
        raise ValueError('regions and camera_pairs dictionaries must have the same region names')
    for region_cameras in regions_dict.values():
        check_region(region_cameras)
    for region_name in regions_dict:
        check_camera_pairs(regions_dict[region_name], camera_pairs_dict[region_name])
| 5,343,444
|
def translate_node_coordinates(wn, offset_x, offset_y):
    """
    Translate node coordinates

    Parameters
    -----------
    wn: wntr WaterNetworkModel
        A WaterNetworkModel object
    offset_x: float
        Translation in the x direction, in meters
        (original docstring said "tuple"; the value is added to the x
        coordinate, so it is a scalar)
    offset_y: float
        Translation in the y direction, in meters

    Returns
    --------
    A WaterNetworkModel object with updated node coordinates
        (a deep copy; the input model is left unmodified)
    """
    wn2 = _deepcopy_wn(wn)
    for name, node in wn2.nodes():
        pos = node.coordinates
        node.coordinates = (pos[0]+offset_x, pos[1]+offset_y)
    return wn2
| 5,343,445
|
def get_demo_board():
    """Fetch the demo board (id 1) with its tasks eagerly loaded."""
    demo_board_id = 1
    board = (
        Board.query.filter(Board.id == demo_board_id)
        .options(joinedload(Board.tasks))
        .options(raiseload('*'))
        .one()
    )
    return BoardDetailsSchema().dump(board).data
| 5,343,446
|
def cool_KI(n, T):
    """
    Returns Koyama & Inutsuka (2002) cooling function:
    2e-19 * n^2 * (exp(-1.184e5 / (T + 1e3)) + 1.4e-9 * sqrt(T) * exp(-92 / T))
    """
    first_term = np.exp(-1.184e5 / (T + 1e3))
    second_term = 1.4e-9 * T**0.5 * np.exp(-92.0 / T)
    return 2e-19 * n * n * (first_term + second_term)
| 5,343,447
|
def savefig_check(name, path=None):
    """
    Save a matplotlib figure as sanity check.

    :param name: name of figure (saved as "<name>.png")
    :param path: target directory; defaults to Figure/checks
    """
    # Bug fix: the default used to be the *string* 'None', which is
    # truthy, so the Figure/checks fallback below was unreachable and
    # figures were written to a directory literally named "None".
    if not path:
        path = os.path.join('Figure', 'checks')
    if not os.path.exists(path):
        os.makedirs(path)
    plt.savefig(os.path.join(path, "%s.png" % name))
| 5,343,448
|
async def message_deleted(event: Message, app: SirBot):
    """
    Logs all message deletions not made by a bot.

    Falls back to logging the raw event at debug level when the expected
    keys ("ts", "previous_message" -> "user") are missing from the
    payload.
    """
    if not_bot_delete(event):
        try:
            logger.info(
                f'CHANGE_LOGGING: deleted: {event["ts"]} for user: {event["previous_message"]["user"]}\n{event}')
        except Exception as E:
            # Malformed event payload: record the failure plus the event.
            logger.exception(E)
            logger.debug(event)
| 5,343,449
|
def main(**args):
    """The main routine.

    Falls back to the command-line arguments when no keyword arguments
    are supplied.
    """
    # Bug fix: **args is always a dict and can never be None, so the
    # original ``if args is None`` check was dead code; test emptiness.
    if not args:
        args = sys.argv[1:]
| 5,343,450
|
def main(args):
    """
    main entry point for the manifest CLI
    """
    if len(args) < 2:
        return usage("Command expected")
    command = args[1]
    rest = args[2:]
    # Commands may be abbreviated to any unique prefix, matched in order.
    dispatch = (
        ("create", cli_create),
        ("query", cli_query),
        ("verify", cli_verify),
    )
    for full_name, handler in dispatch:
        if full_name.startswith(command):
            return handler(rest)
    return usage("Unknown command: %s" % command)
| 5,343,451
|
def compute_correlations(states):
    """compute_correlations.

    Calculate the average correlation of spin 0 and every other spin.

    Parameters
    ----------
    states : list of states.
        ``len(states)`` must be >= 1!

    Returns
    -------
    correlations : list of floats.
    """
    num_states = len(states)
    num_spins = len(states[0])
    correlations = []
    for spin in range(num_spins):
        total = sum(state[0] * state[spin] for state in states)
        correlations.append(total / num_states)
    return correlations
| 5,343,452
|
def parse_sample_str(elems: Sequence[Any]) -> AOList[str]:
    """ Choose n floats from a distribution.
    Examples:
        >>> c = parse_sample_str([4, ["choose", ["one", "two"]]])
        >>> c
        Sample(4, ChooseS([StrConst('one'), StrConst('two')]))
    """
    # NOTE(review): the doctest shows the "dist" argument parsed into a
    # ChooseS expression, yet check_str_param() below suggests a plain
    # string is extracted. Confirm that check_str_param also handles the
    # nested distribution form, or whether a dist-parsing helper was meant.
    str_func = "sample"
    check_n_params(["n", "dist"], elems, str_func)
    n = check_true_int_param(0, elems, "n", str_func)
    this_dist = check_str_param(1, elems, "dist", str_func)
    return dist.Sample(n, this_dist)
| 5,343,453
|
def test_load_records():
    """Ensure the sample darshan log yields exactly one POSIX record."""
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report.mod_read_all_records("POSIX")
    posix_records = report.data['records']['POSIX']
    assert len(posix_records) == 1
| 5,343,454
|
def htlc(TMPL_RCV,
         TMPL_OWN,
         TMPL_FEE,
         TMPL_HASHIMG,
         TMPL_HASHFN,
         TMPL_TIMEOUT):
    """Hash time lock contract.

    The contract approves transactions spending its algos in two cases:
    - arg_0 is supplied with TMPL_HASHFN(arg_0) == TMPL_HASHIMG, in which
      case the balance may be closed out to TMPL_RCV; or
    - txn.FirstValid is greater than TMPL_TIMEOUT, in which case the
      balance may be closed back out to TMPL_OWN.
    Scenario 1 remains possible even after TMPL_TIMEOUT has passed, up
    until Scenario 2 actually occurs.

    Parameters:
        TMPL_RCV: address paid when the preimage is supplied
        TMPL_OWN: address refunded on timeout
        TMPL_FEE: maximum fee of any approved transaction
        TMPL_HASHIMG: hash image whose preimage releases the funds
        TMPL_HASHFN: hash function to apply (sha256 or keccak256)
        TMPL_TIMEOUT: round after which funds may return to TMPL_OWN
    """
    # Checks shared by every transaction this contract can approve:
    #   * fee bounded by TMPL_FEE
    #   * plain payment transaction
    #   * no Receiver and zero Amount -- only full close-outs are allowed,
    #     handled via the (non-zero) CloseRemainderTo field
    #   * no rekeying
    common_checks = And(
        Txn.fee() < Int(TMPL_FEE),
        Txn.type_enum() == TxnType.Payment,
        Txn.receiver() == Global.zero_address(),
        Txn.amount() == Int(0),
        Txn.rekey_to() == Global.zero_address()
    )
    # Scenario 1: the correct hash preimage was revealed as arg_0, so the
    # balance may be closed out to TMPL_RCV.
    scenario_1 = And(
        Txn.close_remainder_to() == TMPL_RCV,
        TMPL_HASHFN(Arg(0)) == Bytes("base64", TMPL_HASHIMG)
    )
    # Scenario 2: the contract timed out, so the balance may be refunded
    # to the original owner TMPL_OWN.
    scenario_2 = And(
        Txn.close_remainder_to() == TMPL_OWN,
        Txn.first_valid() > Int(TMPL_TIMEOUT)
    )
    # Approve when either payout scenario holds AND the common checks pass.
    return And(Or(scenario_1, scenario_2), common_checks)
| 5,343,455
|
def parse(*args, is_flag=False, **kwargs):
    """alias of parser.parse

    Thin module-level wrapper: forwards all positional and keyword
    arguments (including ``is_flag``) to the shared ``_parser`` instance.
    """
    return _parser.parse(*args, is_flag=is_flag, **kwargs)
| 5,343,456
|
def check_attrs(res_cls, dset):
    """
    Verify that attributes, scale factor, and units exposed through the
    resource's mapping properties match the corresponding getter methods
    for dataset *dset*.
    """
    def _check(expected, actual, label):
        # Same failure message as the original per-quantity asserts.
        assert expected == actual, "{} {} do not match!".format(dset, label)

    _check(res_cls.get_attrs(dset=dset), res_cls.attrs[dset], "attributes")
    _check(res_cls.get_scale_factor(dset), res_cls.scale_factors[dset],
           "scale factors")
    _check(res_cls.get_units(dset), res_cls.units[dset], "units")
| 5,343,457
|
def oauth_callback():
    """
    Complete the Twitter OAuth dance and stash the access token in the
    session. Returns the path to redirect the user to ('/share' on
    success, '/' on any failure).
    return: str
    """
    auth = tweepy.OAuthHandler(env.TWITTER_API_KEY, env.TWITTER_API_SECRET)
    try:
        auth.request_token = session['REQUEST_TOKEN']
        auth.get_access_token(request.args.get('oauth_verifier'))
        session['AUTH_TOKEN'], session['AUTH_TOKEN_SECRET'] = (
            auth.access_token, auth.access_token_secret)
        return '/share'
    except Exception:
        # Any failure in the handshake sends the user back to the start.
        return '/'
| 5,343,458
|
def generate_random_instance(n_instants: int, cost_dim: int, items_per_instant: int = 1) -> \
        Tuple[List[List[float]], List[List[List[float]]], float, float]:
    """Generate random values, costs and capacity for a Packing Problem instance.

    Instances generated here may not respect guarantees constraints.

    Parameters
    ----------
    n_instants : int
        Number of instants to be generated.
    cost_dim : int
        Dimension of each generated cost vector.
    items_per_instant : int
        Number of items available in each instant.

    Returns
    -------
    values : list of list of float
        Per-instant lists of item values.
    costs : list of list of list of float
        Per-instant lists of item cost vectors.
    cap : float
        A random problem capacity.
    e : float
        The best theoretical epsilon for the generated problem.
    """
    assert items_per_instant > 0
    assert cost_dim > 0
    item_values: List[List[float]] = _get_random_values(n_instants, items_per_instant)
    item_costs: List[List[List[float]]] = _get_random_costs(n_instants, items_per_instant, cost_dim)
    capacity = random.random() * n_instants / 2
    # epsilon = sqrt(log2(d) / B), the standard bound for online packing.
    epsilon = sqrt(log(cost_dim, 2) / capacity)
    # Return copies so callers cannot mutate the generated instance data.
    return item_values.copy(), deepcopy(item_costs), capacity, epsilon
| 5,343,459
|
def recommend_tags_questions(professional_id, threshold=0.01, top=5):
    """ Recommends tags for a professional based on answered questions.

    :param professional_id: ID of the professional
    :param threshold: Minimum fraction of answered questions carrying a tag.
    :param top: Top N recommended tags (default: 5)
    :return top_tags: Series with the top tags and the share of answered
        questions that have them.
    """
    known_tags = get_user_tags(professional_id)
    # Kept for parity with the original implementation (unused below).
    professional = professionals[professionals['professionals_id'] == professional_id]
    answered = answers[answers['answers_author_id'] == professional_id]['answers_question_id']
    # Tags attached to the questions this professional answered.
    candidates = tag_questions[tag_questions['tag_questions_question_id'].isin(answered)]
    candidates = pd.merge(candidates, tags, left_on='tag_questions_tag_id', right_on='tags_tag_id')
    # Drop tags the professional already follows.
    candidates = candidates[~candidates['tags_tag_name'].isin(known_tags)]
    shares = candidates.groupby('tags_tag_name').size() / len(answered.index)
    shares = shares[shares > threshold]
    return shares.sort_values(ascending=False).head(top)
| 5,343,460
|
def simulationtable(request):
    """
    Resolve the reservoir display name posted by the client to its internal
    id, then return the simulation table for it as JSON.
    """
    from .tools import make_simulationtable
    from .model import reservoirs
    from .app import Embalses as App
    selected = request.body.decode("utf-8")
    # Map the display name back to the internal id (COM id) used by the
    # database; avoid shadowing the imported reservoirs() function.
    name_lookup = reservoirs()
    for res_id, display_name in name_lookup.items():
        if display_name == selected:
            selected = res_id
            break
    return JsonResponse(make_simulationtable(selected))
| 5,343,461
|
def get_rate_limits(response):
    """Return a list of rate limit information from a given response's headers.

    Each entry describes one rate-limit window: its human-readable period,
    request limit, remaining requests, and when it resets. Returns an empty
    list when the ``X-RateLimit-Period`` header is missing or empty.
    """
    # Use .get() so a response without rate-limit headers yields [] instead
    # of raising KeyError -- the falsy check below already anticipated an
    # absent/empty header value.
    periods = response.headers.get('X-RateLimit-Period')
    if not periods:
        return []
    rate_limits = []
    periods = periods.split(',')
    # The remaining headers are expected whenever Period is present; the
    # four lists are parallel, one entry per window.
    limits = response.headers['X-RateLimit-Limit'].split(',')
    remaining = response.headers['X-RateLimit-Remaining'].split(',')
    reset = response.headers['X-RateLimit-Reset'].split(',')
    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]
        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime
        right_now = datetime.now()
        if (reset_datetime is not None) and (right_now < reset_datetime):
            # add 1 second because of rounding
            seconds_remaining = (reset_datetime - right_now).seconds + 1
        else:
            seconds_remaining = 0
        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)
    return rate_limits
| 5,343,462
|
def plot_breakdown_percents(runs, event_labels=None,
                            title=None, colors=None):
    """
    Plots a bar chart with the percent of the total wall-time of all events for
    multiple runs.

    Parameters
    ----------
    runs: Run object or list of Run objects
        The list of runs to display on the figure.
    event_labels: string or list of strings, optional
        Names of the events to display on the figure;
        default: None (no events).
    title: string, optional
        Title of the figure;
        default: None.
    colors: iterator, optional
        Colors to use;
        default: None.

    Returns
    -------
    fig: Matplotlib Figure object
        The figure.
    ax: Matplotlib Axes object
        Single or array of axes.
    """
    if not isinstance(runs, (list, tuple)):
        runs = [runs]
    # Avoid the mutable-default-argument pitfall: normalize None to [].
    if event_labels is None:
        event_labels = []
    elif not isinstance(event_labels, (list, tuple)):
        event_labels = [event_labels]
    fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
    ax.yaxis.grid(zorder=0)
    ax.set_ylabel('% of wall-time', fontsize=16)
    indices = numpy.arange(len(runs))
    bar_width = 0.5
    bar_offsets = numpy.zeros(len(runs))
    for label in event_labels:
        if colors:
            color = next(colors)
        else:
            color = next(ax._get_lines.prop_cycler)['color']
        # Runs missing this event contribute a zero-height segment.
        percents = []
        for run in runs:
            if label in run.events.keys():
                percents.append(run.events[label]['percent'])
            else:
                percents.append(0.0)
        ax.bar(indices, percents, bar_width,
               label=label,
               bottom=bar_offsets,
               color=color,
               linewidth=0,
               zorder=0)
        # Stack the next event's bars on top of what was drawn so far.
        bar_offsets += percents
    ax.legend(bbox_to_anchor=(1.0, 1.0), frameon=False)
    ax.set_xticks(indices + 0.25 * bar_width)
    ax.set_xticklabels([run.label for run in runs], rotation=0, fontsize=16)
    # set_yticks(ticks, labels) as positional args only works on
    # matplotlib >= 3.5 (older versions treated the tuple as `minor`);
    # set ticks and labels separately for portability.
    ax.set_yticks([0.0, 25.0, 50.0, 75.0, 100.0])
    ax.set_yticklabels(['0', '25', '50', '75', '100'])
    ax.set_xlim(indices[0] - 0.5, indices[-1] + 1.0)
    ax.set_ylim(0.0, 100.0)
    if title:
        # Bug fix: matplotlib Figure has no set_title(); use the Axes.
        ax.set_title(title)
    return fig, ax
| 5,343,463
|
def test_geometry_get_edges(mk_creoson_post_dict, mk_getactivefile):
    """Verify get_edges returns a list both with and without a file name."""
    client = creopyson.Client()
    for call_kwargs in ({"file_": "file"}, {}):
        edges = client.geometry_get_edges(["12", "34"], **call_kwargs)
        assert isinstance(edges, list)
| 5,343,464
|
def get_zarr_objs(path: Path):
    """Find subdirs which are zarr obj roots.

    Scans each immediate subdirectory of *path* for a ``.zgroup`` marker;
    for every array-bearing group inside, yields the subdirectory together
    with the group name (``None`` for the root group ``"/"``).
    """
    zio = ZarrIO()
    for r in path.iterdir():
        # A ".zgroup" entry marks this directory as a zarr group root.
        if next(r.glob(".zgroup"), None) is not None:
            root_group = zarr.group(zio.get_root(r.as_uri()))
            for g in get_groups_with_arrays(root_group):
                # Precedence note: this parses as
                # `yield (r, (g if g != "/" else None))` -- the conditional
                # applies only to `g`, not to the whole tuple.
                yield r, g if g != "/" else None
| 5,343,465
|
def watcher(event_source, wiki_filter, namespaces_filter, callback):
    """Watcher captures and filters events from mediawiki.

    Args:
        event_source: an iterable source of streaming sse events.
        wiki_filter: string for filtering 'wiki' class.
        namespaces_filter: a set() of namespaces to keep.
        callback: A method to invoke with the JSON params for each filtered event.
    """
    for event in event_source:
        # Only data-bearing 'message' events carry revision changes.
        if event.event != 'message' or not event.data:
            continue
        try:
            change = json.loads(event.data)
        except json.decoder.JSONDecodeError as err:
            print('Error:', err)
            pprint.pprint(event.data)
            continue
        # Drop bot edits, other wikis, unwanted namespaces, and events
        # without a prior revision to diff against. Short-circuit `or`
        # preserves the original key-access order.
        if (change['bot']
                or change['wiki'] != wiki_filter
                or change['namespace'] not in namespaces_filter
                or 'revision' not in change
                or 'old' not in change['revision']):
            continue
        callback(change)
| 5,343,466
|
def main():
    """Main method: obtain the singleton GUI window and run its event loop.

    Returns:
        void
    """
    Gui.get_instance().mainloop()
| 5,343,467
|
def gen3_file(mock_gen3_auth):
    """
    Build a Gen3File client wired to the mocked auth provider.
    """
    return Gen3File(
        endpoint=mock_gen3_auth.endpoint,
        auth_provider=mock_gen3_auth,
    )
| 5,343,468
|
def rhs_of_rule(rule):
    """ This function takes a grammatical rule, and returns its RHS.

    NOTE(review): element 0 of a rule is conventionally the LHS; confirm
    that this project stores the RHS first in its rule representation,
    otherwise this should index a different position.
    """
    return rule[0]
| 5,343,469
|
def smiles_to_antechamber(smiles_string, gaff_mol2_filename, frcmod_filename, residue_name="MOL", strictStereo=False, protonation=False):
    """Build a molecule from a SMILES string and run antechamber, producing
    GAFF mol2 and frcmod files. Charges are assigned with the OpenEye
    QuacPac AM1-BCC implementation.

    Parameters
    ----------
    smiles_string : str
        Smiles string of molecule to construct and charge
    gaff_mol2_filename : str
        Filename of the antechamber mol2 output, with OpenEye charges
    frcmod_filename : str
        Filename of the antechamber frcmod output; usually nearly empty
        for typical molecules.
    residue_name : str, optional, default="MOL"
        Residue/ligand name written into the mol2. OpenEye's default "<0>"
        chokes many mol2 parsers, and unique names help downstream tools.
    strictStereo : bool, optional, default=False
        If False, permits smiles strings with unspecified stereochemistry.
        See https://docs.eyesopen.com/omega/usage.html
    protonation : bool, optional, default=False
        If True, uses OESetNeutralpHModel to pick protonation states
        appropriate for neutral pH before charging. Depending on the
        application this may or may not be what you want.
    """
    oechem = import_("openeye.oechem")
    if not oechem.OEChemIsLicensed():
        raise ImportError("Need License for oechem!")
    oequacpac = import_("openeye.oequacpac")
    # Resolve to absolute paths so the outputs can be written from inside
    # the temporary working directory used below.
    gaff_mol2_filename = os.path.abspath(gaff_mol2_filename)
    frcmod_filename = os.path.abspath(frcmod_filename)
    molecule = smiles_to_oemol(smiles_string)
    if protonation:
        oequacpac.OESetNeutralpHModel(molecule)
    molecule = get_charges(molecule, strictStereo=strictStereo, keep_confs=1)
    # Work inside a temp dir so antechamber's many scratch files do not
    # litter the current directory.
    with enter_temp_directory():
        molecule_to_mol2(molecule, "./tmp.mol2", residue_name=residue_name)
        net_charge = oechem.OENetCharge(molecule)
        # charge_method=None keeps the OpenEye AM1-BCC charges already set.
        tmp_gaff_mol2_filename, tmp_frcmod_filename = run_antechamber("tmp", "./tmp.mol2", charge_method=None, net_charge=net_charge)
        shutil.copy(tmp_gaff_mol2_filename, gaff_mol2_filename)
        shutil.copy(tmp_frcmod_filename, frcmod_filename)
| 5,343,470
|
def main():
    """Unsubscribe from unwanted newsletters as someone else."""
    options = parse_arguments()
    message = create_message(options)
    # In dry-run mode the message is built but never sent.
    if options.dry_run:
        return
    send_mail(message)
| 5,343,471
|
def flow_duration_curve(
        x: Union[np.ndarray, pd.Series],
        log: bool = True,
        plot: bool = True,
        non_exceeding: bool = True,
        ax: Optional[Union[SubplotBase, Any]] = None,
        **kwargs
) -> Union[np.ndarray, Figure]:
    """Calculate a flow duration curve

    Calculate flow duration curve from the discharge measurements. The
    function can either return a ``matplotlib`` plot or return the ordered
    (non)-exceeding probabilities of the observations. These values can then
    be used in any external plotting environment.

    In case x.ndim > 1, the function will be called iteratively along axis 0.

    Parameters
    ----------
    x : numpy.ndarray, pandas.Series
        Series of preferably discharge measurements
    log : bool, default=True
        if `True` plot on loglog axis, ignored when plot is `False`
    plot : bool, default=True
        if `False` plotting will be suppressed and the resulting array will
        be returned
    non_exceeding : bool, default=True
        if `True` use non-exceeding probabilities
    ax : matplotlib.AxesSubplot | bokeh.Figure , default=None
        if not None, will plot into that AxesSubplot or Figure instance.
        Be sure to set the correct plotting backend first.
    kwargs : kwargs,
        will be passed to the ``matplotlib.pyplot.plot`` function

    Returns
    -------
    matplotlib.Figure :
        if `plot` was `True`
    numpy.ndarray :
        if `plot` was `False`

    Notes
    -----
    The probabilities are calculated using the Weibull empirical
    probability. Following [1]_, this probability can be calculated as:

    .. math:: p = m / (n + 1)

    where `m` is the rank of an observation in the ordered time series and
    `n` are the total observations. The increase by one prevents 0%
    and 100% probabilities.

    References
    ----------
    ..  [1] Sloto, R. a., & Crouse, M. Y. (1996). Hysep: a computer program
        for streamflow hydrograph separation and analysis. U.S. Geological
        Survey Water-Resources Investigations Report, 96(4040), 54.
    """
    # omit the Series index
    if isinstance(x, pd.Series):
        x = x.values
    # if x has more than one dimension call this func recursively along axis 0
    if x.ndim > 1:
        if not plot:
            return np.apply_along_axis(flow_duration_curve, 0, x, non_exceeding=non_exceeding, plot=False)
        else:
            # plot; if ax is None, create a fresh axis shared by all columns
            if ax is None:
                fig, ax = plt.subplots(1, 1)
            last_ax = list(map(lambda col: flow_duration_curve(col, log=log, non_exceeding=non_exceeding, ax=ax), x.T))[-1]
            return last_ax
    # calculate the ranks
    ranks = rankdata(x, method='average')
    N = x.size
    # Weibull plotting positions p = m / (N + 1), vectorized.
    # Bug fix: the old `dtype=np.float` alias was removed in NumPy 1.24
    # and raised AttributeError; the builtin float (float64) is equivalent.
    p = np.asarray(ranks, dtype=float) / (N + 1)
    # create sorting index: ascending for non-exceeding, else descending
    index = np.argsort(p)
    if not non_exceeding:
        index = index[::-1]
    if not plot:
        return p[index]
    pfunc = plot_function_loader('flow_duration_curve')
    fig = pfunc(func_args=dict(
        x=x[index],
        y=p[index],
        non_exceeding=non_exceeding,
        log=log,
        figure=ax),
        plot_args=kwargs
    )
    return fig
| 5,343,472
|
def filterEndRender():
    """Perform actions just after the image has been rendered.

    Delegates to the pyfilter manager so that every operation registered
    for the "filter_end_render" stage is executed.
    """
    _logger.debug("filterEndRender")
    _PYFILTER_MANAGER.run_operations_for_stage("filter_end_render")
| 5,343,473
|
def test_ap_hs20_release_number_1(dev, apdev):
    """Hotspot 2.0 with AP claiming support for Release 1"""
    # Thin wrapper: the shared test logic lives in
    # run_ap_hs20_release_number(); this case pins release=1.
    run_ap_hs20_release_number(dev, apdev, 1)
| 5,343,474
|
def get_fields(filters):
    """
    Build the comma-separated SQL field list used by the report query.

    Each entry is either a raw SQL snippet (string) or a
    (doctype, fieldname[, alias]) tuple resolved through get_field().
    """
    fields = (
        ("(SELECT p.posting_date FROM `tabPurchase Invoice` p Join `tabPurchase Invoice Item` i On p.name = i.parent WHERE i.item_code = `tabItem`.item_code And p.docstatus = 1 limit 1) as pinv_date"),
        ("CONCAT(`tabItem`._default_supplier, ' - ', `tabAddress`.city, ', ', `tabAddress`.state) as location"),
        ("Item", "vim_number"),
        ("Item", "make"),
        ("Item", "model"),
        ("Item", "bl"),
        ("Item", "item_type"),
        ("Item", "booking_no"),
        ("Item", "container_no"),
        ("Item", "part_type"),
        ("Item", "year"),
        ("Item", "exterior_color"),
        ("Item", "status"),
        ("Delivery Checklist", "status", "vehicle_release"),
        ("Sales Invoice Item", "item_code"),
        ("Sales Invoice Item", "vim_number", "cont_vim"),
        ("Sales Invoice Item", "item_name"),
        # ("Sales Invoice", "due_date", "due_date"),
        ("""(SELECT SUM(b.grand_total) FROM `tabSales Invoice` as b WHERE b.is_return = 1 and b.docstatus = 1 and b.return_against = `tabSales Invoice`.name ) as credit_note"""),
        ("""0 as gst_total"""),
        ("""0 as pst_total"""),
        ("""0 as g_gst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.tax_amount, 0) ) as gst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'PST', `tabSales Taxes and Charges`.tax_amount, 0 ) ) as pst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.g_tax, 0 ) ) as g_gst_total"""),
        ("Sales Invoice", "company"),
        ("Sales Invoice", "is_return"),
        ("Sales Invoice", "posting_date", "sinv_date"),
        ("Sales Invoice", "customer"),
        ("Sales Invoice", "invoice_type"),
        ("Sales Invoice", "net_total"),
        ("Sales Invoice", "currency"),
        ("Sales Invoice", "base_grand_total"),
        ("Sales Invoice", "grand_total"),
        ("Sales Invoice", "name", "sinv_name"),
        ("Sales Invoice", "outstanding_amount"),
        ("Sales Invoice", "total_g", "gprice"),
        ("Payment Entry", "posting_date", "p_posting_date"),
        ("Payment Entry", "mode_of_payment"),
        ("Payment Entry Reference", "parent", "payment_entry"),
        ("Payment Entry Reference", "allocated_amount", "breakdown"),
        ("`viewPayment and Refunds`.paid_amount"),
        ("`viewPayment and Refunds`.refund_amount"),
        ("(SELECT `view_vehicle_g_cost`.purchase_cost + COALESCE(view_vehicle_g_cost.net_lcv, 0) from `view_vehicle_g_cost` where `view_vehicle_g_cost`.item_code = `tabItem`.item_code) as net_cost")
    )
    # Resolve every entry through get_field and join into one SELECT list.
    return ", ".join(get_field(entry) for entry in fields)
| 5,343,475
|
def timeit_pipeline(rc, num):
    """
    Time how long it takes to run a number of set/get:s inside a cluster
    pipeline. Each iteration builds a fresh pipeline with one SET and one
    GET, so `num` total commands are issued across `num // 2` pipelines.
    """
    for i in range(num // 2):
        key = "foo{0}".format(i)
        pipe = rc.pipeline()
        pipe.set(key, i)
        pipe.get(key)
        pipe.execute()
| 5,343,476
|
def count_POS_tag(df_pos):
    """Count how often each POS tag occurs.

    Args:
        df_pos ([dataframe]): iterable whose entries are lists of
            (token, POS tag) tuples

    Returns:
        df_pos_stats ([dataframe]): one row per entry with per-tag counts
    """
    # Penn Treebank POS tag inventory (plus punctuation tags).
    tag_lst = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNS', 'NNP', 'NNPS',
               'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG',
               'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB', '$', "''", '(', ')', ',', '.', ':', '``']
    # Start from an all-zero integer frame, one row per input entry.
    df_pos_stats = pd.DataFrame(0, index=range(len(df_pos)), columns=tag_lst)
    for row, tagged_tokens in enumerate(df_pos):
        tag_counts = Counter(tag for _, tag in tagged_tokens)
        for tag, count in tag_counts.items():
            # Tags outside the known inventory are silently ignored.
            if tag in tag_lst:
                df_pos_stats.loc[row, tag] = count
    return df_pos_stats
| 5,343,477
|
def catch_all(path):
    """
    Catch-all route: returns a dummy JSON message with a millisecond
    timestamp.
    """
    payload = {
        'message': 'no one was here',
        'ms': get_epochtime_ms()
    }
    return json.dumps(payload)
| 5,343,478
|
def make_archive_obj(filepath, fileobj=None, inmemory_processing=True, allow_unsafe_extraction=False):
    """Smartly open an archive file and return the matching handler object.

    Currently handles tar and zip archives. For tar files, if the Python
    library has issues, the file is processed by falling back to the
    external ``tar`` command. (Note: the native classes are implemented to
    work only on posix machines.)

    Raises a generic Exception when the input is neither a zip nor a tar
    archive.
    """
    if not fileobj:
        assert os.path.isfile(filepath)
        test_arg = filepath
    else:
        test_arg = fileobj
    if zipfile.is_zipfile(test_arg):
        return ZipFile(filepath, fileobj, inmemory_processing,
                       allow_unsafe_extraction)
    if is_tarfile(test_arg):
        try:
            return TarFile(filepath, fileobj, inmemory_processing,
                           allow_unsafe_extraction)
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception instead before
        # falling back to the external-tar implementation.
        except Exception:
            return NativeTarFile(filepath, fileobj, inmemory_processing,
                                 allow_unsafe_extraction)
    raise Exception("Unknown Archive Type: " +
                    "You should really just give me something I can digest!")
| 5,343,479
|
def fixture_set_log_level_info(caplog):
    """Set the log-level capture to info for all tests.

    pytest fixture: lowers caplog's capture threshold so INFO-level
    records are collected during the test run.
    """
    caplog.set_level(logging.INFO)
| 5,343,480
|
def dimensions_to_space_time_index(dims, t_idx = (), t_len = (), s_idx = (), s_len = (),
                                   next_idx_valid = 0, invalid = False,
                                   min_port_width = 0, max_port_width = 0, total_time = 0,
                                   first_call = True) -> typing.Tuple[List[SpaceTimeIndex], int]:
    """
    Convert a space-time Type to a flat list of SpaceTimeIndexs with the s and t values along with the flat_idx.
    This is a recursive function. The parameters other than dims are the status of the current call.
    The values are needed to compute the flat t, s, and flat_idx of each inner value
    :param dims: The type, its space and time dimensions
    :param t_idx: The index in each of the parent calls' that are TSeqs
    :param t_len: The lengths of each of the parent calls' TSeqs
    :param s_idx: The index in each of the parent calls' that are SSeqs
    :param s_len: The lengths of each of the parent calls' SSeqs
    :param next_idx_valid: The next flat_idx to use for valids
    :param invalid: Whether this call is in an invalid part of a type. Any invalid parent makes all the children
    invalid
    :param min_port_width: The minimum width of this type and the other (output or input).
    This is used when adding padding at end of top call.
    :param max_port_width: The maximum width of this type and the other (output or input).
    This is used when adding padding at end of top call.
    :param total_time: The total time required by this type.
    This is used when adding padding at end of top call.
    :param first_call: Whether this is the top, non-recursive call to this function
    :return: A list of SpaceTimeIndex and the next valid flat index
    """
    if type(dims) == ST_SSeq or type(dims) == ST_SSeq_Tuple:
        # SSeq: recurse once per space slot, threading next_idx_valid through.
        nested_result = []
        for s in range(dims.n):
            (res, next_idx_valid) = \
                dimensions_to_space_time_index(dims.t, t_idx, t_len,
                                               tuple([s]) + s_idx, tuple([dims.n]) + s_len,
                                               next_idx_valid, invalid, 0, 0, 0, False)
            nested_result += [res]
        result = flatten(nested_result), next_idx_valid
    elif type(dims) == ST_TSeq:
        # TSeq: recurse over n valid plus i invalid clock slots; slots at
        # t >= dims.n are padding and marked invalid.
        nested_result = []
        for t in range(dims.n + dims.i):
            (res, next_idx_valid) = \
                dimensions_to_space_time_index(dims.t, tuple([t]) + t_idx, tuple([dims.n + dims.i]) + t_len,
                                               s_idx, s_len, next_idx_valid,
                                               invalid or (t >= dims.n), 0, 0, 0, False)
            nested_result += [res]
        result = flatten(nested_result), next_idx_valid
    else:
        # track how much time each t_idx indicates due to nested index structure
        # drop the last value because each t_idx time is the product of all
        # time dimensions inside of it. No t_idx contains last dimension
        time_per_t_len = list(accumulate([1] + list(t_len), lambda x,y : x*y))[:-1]
        t_idx_with_time_per_len = zip(time_per_t_len, list(t_idx))
        time_per_t_idx = list(map(lambda x: x[0]*x[1], t_idx_with_time_per_len))
        t = reduce(lambda x,y: x+y, [0] + time_per_t_idx)
        # do same computation for space
        time_per_s_len = list(accumulate([1] + list(s_len), lambda x,y : x*y))
        s_idx_with_time_per_len = zip(time_per_s_len, list(s_idx))
        time_per_s_idx = list(map(lambda x: x[0]*x[1], s_idx_with_time_per_len))
        s = reduce(lambda x,y: x+y, [0] + time_per_s_idx)
        if invalid:
            result = [SpaceTimeIndex(FlatIndex(True, (t, s)), s, t)], next_idx_valid
        else:
            next_idx_valid += 1
            result = [SpaceTimeIndex(FlatIndex(False, next_idx_valid - 1), s, t)], next_idx_valid
    if first_call:
        # Only the top-level call pads the space dimension and renumbers
        # the invalid indexes.
        padded_result = pad_space_dimension_with_invalids(result[0], min_port_width, max_port_width, total_time)
        return fix_invalid_indexes(padded_result), result[1]
    else:
        return result
| 5,343,481
|
def AtomEditorComponents_Material_AddedToEntity():
    """
    Summary:
    Tests the Material component can be added to an entity and has the expected functionality.
    Test setup:
    - Wait for Editor idle loop.
    - Open the "Base" level.
    Expected Behavior:
    The component can be added, used in game mode, hidden/shown, deleted, and has accurate required components.
    Creation and deletion undo/redo should also work.
    Test Steps:
    1) Create a Material entity with no components.
    2) Add a Material component to Material entity.
    3) UNDO the entity creation and component addition.
    4) REDO the entity creation and component addition.
    5) Verify Material component not enabled.
    6) Add Actor component since it is required by the Material component.
    7) Verify Material component is enabled.
    8) UNDO add Actor component
    9) Verify Material component not enabled.
    10) Add Mesh component since it is required by the Material component.
    11) Verify Material component is enabled.
    12) Enter/Exit game mode.
    13) Test IsHidden.
    14) Test IsVisible.
    15) Delete Material entity.
    16) UNDO deletion.
    17) REDO deletion.
    18) Look for errors.
    :return: None
    """
    import azlmbr.legacy.general as general
    from editor_python_test_tools.editor_entity_utils import EditorEntity
    from editor_python_test_tools.utils import Report, Tracer, TestHelper
    from Atom.atom_utils.atom_constants import AtomComponentProperties
    with Tracer() as error_tracer:
        # Test setup begins.
        # Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
        TestHelper.init_idle()
        TestHelper.open_level("Graphics", "base_empty")
        # Test steps begin.
        # 1. Create a Material entity with no components.
        material_entity = EditorEntity.create_editor_entity(AtomComponentProperties.material())
        Report.critical_result(Tests.material_creation, material_entity.exists())
        # 2. Add a Material component to Material entity.
        material_component = material_entity.add_component(AtomComponentProperties.material())
        Report.critical_result(
            Tests.material_component,
            material_entity.has_component(AtomComponentProperties.material()))
        # 3. UNDO the entity creation and component addition.
        # Entity creation is four separate undo steps; unwind them in order.
        # -> UNDO component addition.
        general.undo()
        # -> UNDO naming entity.
        general.undo()
        # -> UNDO selecting entity.
        general.undo()
        # -> UNDO entity creation.
        general.undo()
        general.idle_wait_frames(1)
        Report.result(Tests.creation_undo, not material_entity.exists())
        # 4. REDO the entity creation and component addition.
        # -> REDO entity creation.
        general.redo()
        # -> REDO selecting entity.
        general.redo()
        # -> REDO naming entity.
        general.redo()
        # -> REDO component addition.
        general.redo()
        general.idle_wait_frames(1)
        Report.result(Tests.creation_redo, material_entity.exists())
        # 5. Verify Material component not enabled (missing required Actor/Mesh dependency).
        Report.result(Tests.material_disabled, not material_component.is_enabled())
        # 6. Add Actor component since it is required by the Material component.
        material_entity.add_component(AtomComponentProperties.actor())
        Report.result(Tests.actor_component, material_entity.has_component(AtomComponentProperties.actor()))
        # 7. Verify Material component is enabled.
        Report.result(Tests.material_enabled, material_component.is_enabled())
        # 8. UNDO Actor component addition (Material loses its required dependency).
        general.undo()
        general.idle_wait_frames(1)
        Report.result(Tests.actor_undo, not material_entity.has_component(AtomComponentProperties.actor()))
        # 9. Verify Material component not enabled.
        Report.result(Tests.material_disabled, not material_component.is_enabled())
        # 10. Add Mesh component since it is required by the Material component.
        material_entity.add_component(AtomComponentProperties.mesh())
        Report.result(Tests.mesh_component, material_entity.has_component(AtomComponentProperties.mesh()))
        # 11. Verify Material component is enabled.
        Report.result(Tests.material_enabled, material_component.is_enabled())
        # 12. Enter/Exit game mode.
        TestHelper.enter_game_mode(Tests.enter_game_mode)
        general.idle_wait_frames(1)
        TestHelper.exit_game_mode(Tests.exit_game_mode)
        # 13. Test IsHidden.
        material_entity.set_visibility_state(False)
        Report.result(Tests.is_hidden, material_entity.is_hidden() is True)
        # 14. Test IsVisible.
        material_entity.set_visibility_state(True)
        general.idle_wait_frames(1)
        Report.result(Tests.is_visible, material_entity.is_visible() is True)
        # 15. Delete Material entity.
        material_entity.delete()
        Report.result(Tests.entity_deleted, not material_entity.exists())
        # 16. UNDO deletion.
        general.undo()
        general.idle_wait_frames(1)
        Report.result(Tests.deletion_undo, material_entity.exists())
        # 17. REDO deletion.
        general.redo()
        general.idle_wait_frames(1)
        Report.result(Tests.deletion_redo, not material_entity.exists())
        # 18. Look for errors or asserts collected by the tracer.
        TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
        for error_info in error_tracer.errors:
            Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
        for assert_info in error_tracer.asserts:
            Report.info(f"Assert: {assert_info.filename} {assert_info.function} | {assert_info.message}")
| 5,343,482
|
def unparse(input_dict, output=None, encoding='utf-8', **kwargs):
    """Emit an XML document for the given `input_dict` (reverse of `parse`).

    The resulting XML document is returned as a string, but if `output` (a
    file-like object) is specified, it is written there instead.

    Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
    as XML node attributes, whereas keys equal to `cdata_key`
    (default=`'#text'`) are treated as character data.

    The `pretty` parameter (default=`False`) enables pretty-printing. In this
    mode, lines are terminated with `'\\n'` and indented with `'\\t'`, but this
    can be customized with the `newl` and `indent` parameters.

    `input_dict` must contain exactly one root key/value pair, otherwise the
    unpacking below raises a `ValueError`.
    """
    ((key, value),) = input_dict.items()
    must_return = False
    if output is None:
        # No sink supplied: buffer in memory and return the result as a string.
        output = StringIO()
        must_return = True
    content_handler = XMLGenerator(output, encoding)
    content_handler.startDocument()
    _emit(key, value, content_handler, **kwargs)
    content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try:  # pragma no cover
            # On Python 2 getvalue() may return bytes; normalize to text.
            value = value.decode(encoding)
        except AttributeError:  # pragma no cover
            pass
        return value
| 5,343,483
|
def test_svgp_vs_gpr_means(with_tf_random_seed, output_dim):
    """
    Check that an SVGP with a Gaussian likelihood and one inducing point per
    data point reproduces the GPR solution when a mean function is present.
    """
    mean_fn = LinearMeanFunction(1.5)
    models = _svgp_gpr_setup(tuple(), output_dim, mean_fn)
    # TODO(sam): output dim of 2 with mean function seems to reduce the tightness of fit for no
    # reason.
    _test_svgp_vs_gpr(*models, tol=1e-4)
| 5,343,484
|
def create_callbacks(model, data, ARGS):
    """Create the list of Keras callbacks used during training.

    Args:
        model: Compiled Keras model (currently unused; kept for interface
            compatibility with callers).
        data: Training/validation data (currently unused; kept for interface
            compatibility with callers).
        ARGS: Namespace with an ``out_directory`` attribute naming the
            directory that receives weight checkpoints and the training log.

    Returns:
        list: ``[ModelCheckpoint, EarlyStopping, CSVLogger]`` callbacks.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(ARGS.out_directory, exist_ok=True)
    # Save weights each time validation loss improves, one file per epoch.
    checkpoint_cb = ModelCheckpoint(
        filepath=os.path.join(ARGS.out_directory, 'weights.{epoch:03d}.hdf5'),
        verbose=2, save_best_only=True)
    # Stop training after 3 epochs without val_loss improvement.
    earlystopping_cb = EarlyStopping(monitor='val_loss', patience=3)
    # Builtin CSV logger writes per-epoch metrics to <out_directory>/log.txt
    # (used instead of the project's custom LogEval callback).
    csv_cb = CSVLogger(os.path.join(ARGS.out_directory, 'log.txt'),
                       append=False, separator='\t')
    return [checkpoint_cb, earlystopping_cb, csv_cb]
| 5,343,485
|
def customization_data(client=None):
    """
    Returns a Generator of ImportDefinitions (Customizations).
    Install them using `resilient-circuits customize`

    Args:
        client: Optional REST client; accepted for interface compatibility
            and not used by this generator.

    Contents:
    - Message Destinations:
        - fn_ioc_parser_v2
    - Functions:
        - func_ioc_parser_v2
    - Workflows:
        - example_parse_iocs_artifact
        - example_parse_iocs_attachment
    - Rules:
        - Parse IOCs (Attachment)
        - Parse IOCs (Artifact)
    """
    # The payload below is a base64-encoded Resilient export (JSON) produced
    # by the SDK's codegen tooling; it must not be edited by hand.
    yield ImportDefinition(u"""
eyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u
ZGl0aW9ucyI6IFt7ImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFj
dC50eXBlIiwgIm1ldGhvZCI6ICJpbiIsICJ0eXBlIjogbnVsbCwgInZhbHVlIjogWyJSRkMgODIy
IEVtYWlsIE1lc3NhZ2UgRmlsZSIsICJFbWFpbCBBdHRhY2htZW50IiwgIkxvZyBGaWxlIiwgIk90
aGVyIEZpbGUiLCAiU3RyaW5nIl19XSwgImVuYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJQ
YXJzZSBJT0NzIChBcnRpZmFjdCkiLCAiaWQiOiAxNTAsICJsb2dpY190eXBlIjogImFsbCIsICJt
ZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJQYXJzZSBJT0NzIChBcnRpZmFjdCki
LCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRz
IjogODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiYjVjZTQ1ZDAtYjU1Mi00YmI3LThkNGItNDRj
YTFmZjcyMjgwIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9wYXJz
ZV9pb2NzX2FydGlmYWN0Il19LCB7ImF1dG9tYXRpb25zIjogW10sICJjb25kaXRpb25zIjogW10s
ICJlbmFibGVkIjogdHJ1ZSwgImV4cG9ydF9rZXkiOiAiUGFyc2UgSU9DcyAoQXR0YWNobWVudCki
LCAiaWQiOiAxNTEsICJsb2dpY190eXBlIjogImFsbCIsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6
IFtdLCAibmFtZSI6ICJQYXJzZSBJT0NzIChBdHRhY2htZW50KSIsICJvYmplY3RfdHlwZSI6ICJh
dHRhY2htZW50IiwgInRhZ3MiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidHlwZSI6
IDEsICJ1dWlkIjogIjZlZjBiNDAxLWY5OGQtNDNhMy1iOGUwLWEyOTA4NmJmZDFlOCIsICJ2aWV3
X2l0ZW1zIjogW10sICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfcGFyc2VfaW9jc19hdHRhY2htZW50
Il19XSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNTk3MDk0NjIzMTQ4
LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9uIjogMiwgImZpZWxkcyI6IFt7ImFsbG93X2RlZmF1bHRf
dmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNl
LCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5
X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1
bmN0aW9uL2lvY19wYXJzZXJfdjJfdGFza19pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNl
LCAiaWQiOiAxMDIwLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwg
ImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAiaW9jX3BhcnNlcl92Ml90YXNrX2lkIiwgIm9w
ZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwg
InByZWZpeCI6IG51bGwsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAi
dGFncyI6IFtdLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogImlvY19wYXJzZXJfdjJfdGFza19p
ZCIsICJ0b29sdGlwIjogIklEIG9mIHRoZSB0YXNrIiwgInR5cGVfaWQiOiAxMSwgInV1aWQiOiAi
NWFiYzhhZGUtYTJkZS00Y2E0LTg1MzgtZWNjNGE1NDE1MmQzIiwgInZhbHVlcyI6IFtdfSwgeyJh
bGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImNhbGN1
bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZh
dWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImV4cG9y
dF9rZXkiOiAiX19mdW5jdGlvbi9pb2NfcGFyc2VyX3YyX2FydGlmYWN0X3ZhbHVlIiwgImhpZGVf
bm90aWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDEwMjIsICJpbnB1dF90eXBlIjogInRleHQiLCAi
aW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAiaW9jX3BhcnNl
cl92Ml9hcnRpZmFjdF92YWx1ZSIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMi
OiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVmaXgiOiBudWxsLCAicmVhZF9vbmx5IjogZmFs
c2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4
dCI6ICJpb2NfcGFyc2VyX3YyX2FydGlmYWN0X3ZhbHVlIiwgInRvb2x0aXAiOiAiQXJ0aWZhY3Qn
cyB2YWx1ZSIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogIjE2MWMzMzU5LWU3ZTMtNGI0OS1iMzRj
LTZmMzkwMTI2YTlkMSIsICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZh
bHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2Vh
YmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjog
ZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vaW9j
X3BhcnNlcl92Ml9hdHRhY2htZW50X2lkIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJp
ZCI6IDEwMjQsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJpbnRlcm5hbCI6IGZhbHNlLCAiaXNf
dHJhY2tlZCI6IGZhbHNlLCAibmFtZSI6ICJpb2NfcGFyc2VyX3YyX2F0dGFjaG1lbnRfaWQiLCAi
b3BlcmF0aW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIi
LCAicHJlZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2Us
ICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiaW9jX3BhcnNlcl92Ml9hdHRh
Y2htZW50X2lkIiwgInRvb2x0aXAiOiAiSUQgb2YgdGhlIGF0dGFjaG1lbnQiLCAidHlwZV9pZCI6
IDExLCAidXVpZCI6ICI2ZTY0Y2I5MS05YTllLTQ4ZDUtOWYwOC1mNDg5YmU1NGMzMGIiLCAidmFs
dWVzIjogW119LCB7ImFsbG93X2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6
IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4i
OiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6
IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2lvY19wYXJzZXJfdjJfYXJ0aWZhY3Rf
aWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogMTAyMywgImlucHV0X3R5cGUi
OiAibnVtYmVyIiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFja2VkIjogZmFsc2UsICJuYW1l
IjogImlvY19wYXJzZXJfdjJfYXJ0aWZhY3RfaWQiLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJv
cGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJlZml4IjogbnVsbCwgInJlYWRf
b25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMi
OiBbXSwgInRleHQiOiAiaW9jX3BhcnNlcl92Ml9hcnRpZmFjdF9pZCIsICJ0b29sdGlwIjogIklE
IG9mIHRoZSBhcnRpZmFjdCIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogImRlZDkwZDE1LTQ1MjEt
NDVkMy05YzRhLTJiMTdhNDJlOTdlYyIsICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92
YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2Us
ICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlf
c2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVu
Y3Rpb24vaW9jX3BhcnNlcl92Ml9pbmNpZGVudF9pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZh
bHNlLCAiaWQiOiAxMDIxLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxz
ZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAiaW9jX3BhcnNlcl92Ml9pbmNpZGVudF9p
ZCIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVy
IjogIiIsICJwcmVmaXgiOiBudWxsLCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQiOiBm
YWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJpb2NfcGFyc2VyX3Yy
X2luY2lkZW50X2lkIiwgInRvb2x0aXAiOiAiSUQgb2YgdGhlIGluY2lkZW50IiwgInR5cGVfaWQi
OiAxMSwgInV1aWQiOiAiZGNjOWM4ODQtMjc0Mi00ZjE3LWEwZGUtNTYyNGI2OGI3YjEzIiwgInZh
bHVlcyI6IFtdfSwgeyJleHBvcnRfa2V5IjogImluY2lkZW50L2ludGVybmFsX2N1c3RvbWl6YXRp
b25zX2ZpZWxkIiwgImlkIjogMCwgImlucHV0X3R5cGUiOiAidGV4dCIsICJpbnRlcm5hbCI6IHRy
dWUsICJuYW1lIjogImludGVybmFsX2N1c3RvbWl6YXRpb25zX2ZpZWxkIiwgInJlYWRfb25seSI6
IHRydWUsICJ0ZXh0IjogIkN1c3RvbWl6YXRpb25zIEZpZWxkIChpbnRlcm5hbCkiLCAidHlwZV9p
ZCI6IDAsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5LTRhMDAwNDA0NGFhMSJ9XSwg
ImZ1bmN0aW9ucyI6IFt7ImNyZWF0b3IiOiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lz
YWRtaW4iLCAiaWQiOiAzLCAibmFtZSI6ICJhQGV4YW1wbGUuY29tIiwgInR5cGUiOiAidXNlciJ9
LCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAiRXh0cmFjdCBJ
T0NzIGZyb20gaW5jaWRlbnQvdGFzay9hcnRpZmFjdCBhdHRhY2htZW50cywgYW5kIHN0cmluZy1i
YXNlZCBhcnRpZmFjdHMuIn0sICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAiZm5faW9jX3BhcnNlcl92
MiIsICJkaXNwbGF5X25hbWUiOiAiSU9DIFBhcnNlciB2MiIsICJleHBvcnRfa2V5IjogImZ1bmNf
aW9jX3BhcnNlcl92MiIsICJpZCI6IDU4LCAibGFzdF9tb2RpZmllZF9ieSI6IHsiZGlzcGxheV9u
YW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJpZCI6IDMsICJuYW1lIjogImFAZXhhbXBsZS5j
b20iLCAidHlwZSI6ICJ1c2VyIn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTk3MDg3MDM0MzMy
LCAibmFtZSI6ICJmdW5jX2lvY19wYXJzZXJfdjIiLCAidGFncyI6IFtdLCAidXVpZCI6ICI1Mjgw
MzU4MS0wMTlhLTRjNjgtOGY4ZC04YmU5Y2EzMWIyZjYiLCAidmVyc2lvbiI6IDIsICJ2aWV3X2l0
ZW1zIjogW3siY29udGVudCI6ICJkY2M5Yzg4NC0yNzQyLTRmMTctYTBkZS01NjI0YjY4YjdiMTMi
LCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJz
aG93X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBu
dWxsfSwgeyJjb250ZW50IjogImRlZDkwZDE1LTQ1MjEtNDVkMy05YzRhLTJiMTdhNDJlOTdlYyIs
ICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNo
b3dfaWYiOiBudWxsLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAic3RlcF9sYWJlbCI6IG51
bGx9LCB7ImNvbnRlbnQiOiAiMTYxYzMzNTktZTdlMy00YjQ5LWIzNGMtNmYzOTAxMjZhOWQxIiwg
ImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hv
d19pZiI6IG51bGwsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJzdGVwX2xhYmVsIjogbnVs
bH0sIHsiY29udGVudCI6ICI2ZTY0Y2I5MS05YTllLTQ4ZDUtOWYwOC1mNDg5YmU1NGMzMGIiLCAi
ZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93
X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBudWxs
fSwgeyJjb250ZW50IjogIjVhYmM4YWRlLWEyZGUtNGNhNC04NTM4LWVjYzRhNTQxNTJkMyIsICJl
bGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3df
aWYiOiBudWxsLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAic3RlcF9sYWJlbCI6IG51bGx9
XSwgIndvcmtmbG93cyI6IFt7ImFjdGlvbnMiOiBbXSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5h
bWUiOiAiUGFyc2UgSU9DcyAoQXJ0aWZhY3QpIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0Iiwg
InByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfcGFyc2VfaW9jc19hcnRpZmFjdCIsICJ0YWdz
IjogW10sICJ1dWlkIjogbnVsbCwgIndvcmtmbG93X2lkIjogNzV9LCB7ImFjdGlvbnMiOiBbXSwg
ImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAiUGFyc2UgSU9DcyAoQXR0YWNobWVudCkiLCAi
b2JqZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxl
X3BhcnNlX2lvY3NfYXR0YWNobWVudCIsICJ0YWdzIjogW10sICJ1dWlkIjogbnVsbCwgIndvcmtm
bG93X2lkIjogNzR9XX1dLCAiZ2VvcyI6IG51bGwsICJncm91cHMiOiBudWxsLCAiaWQiOiAxNSwg
ImluYm91bmRfbWFpbGJveGVzIjogbnVsbCwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10s
ICJpbmNpZGVudF90eXBlcyI6IFt7InVwZGF0ZV9kYXRlIjogMTU5NzA5OTQ0NTMzNCwgImNyZWF0
ZV9kYXRlIjogMTU5NzA5OTQ0NTMzNCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzkt
NGEwMDA0MDQ0YWEwIiwgImRlc2NyaXB0aW9uIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGlu
dGVybmFsKSIsICJleHBvcnRfa2V5IjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFs
KSIsICJuYW1lIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJlbmFibGVk
IjogZmFsc2UsICJzeXN0ZW0iOiBmYWxzZSwgInBhcmVudF9pZCI6IG51bGwsICJoaWRkZW4iOiBm
YWxzZSwgImlkIjogMH1dLCAiaW5kdXN0cmllcyI6IG51bGwsICJsYXlvdXRzIjogW10sICJsb2Nh
bGUiOiBudWxsLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbeyJhcGlfa2V5cyI6IFtdLCAiZGVz
dGluYXRpb25fdHlwZSI6IDAsICJleHBlY3RfYWNrIjogdHJ1ZSwgImV4cG9ydF9rZXkiOiAiZm5f
aW9jX3BhcnNlcl92MiIsICJuYW1lIjogImZuX2lvY19wYXJzZXJfdjIiLCAicHJvZ3JhbW1hdGlj
X25hbWUiOiAiZm5faW9jX3BhcnNlcl92MiIsICJ0YWdzIjogW10sICJ1c2VycyI6IFsiYUBleGFt
cGxlLmNvbSJdLCAidXVpZCI6ICIzNzdlY2QzZC1kZTg3LTQ4YTQtODlmZi1hNjI2Y2I3OGE0MDEi
fV0sICJub3RpZmljYXRpb25zIjogbnVsbCwgIm92ZXJyaWRlcyI6IFtdLCAicGhhc2VzIjogW10s
ICJyZWd1bGF0b3JzIjogbnVsbCwgInJvbGVzIjogW10sICJzY3JpcHRzIjogW10sICJzZXJ2ZXJf
dmVyc2lvbiI6IHsiYnVpbGRfbnVtYmVyIjogMzIsICJtYWpvciI6IDM1LCAibWlub3IiOiAyLCAi
dmVyc2lvbiI6ICIzNS4yLjMyIn0sICJ0YWdzIjogW10sICJ0YXNrX29yZGVyIjogW10sICJ0aW1l
ZnJhbWVzIjogbnVsbCwgInR5cGVzIjogW10sICJ3b3JrZmxvd3MiOiBbeyJhY3Rpb25zIjogW10s
ICJjb250ZW50IjogeyJ2ZXJzaW9uIjogNywgIndvcmtmbG93X2lkIjogImV4YW1wbGVfcGFyc2Vf
aW9jc19hdHRhY2htZW50IiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9
XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9C
UE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3Nw
ZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3Nw
ZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVj
L0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0u
Y29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwi
IHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIg
dGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3Mg
aWQ9XCJleGFtcGxlX3BhcnNlX2lvY3NfYXR0YWNobWVudFwiIGlzRXhlY3V0YWJsZT1cInRydWVc
IiBuYW1lPVwiUGFyc2UgSU9DcyAoQXR0YWNobWVudClcIj48ZG9jdW1lbnRhdGlvbj48IVtDREFU
QVtFeGFtcGxlIHdvcmtmbG93IHNob3dpbmcgaG93IHRvIGV4dHJhY3QgSU9DJ3MgKEluZGljYXRv
cnMgb2YgQ29tcHJvbWlzZSkgZnJvbSBJbmNpZGVudC9UYXNrIEF0dGFjaG1lbnRzLiBFYWNoIHVu
aXF1ZSBJT0MgaXMgYWRkZWQgdG8gdGhlIEluY2lkZW50IGFzIGFuIEFydGlmYWN0XV0+PC9kb2N1
bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5n
PlNlcXVlbmNlRmxvd18waGJqeDZtPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNr
IGlkPVwiU2VydmljZVRhc2tfMXFibjcwb1wiIG5hbWU9XCJJT0MgUGFyc2VyIHYyXCIgcmVzaWxp
ZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0
aW9uIHV1aWQ9XCI1MjgwMzU4MS0wMTlhLTRjNjgtOGY4ZC04YmU5Y2EzMWIyZjZcIj57XCJpbnB1
dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImltcG9ydCByZVxcblxcbmRlZiBn
ZXRfYXJ0aWZhY3RfdHlwZShhcnRpZmFjdF92YWx1ZSwgYXJ0aWZhY3RfdHlwZSk6XFxuICBcXFwi
XFxcIlxcXCJVc2Ugc29tZSByZWdleCBleHByZXNzaW9ucyB0byB0cnkgYW5kIGlkZW50aWZ5XFxu
ICBmcm9tIHRoZSBBcnRpZmFjdCdzIHZhbHVlLCB3aGF0IEFydGlmYWN0IHR5cGUgaXQgaXMuXFxu
ICBSZXR1cm4gb3JpZ2luYWwgYXJ0aWZhY3RfdHlwZSBpZiB3ZSBjYW5ub3QgZmlndXJlIGl0IG91
dFxcXCJcXFwiXFxcIlxcblxcbiAgZG5zX25hbWVfcmVnZXggPSByZS5jb21waWxlKHInXigoW2Et
ekEtWl17MX0pfChbYS16QS1aXXsxfVthLXpBLVpdezF9KXwoW2EtekEtWl17MX1bMC05XXsxfSl8
KFswLTldezF9W2EtekEtWl17MX0pfChbYS16QS1aMC05XVthLXpBLVowLTktX117MSw2MX1bYS16
QS1aMC05XSkpXFxcXC4oW2EtekEtWl17Miw2fXxbYS16QS1aMC05LV17MiwzMH1cXFxcLlthLXpB
LVpdezIsM30pJCcpXFxuICBcXG4gIGlmIHJlLm1hdGNoKGRuc19uYW1lX3JlZ2V4LCBhcnRpZmFj
dF92YWx1ZSk6XFxuICAgIHJldHVybiBcXFwiRE5TIE5hbWVcXFwiXFxuICBcXG4gIHJldHVybiBh
cnRpZmFjdF90eXBlXFxuXFxuIyBNYXAgaW9jLnR5cGUgdG8gUmVzaWxpZW50IEFydGlmYWN0IFR5
cGVcXG5pb2NfdHlwZV90b19hcnRpZmFjdF90eXBlX21hcCA9IHtcXG4gICAgJ3VyaSc6ICdVUkkg
UGF0aCcsXFxuICAgICdJUCc6ICdJUCBBZGRyZXNzJyxcXG4gICAgJ21kNSc6ICdNYWx3YXJlIE1E
NSBIYXNoJyxcXG4gICAgJ3NoYTEnOiAnTWFsd2FyZSBTSEEtMSBIYXNoJyxcXG4gICAgJ3NoYTI1
Nic6ICdNYWx3YXJlIFNIQS0yNTYgSGFzaCcsXFxuICAgICdDVkUnOiAnVGhyZWF0IENWRSBJRCcs
XFxuICAgICdlbWFpbCc6ICdFbWFpbCBTZW5kZXInLFxcbiAgICAnZmlsZW5hbWUnOiAnRmlsZSBO
YW1lJyxcXG4gICAgJ2ZpbGUnOiAnRmlsZSBOYW1lJ1xcbn1cXG5cXG4jIEdldCB0aGUgSU9Dc1xc
bmlvY3MgPSByZXN1bHRzLmlvY3NcXG5cXG5hcnRpZmFjdF9saXN0ID0gW11cXG5pZiBpb2NzOlxc
biAgICAjIExvb3AgSU9DcyBhbmQgYWRkIGVhY2ggb24gYXMgYW4gQXJ0aWZhY3RcXG4gICAgZm9y
IGlvYyBpbiBpb2NzOlxcbiAgICAgIFxcbiAgICAgICMgSWYgYXR0YWNobWVudF9maWxlX25hbWUg
aXMgbm90IGRlZmluZWQsIHVzZSB0aGUgaW9jLnZhbHVlIGFzIGluIHRoZSBBcnRpZmFjdCdzIERl
c2NyaXB0aW9uXFxuICAgICAgaWYgcmVzdWx0cy5hdHRhY2htZW50X2ZpbGVfbmFtZTpcXG4gICAg
ICAgIGFydGlmYWN0X2Rlc2NyaXB0aW9uID0gdVxcXCJUaGlzIElPQyBvY2N1cnJlZCB7MH0gdGlt
ZShzKSBpbiB0aGUgYXR0YWNobWVudDogezF9XFxcIi5mb3JtYXQoIHVuaWNvZGUoaW9jLmNvdW50
KSwgdW5pY29kZShyZXN1bHRzLmF0dGFjaG1lbnRfZmlsZV9uYW1lKSApXFxuICAgICAgXFxuICAg
ICAgZWxzZTpcXG4gICAgICAgIGFydGlmYWN0X2Rlc2NyaXB0aW9uID0gdVxcXCJUaGlzIElPQyBv
Y2N1cnJlZCB7MH0gdGltZShzKSBpbiB0aGUgYXR0YWNobWVudDogezF9XFxcIi5mb3JtYXQoIHVu
aWNvZGUoaW9jLmNvdW50KSwgdW5pY29kZShpb2MudmFsdWUpIClcXG5cXG4gICAgICBhcnRpZmFj
dF92YWx1ZSA9IGlvYy52YWx1ZVxcbiAgICAgIGFydGlmYWN0X3R5cGUgPSBpb2NfdHlwZV90b19h
cnRpZmFjdF90eXBlX21hcC5nZXQoaW9jLnR5cGUsIFxcXCJTdHJpbmdcXFwiKVxcbiAgICAgIFxc
biAgICAgICMgSWYgdGhlIGFydGlmYWN0X3R5cGUgaXMgJ1VSSSBQYXRoJywgY2FsbCBnZXRfYXJ0
aWZhY3RfdHlwZSB0byB0cnkgaW50ZW50aWZ5IHRoZSB0eXBlIHVzaW5nIHJlZ2V4XFxuICAgICAg
aWYgYXJ0aWZhY3RfdHlwZSA9PSBcXFwiVVJJIFBhdGhcXFwiOlxcbiAgICAgICAgYXJ0aWZhY3Rf
dHlwZSA9IGdldF9hcnRpZmFjdF90eXBlKGFydGlmYWN0X3ZhbHVlLCBhcnRpZmFjdF90eXBlKVxc
biAgICAgIFxcbiAgICAgIGluY2lkZW50LmFkZEFydGlmYWN0KGFydGlmYWN0X3R5cGUsIGFydGlm
YWN0X3ZhbHVlLCBhcnRpZmFjdF9kZXNjcmlwdGlvbilcXG4gICAgICBhcnRpZmFjdF9saXN0LmFw
cGVuZCh1XFxcInt9OiB7fVxcXCIuZm9ybWF0KGFydGlmYWN0X3R5cGUsIGFydGlmYWN0X3ZhbHVl
KSlcXG4gICAgICBcXG5cXG5pZiBhcnRpZmFjdF9saXN0OlxcbiAgbXNnID0gdVxcXCJUaGUgZm9s
bG93aW5nIGFydGlmYWN0cyB3ZXJlIGFkZGVkIGZyb20ge31hdHRhY2htZW50OiB7fVxcXFxue31c
XFwiLmZvcm1hdChcXFwiVGFzayBcXFwiIGlmIHRhc2sgZWxzZSBcXFwiXFxcIiwgcmVzdWx0cy5h
dHRhY2htZW50X2ZpbGVfbmFtZSwgXFxcIlxcXFxuXFxcIi5qb2luKGFydGlmYWN0X2xpc3QpKVxc
bmVsc2U6XFxuICBtc2cgPSB1XFxcIk5vIGFydGlmYWN0cyB3ZXJlIGFkZGVkIGZyb20ge31hdHRh
Y2htZW50OiB7fVxcXFxue31cXFwiLmZvcm1hdChcXFwiVGFzayBcXFwiIGlmIHRhc2sgZWxzZSBc
XFwiXFxcIiwgcmVzdWx0cy5hdHRhY2htZW50X2ZpbGVfbmFtZSwgXFxcIlxcXFxuXFxcIi5qb2lu
KGFydGlmYWN0X2xpc3QpKVxcbiAgXFxuaW5jaWRlbnQuYWRkTm90ZShtc2cpXFxuaWYgdGFzazpc
XG4gIHRhc2suYWRkTm90ZShtc2cpXFxuXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcIiMg
RGVmaW5lIFByZS1Qcm9jZXNzIElucHV0c1xcbmlucHV0cy5pb2NfcGFyc2VyX3YyX2luY2lkZW50
X2lkID0gaW5jaWRlbnQuaWRcXG5pbnB1dHMuaW9jX3BhcnNlcl92Ml9hdHRhY2htZW50X2lkID0g
YXR0YWNobWVudC5pZFxcblxcbiMgSWYgdGhpcyBpcyBhIFRhc2ssIHNldCB0aGUgVGFzayBJRFxc
bmlmIHRhc2s6XFxuICAgICBpbnB1dHMuaW9jX3BhcnNlcl92Ml90YXNrX2lkID0gdGFzay5pZFwi
fTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVl
bmNlRmxvd18waGJqeDZtPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzBleXFjcWw8
L291dGdvaW5nPjwvc2VydmljZVRhc2s+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMTRwbXh2Zlwi
PjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMGV5cWNxbDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2Vx
dWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzBleXFjcWxcIiBzb3VyY2VSZWY9XCJTZXJ2aWNl
VGFza18xcWJuNzBvXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMTRwbXh2ZlwiLz48c2VxdWVuY2VG
bG93IGlkPVwiU2VxdWVuY2VGbG93XzBoYmp4Nm1cIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1
NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFza18xcWJuNzBvXCIvPjx0ZXh0QW5ub3RhdGlv
biBpZD1cIlRleHRBbm5vdGF0aW9uXzB0bms5NnBcIj48dGV4dD48IVtDREFUQVtBcnRpZmFjdHMg
YWRkZWQgdG8gdGhlIGluY2lkZW50IGZyb20gdGhlIGFydGlmYWN0IGZpbGUuIEEgbm90ZSBpcyBj
cmVhdGVkIHdpdGggc3VtbWFyeSBpbmZvcm1hdGlvblxuLl1dPjwvdGV4dD48L3RleHRBbm5vdGF0
aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzBjdTk1c2xcIiBzb3VyY2VSZWY9XCJT
ZXJ2aWNlVGFza18xcWJuNzBvXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMHRuazk2cFwi
Lz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBt
bmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFc
Ij48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlk
PVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdp
ZHRoPVwiMzZcIiB4PVwiMTc5XCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6
Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE3NFwiIHk9XCIyMjNcIi8+PC9i
cG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u
RWxlbWVudD1cIlNlcnZpY2VUYXNrXzFxYm43MG9cIiBpZD1cIlNlcnZpY2VUYXNrXzFxYm43MG9f
ZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzI5XCIg
eT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1l
bnQ9XCJFbmRFdmVudF8xNHBteHZmXCIgaWQ9XCJFbmRFdmVudF8xNHBteHZmX2RpXCI+PG9tZ2Rj
OkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiNTI5XCIgeT1cIjE4OFwiLz48
YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIg
eD1cIjU0N1wiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFw
ZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBleXFjcWxcIiBp
ZD1cIlNlcXVlbmNlRmxvd18wZXlxY3FsX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI0MjlcIiB4
c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjUy
OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVs
PjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDc5XCIgeT1cIjE4
NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1ORWRn
ZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18waGJqeDZtXCIgaWQ9XCJTZXF1ZW5jZUZsb3df
MGhiang2bV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjE1XCIgeHNpOnR5cGU9XCJvbWdkYzpQ
b2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzMjlcIiB4c2k6dHlwZT1cIm9t
Z2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhl
aWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjI3MlwiIHk9XCIxODQuNVwiLz48L2JwbW5kaTpC
UE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9
XCJUZXh0QW5ub3RhdGlvbl8wdG5rOTZwXCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8wdG5rOTZwX2Rp
XCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI1MlwiIHdpZHRoPVwiMjA4XCIgeD1cIjQxMFwiIHk9
XCI4MVwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c
IkFzc29jaWF0aW9uXzBjdTk1c2xcIiBpZD1cIkFzc29jaWF0aW9uXzBjdTk1c2xfZGlcIj48b21n
ZGk6d2F5cG9pbnQgeD1cIjQyNVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTcyXCIv
PjxvbWdkaTp3YXlwb2ludCB4PVwiNDc5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIx
MzNcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRpOkJQTU5E
aWFncmFtPjwvZGVmaW5pdGlvbnM+In0sICJjb250ZW50X3ZlcnNpb24iOiA3LCAiY3JlYXRvcl9p
ZCI6ICJhQGV4YW1wbGUuY29tIiwgImRlc2NyaXB0aW9uIjogIkV4YW1wbGUgd29ya2Zsb3cgc2hv
d2luZyBob3cgdG8gZXh0cmFjdCBJT0MncyAoSW5kaWNhdG9ycyBvZiBDb21wcm9taXNlKSBmcm9t
IEluY2lkZW50L1Rhc2sgQXR0YWNobWVudHMuIEVhY2ggdW5pcXVlIElPQyBpcyBhZGRlZCB0byB0
aGUgSW5jaWRlbnQgYXMgYW4gQXJ0aWZhY3QiLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX3BhcnNl
X2lvY3NfYXR0YWNobWVudCIsICJsYXN0X21vZGlmaWVkX2J5IjogImFAZXhhbXBsZS5jb20iLCAi
bGFzdF9tb2RpZmllZF90aW1lIjogMTU5NzA5NDQ5NTQ5NywgIm5hbWUiOiAiUGFyc2UgSU9DcyAo
QXR0YWNobWVudCkiLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJwcm9ncmFtbWF0aWNf
bmFtZSI6ICJleGFtcGxlX3BhcnNlX2lvY3NfYXR0YWNobWVudCIsICJ0YWdzIjogW10sICJ1dWlk
IjogIjVhMjZjMGYxLTM0MzctNGY3MC1iODc0LWQzYmUwOTY1ZjQxNSIsICJ3b3JrZmxvd19pZCI6
IDc0fSwgeyJhY3Rpb25zIjogW10sICJjb250ZW50IjogeyJ2ZXJzaW9uIjogOCwgIndvcmtmbG93
X2lkIjogImV4YW1wbGVfcGFyc2VfaW9jc19hcnRpZmFjdCIsICJ4bWwiOiAiPD94bWwgdmVyc2lv
bj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDov
L3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0
dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0
dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRw
Oi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0
dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5v
cmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hN
TFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5v
cmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9wYXJzZV9pb2NzX2FydGlmYWN0XCIgaXNF
eGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJQYXJzZSBJT0NzIChBcnRpZmFjdClcIj48ZG9jdW1l
bnRhdGlvbj48IVtDREFUQVtFeGFtcGxlIHdvcmtmbG93IHNob3dpbmcgaG93IHRvIGV4dHJhY3Qg
SU9DJ3MgKEluZGljYXRvcnMgb2YgQ29tcHJvbWlzZSkgZnJvbSBhbiBhcnRpZmFjdCBmaWxlIG9y
IHRleHQtYmFzZWQgYXJ0aWZhY3QuIEVhY2ggdW5pcXVlIElPQyBpcyBhZGRlZCB0byB0aGUgaW5j
aWRlbnQgYXMgYW4gYXJ0aWZhY3QuXV0+PC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwi
U3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wMGpyczR1PC9vdXRn
b2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDhzdjl2ZFwi
IG5hbWU9XCJJT0MgUGFyc2VyIHYyXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRl
bnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI1MjgwMzU4MS0wMTlhLTRj
NjgtOGY4ZC04YmU5Y2EzMWIyZjZcIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19z
Y3JpcHRcIjpcImltcG9ydCByZVxcblxcbmRlZiBnZXRfYXJ0aWZhY3RfdHlwZShhcnRpZmFjdF92
YWx1ZSwgYXJ0aWZhY3RfdHlwZSk6XFxuICBcXFwiXFxcIlxcXCJVc2Ugc29tZSByZWdleCBleHBy
ZXNzaW9ucyB0byB0cnkgYW5kIGlkZW50aWZ5XFxuICBmcm9tIHRoZSBBcnRpZmFjdCdzIHZhbHVl
LCB3aGF0IEFydGlmYWN0IHR5cGUgaXQgaXMuXFxuICBSZXR1cm4gb3JpZ2luYWwgYXJ0aWZhY3Rf
dHlwZSBpZiB3ZSBjYW5ub3QgZmlndXJlIGl0IG91dFxcXCJcXFwiXFxcIlxcblxcbiAgZG5zX25h
bWVfcmVnZXggPSByZS5jb21waWxlKHInXigoW2EtekEtWl17MX0pfChbYS16QS1aXXsxfVthLXpB
LVpdezF9KXwoW2EtekEtWl17MX1bMC05XXsxfSl8KFswLTldezF9W2EtekEtWl17MX0pfChbYS16
QS1aMC05XVthLXpBLVowLTktX117MSw2MX1bYS16QS1aMC05XSkpXFxcXC4oW2EtekEtWl17Miw2
fXxbYS16QS1aMC05LV17MiwzMH1cXFxcLlthLXpBLVpdezIsM30pJCcpXFxuICBcXG4gIGlmIHJl
Lm1hdGNoKGRuc19uYW1lX3JlZ2V4LCBhcnRpZmFjdF92YWx1ZSk6XFxuICAgIHJldHVybiBcXFwi
RE5TIE5hbWVcXFwiXFxuICBcXG4gIHJldHVybiBhcnRpZmFjdF90eXBlXFxuXFxuIyBNYXAgaW9j
LnR5cGUgdG8gUmVzaWxpZW50IEFydGlmYWN0IFR5cGVcXG5pb2NfdHlwZV90b19hcnRpZmFjdF90
eXBlX21hcCA9IHtcXG4gICAgJ3VyaSc6ICdVUkkgUGF0aCcsXFxuICAgICdJUCc6ICdJUCBBZGRy
ZXNzJyxcXG4gICAgJ21kNSc6ICdNYWx3YXJlIE1ENSBIYXNoJyxcXG4gICAgJ3NoYTEnOiAnTWFs
d2FyZSBTSEEtMSBIYXNoJyxcXG4gICAgJ3NoYTI1Nic6ICdNYWx3YXJlIFNIQS0yNTYgSGFzaCcs
XFxuICAgICdDVkUnOiAnVGhyZWF0IENWRSBJRCcsXFxuICAgICdlbWFpbCc6ICdFbWFpbCBTZW5k
ZXInLFxcbiAgICAnZmlsZW5hbWUnOiAnRmlsZSBOYW1lJyxcXG4gICAgJ2ZpbGUnOiAnRmlsZSBO
YW1lJ1xcbn1cXG5cXG4jIEdldCB0aGUgSU9Dc1xcbmlvY3MgPSByZXN1bHRzLmlvY3NcXG5cXG5h
cnRpZmFjdF9saXN0ID0gW11cXG5pZiBpb2NzOlxcbiAgICAjIExvb3AgSU9DcyBhbmQgYWRkIGVh
Y2ggb24gYXMgYW4gQXJ0aWZhY3RcXG4gICAgZm9yIGlvYyBpbiBpb2NzOlxcbiAgICAgIFxcbiAg
ICAgICMgSWYgYXR0YWNobWVudF9maWxlX25hbWUgaXMgbm90IGRlZmluZWQsIHVzZSB0aGUgaW9j
LnZhbHVlIGFzIGluIHRoZSBBcnRpZmFjdCdzIERlc2NyaXB0aW9uXFxuICAgICAgaWYgcmVzdWx0
cy5hdHRhY2htZW50X2ZpbGVfbmFtZTpcXG4gICAgICAgIGFydGlmYWN0X2Rlc2NyaXB0aW9uID0g
dVxcXCJUaGlzIElPQyBvY2N1cnJlZCB7MH0gdGltZShzKSBpbiB0aGUgYXJ0aWZhY3Q6IHsxfVxc
XCIuZm9ybWF0KCB1bmljb2RlKGlvYy5jb3VudCksIHVuaWNvZGUocmVzdWx0cy5hdHRhY2htZW50
X2ZpbGVfbmFtZSkgKVxcbiAgICAgIFxcbiAgICAgIGVsc2U6XFxuICAgICAgICBhcnRpZmFjdF9k
ZXNjcmlwdGlvbiA9IHVcXFwiVGhpcyBJT0Mgb2NjdXJyZWQgezB9IHRpbWUocykgaW4gdGhlIGFy
dGlmYWN0OiB7MX1cXFwiLmZvcm1hdCggdW5pY29kZShpb2MuY291bnQpLCB1bmljb2RlKGlvYy52
YWx1ZSkgKVxcblxcbiAgICAgIGFydGlmYWN0X3ZhbHVlID0gaW9jLnZhbHVlXFxuICAgICAgYXJ0
aWZhY3RfdHlwZSA9IGlvY190eXBlX3RvX2FydGlmYWN0X3R5cGVfbWFwLmdldChpb2MudHlwZSwg
XFxcIlN0cmluZ1xcXCIpXFxuICAgICAgXFxuICAgICAgIyBJZiB0aGUgYXJ0aWZhY3RfdHlwZSBp
cyAnVVJJIFBhdGgnLCBjYWxsIGdldF9hcnRpZmFjdF90eXBlIHRvIHRyeSBpbnRlbnRpZnkgdGhl
IHR5cGUgdXNpbmcgcmVnZXhcXG4gICAgICBpZiBhcnRpZmFjdF90eXBlID09IFxcXCJVUkkgUGF0
aFxcXCI6XFxuICAgICAgICBhcnRpZmFjdF90eXBlID0gZ2V0X2FydGlmYWN0X3R5cGUoYXJ0aWZh
Y3RfdmFsdWUsIGFydGlmYWN0X3R5cGUpXFxuICAgICAgXFxuICAgICAgaW5jaWRlbnQuYWRkQXJ0
aWZhY3QoYXJ0aWZhY3RfdHlwZSwgYXJ0aWZhY3RfdmFsdWUsIGFydGlmYWN0X2Rlc2NyaXB0aW9u
KVxcbiAgICAgIGFydGlmYWN0X2xpc3QuYXBwZW5kKHVcXFwie306IHt9XFxcIi5mb3JtYXQoYXJ0
aWZhY3RfdHlwZSwgYXJ0aWZhY3RfdmFsdWUpKVxcbiAgICAgIFxcbmlmIGFydGlmYWN0X2xpc3Q6
XFxuICBpbmNpZGVudC5hZGROb3RlKHVcXFwiVGhlIGZvbGxvd2luZyBhcnRpZmFjdHMgd2VyZSBh
ZGRlZCBmcm9tIGFydGlmYWN0OiB7fSAtIHt9XFxcXG57fVxcXCIuZm9ybWF0KGFydGlmYWN0LnR5
cGUsIHJlc3VsdHMuYXR0YWNobWVudF9maWxlX25hbWUgb3IgYXJ0aWZhY3QudmFsdWUsIFxcXCJc
XFxcblxcXCIuam9pbihhcnRpZmFjdF9saXN0KSkpXFxuZWxzZTpcXG4gIGluY2lkZW50LmFkZE5v
dGUodVxcXCJObyBhcnRpZmFjdHMgd2VyZSBhZGRlZCBmcm9tIGFydGlmYWN0OiB7fSAtIHt9XFxc
XG57fVxcXCIuZm9ybWF0KGFydGlmYWN0LnR5cGUsIHJlc3VsdHMuYXR0YWNobWVudF9maWxlX25h
bWUgb3IgYXJ0aWZhY3QudmFsdWUsIFxcXCJcXFxcblxcXCIuam9pbihhcnRpZmFjdF9saXN0KSkp
XFxuXFxuICBcXG5cIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiIyBEZWZpbmUgUHJlLVBy
b2Nlc3MgSW5wdXRzXFxuaW5wdXRzLmlvY19wYXJzZXJfdjJfaW5jaWRlbnRfaWQgPSBpbmNpZGVu
dC5pZFxcbmlucHV0cy5pb2NfcGFyc2VyX3YyX2FydGlmYWN0X2lkID0gYXJ0aWZhY3QuaWRcXG5p
bnB1dHMuaW9jX3BhcnNlcl92Ml9hcnRpZmFjdF92YWx1ZSA9IGFydGlmYWN0LnZhbHVlXCJ9PC9y
ZXNpbGllbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VG
bG93XzAwanJzNHU8L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMGo5dnFnNTwvb3V0
Z29pbmc+PC9zZXJ2aWNlVGFzaz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzAwanJz
NHVcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNl
VGFza18wOHN2OXZkXCIvPjxlbmRFdmVudCBpZD1cIkVuZEV2ZW50XzBmaDF5aWhcIj48aW5jb21p
bmc+U2VxdWVuY2VGbG93XzBqOXZxZzU8L2luY29taW5nPjwvZW5kRXZlbnQ+PHNlcXVlbmNlRmxv
dyBpZD1cIlNlcXVlbmNlRmxvd18wajl2cWc1XCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMDhz
djl2ZFwiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzBmaDF5aWhcIi8+PHRleHRBbm5vdGF0aW9uIGlk
PVwiVGV4dEFubm90YXRpb25fMTR0b3ZudlwiPjx0ZXh0PjwhW0NEQVRBW2FydGlmYWN0cyBhZGRl
ZCB0byB0aGUgaW5jaWRlbnQgZnJvbSB0aGUgYXJ0aWZhY3QgZmlsZS4gQSBub3RlIGlzIGNyZWF0
ZWQgd2l0aCBzdW1tYXJ5IGluZm9ybWF0aW9uXG4uXV0+PC90ZXh0PjwvdGV4dEFubm90YXRpb24+
PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMDBkaDBlbVwiIHNvdXJjZVJlZj1cIlNlcnZp
Y2VUYXNrXzA4c3Y5dmRcIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xNHRvdm52XCIvPjwv
cHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1fMVwiPjxicG1uZGk6
QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1OUGxhbmVfMVwiPjxi
cG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVhc3htXCIgaWQ9XCJT
dGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9
XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3Vu
ZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIyM1wiLz48L2JwbW5k
aTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt
ZW50PVwiU2VydmljZVRhc2tfMDhzdjl2ZFwiIGlkPVwiU2VydmljZVRhc2tfMDhzdjl2ZF9kaVwi
PjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIyNzBcIiB5PVwi
MTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi
U2VxdWVuY2VGbG93XzAwanJzNHVcIiBpZD1cIlNlcXVlbmNlRmxvd18wMGpyczR1X2RpXCI+PG9t
Z2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi
Lz48b21nZGk6d2F5cG9pbnQgeD1cIjI3MFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi
MjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0
aD1cIjkwXCIgeD1cIjE4OVwiIHk9XCIxODQuNVwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u
ZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRFdmVudF8wZmgx
eWloXCIgaWQ9XCJFbmRFdmVudF8wZmgxeWloX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIz
NlwiIHdpZHRoPVwiMzZcIiB4PVwiNDI4XCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48
b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCI0MDFcIiB5PVwiMjI3
XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRn
ZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18wajl2cWc1XCIgaWQ9XCJTZXF1ZW5jZUZsb3df
MGo5dnFnNV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzcwXCIgeHNpOnR5cGU9XCJvbWdkYzpQ
b2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0MjhcIiB4c2k6dHlwZT1cIm9t
Z2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhl
aWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCIzNTRcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6
QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50
PVwiVGV4dEFubm90YXRpb25fMTR0b3ZudlwiIGlkPVwiVGV4dEFubm90YXRpb25fMTR0b3Zudl9k
aVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiNjRcIiB3aWR0aD1cIjIwOVwiIHg9XCIzNzdcIiB5
PVwiOTFcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9
XCJBc3NvY2lhdGlvbl8wMGRoMGVtXCIgaWQ9XCJBc3NvY2lhdGlvbl8wMGRoMGVtX2RpXCI+PG9t
Z2RpOndheXBvaW50IHg9XCIzNzBcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE4MVwi
Lz48b21nZGk6d2F5cG9pbnQgeD1cIjQyMVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi
MTU1XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1O
RGlhZ3JhbT48L2RlZmluaXRpb25zPiJ9LCAiY29udGVudF92ZXJzaW9uIjogOCwgImNyZWF0b3Jf
aWQiOiAiYUBleGFtcGxlLmNvbSIsICJkZXNjcmlwdGlvbiI6ICJFeGFtcGxlIHdvcmtmbG93IHNo
b3dpbmcgaG93IHRvIGV4dHJhY3QgSU9DJ3MgKEluZGljYXRvcnMgb2YgQ29tcHJvbWlzZSkgZnJv
bSBhbiBhcnRpZmFjdCBmaWxlIG9yIHRleHQtYmFzZWQgYXJ0aWZhY3QuIEVhY2ggdW5pcXVlIElP
QyBpcyBhZGRlZCB0byB0aGUgaW5jaWRlbnQgYXMgYW4gYXJ0aWZhY3QuIiwgImV4cG9ydF9rZXki
OiAiZXhhbXBsZV9wYXJzZV9pb2NzX2FydGlmYWN0IiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYUBl
eGFtcGxlLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTk3MDk0NDY1OTc5LCAibmFtZSI6
ICJQYXJzZSBJT0NzIChBcnRpZmFjdCkiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAicHJv
Z3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9wYXJzZV9pb2NzX2FydGlmYWN0IiwgInRhZ3MiOiBb
XSwgInV1aWQiOiAiN2FkOGUyMTAtYzQ3ZS00NzI1LTk4YmQtNDc4MjQwZmZmMGY5IiwgIndvcmtm
bG93X2lkIjogNzV9XSwgIndvcmtzcGFjZXMiOiBbXX0=
    """)
| 5,343,486
|
def fetch_or_use_cached(temp_dir, file_name, url):
    # type: (str, str, str) -> str
    """
    Check for a cached copy of the indicated file in our temp directory.
    If a copy doesn't exist, download the file.

    Args:
        temp_dir: Local temporary dir
        file_name: Name of the file within the temp dir, not including the
            temp dir path
        url: Full URL from which to download the file, including remote file
            name, which can be different from file_name

    Returns:
        The path of the cached file.
    """
    # makedirs with exist_ok=True handles nested temp_dir paths and the race
    # where another process creates the directory between check and creation
    # (plain os.mkdir raised in both of those cases).
    os.makedirs(temp_dir, exist_ok=True)
    cached_filename = "{}/{}".format(temp_dir, file_name)
    if not os.path.exists(cached_filename):
        print("Downloading {} to {}".format(url, cached_filename))
        urllib.request.urlretrieve(url, cached_filename)
    return cached_filename
| 5,343,487
|
def to_bin(val):
    """
    Return *val* as a binary string, zero-padded to a multiple of 32 bits.

    Negative values are rendered in two's-complement form. The padding width
    is derived from the length of the signed decimal-free binary rendering
    (the '-' sign counts as one character, which guarantees one extra bit of
    headroom for the sign).
    """
    WORD = 32
    raw = "{:b}".format(val)  # includes a leading '-' for negatives
    # Round the rendered length up to the next multiple of WORD.
    pad_to = -(-len(raw) // WORD) * WORD
    if val < 0:
        # Mask into pad_to bits to obtain the two's-complement bit pattern.
        return "{:b}".format(val & ((1 << pad_to) - 1))
    return raw.zfill(pad_to)
| 5,343,488
|
def PSingle (refLamb2, lamb2, qflux, qsigma, uflux, usigma, err, nterm=2):
    """ Fit RM, EVPA0 to Q, U flux measurements

    Performs the fit plus error analysis.
    Returns array of fitted parameters, errors for each and Chi Squares of fit
    refLamb2 = Reference lambda^2 for fit (m^2)
    lamb2    = Array of lambda^2 for fit (m^2)
    qflux    = Array of Q fluxes (Jy) same dim as lamb2
    qsigma   = Array of Q errors (Jy) same dim as lamb2
    uflux    = Array of U fluxes (Jy) same dim as lamb2
    usigma   = Array of U errors (Jy) same dim as lamb2
    err      = Obit error stack
    nterm    = Number of coefficients to fit (1 or 2)
    """
    ################################################################
    n_samples = len(lamb2)
    result = Obit.RMFitSingle(n_samples, nterm, refLamb2, lamb2,
                              qflux, qsigma, uflux, usigma, err.me)
    # Surface any messages accumulated on the Obit error stack.
    OErr.printErr(err)
    OErr.printErrMsg(err, "Fitting failed")
    return result
    # end PSingle
| 5,343,489
|
def exec_waveform_function(wf_func: str, t: np.ndarray, pulse_info: dict) -> np.ndarray:
    """
    Compute the result of the pulse's waveform function.

    Whitelisted functions from ``quantify_scheduler.waveforms`` are dispatched
    directly; any other waveform function path is dynamically loaded and
    executed via
    :func:`~quantify_scheduler.helpers.waveforms.exec_custom_waveform_function`.

    Parameters
    ----------
    wf_func
        The custom waveform function path.
    t
        The linear timespace.
    pulse_info
        The dictionary containing pulse information.

    Returns
    -------
    :
        Returns the computed waveform.
    """
    fn_name: str = wf_func.split(".")[-1]
    is_builtin = wf_func.startswith("quantify_scheduler.waveforms") and fn_name in (
        "square",
        "ramp",
        "soft_square",
        "drag",
    )
    if not is_builtin:
        return exec_custom_waveform_function(wf_func, t, pulse_info)
    if fn_name == "drag":
        return waveforms.drag(
            t=t,
            G_amp=pulse_info["G_amp"],
            D_amp=pulse_info["D_amp"],
            duration=pulse_info["duration"],
            nr_sigma=pulse_info["nr_sigma"],
            phase=pulse_info["phase"],
        )
    # square, ramp and soft_square all share the (t, amp) signature.
    amp_waveforms = {
        "square": waveforms.square,
        "ramp": waveforms.ramp,
        "soft_square": waveforms.soft_square,
    }
    return amp_waveforms[fn_name](t=t, amp=pulse_info["amp"])
| 5,343,490
|
def generate_straight_pipeline():
    """Build a simple linear pipeline: scaling -> ridge -> linear."""
    scaling = PrimaryNode('scaling')
    ridge = SecondaryNode('ridge', nodes_from=[scaling])
    linear = SecondaryNode('linear', nodes_from=[ridge])
    return Pipeline(linear)
| 5,343,491
|
def get_socialnetwork_image_path(instance, filename):
    """
    Build a dynamic upload path for SocialNetwork images.

    Given a model instance and the uploaded filename, the path follows the
    pattern: APP_LABEL/MODEL_NAME/INSTANCE_SLUG/slugified-file-name.ext
    """
    return (
        f"{instance._meta.app_label}/"
        f"{instance._meta.model_name}/"
        f"{instance.slug}/"
        f"{get_slugified_file_name(filename)}"
    )
| 5,343,492
|
def policy_gradient(agent, environ, explore=None, *, batch_size, mc, epsilon, baseline):
    """Run one policy-gradient training pass for the SMILES generator.

    The reward is only the final reward given by the environment (predictor);
    invalid SMILES receive a reward of 0 and all rewards are shifted by
    ``baseline`` before being used in the policy-gradient loss.

    agent (model.Generator): the exploitation network for SMILES string
        generation; its weights are updated in place via ``agent.optim``.
    environ (util.Activity): the environment providing the final reward for
        each SMILES.
    explore (model.Generator): the exploration network for SMILES string
        generation; it has the same architecture as the agent.
    batch_size: number of sequences sampled per MC round and per training
        mini-batch.
    mc: number of Monte-Carlo sampling rounds (total samples = mc * batch_size
        before deduplication).
    epsilon: exploration rate passed through to ``agent.sample`` —
        presumably the probability of using ``explore`` per step; confirm
        against the sampler implementation.
    baseline: scalar subtracted from every reward to reduce gradient variance.
    """
    seqs = []
    # repeated sampling with MC times
    for _ in range(mc):
        seq = agent.sample(batch_size, explore=explore, epsilon=epsilon)
        seqs.append(seq)
    seqs = torch.cat(seqs, dim=0)
    # Drop duplicate sequences so each unique SMILES is scored/trained once.
    ix = util.unique(seqs)
    seqs = seqs[ix]
    smiles, valids = util.check_smiles(seqs, agent.voc)
    # obtaining the reward; invalid SMILES are clamped to zero reward
    preds = environ(smiles)
    preds[valids == False] = 0
    preds -= baseline
    preds = torch.Tensor(preds.reshape(-1, 1)).to(util.dev)
    ds = TensorDataset(seqs, preds)
    loader = DataLoader(ds, batch_size=batch_size)
    # Training Loop: maximize expected (baseline-adjusted) reward via PG loss
    for seq, pred in loader:
        score = agent.likelihood(seq)
        agent.optim.zero_grad()
        loss = agent.PGLoss(score, pred)
        loss.backward()
        agent.optim.step()
| 5,343,493
|
def openReadBytesFile(path: str):
    """
    Open a binary file in read-only mode.

    :param path: path of the file to open
    :return: the opened binary file object
    """
    return openFile(path, "rb")
| 5,343,494
|
def diff_time(a: datetime.time, b: datetime.time):
    """
    Return a - b in seconds (fractional part carries the microseconds).
    """
    hour_diff = a.hour - b.hour
    minute_diff = a.minute - b.minute
    second_diff = a.second - b.second
    micro_diff = a.microsecond - b.microsecond
    return 3600 * hour_diff + 60 * minute_diff + second_diff + micro_diff / 1000000
| 5,343,495
|
def pytest_after_base_config(base_config, request):
    """
    Hook invoked once base_config has been initialized successfully
    (and ssh has connected). The default implementation is a no-op.
    """
| 5,343,496
|
def _create_pseudo_names(tensors, prefix):
    """Creates pseudo {input | output} names for subclassed Models.

    Warning: this function should only be used to define default
    names for `Metics` and `SavedModel`. No other use cases should
    rely on a `Model`'s input or output names.

    Example with dict:
    `{'a': [x1, x2], 'b': x3}` becomes:
    `['a_1', 'a_2', 'b']`

    Example with list:
    `[x, y]` becomes:
    `['output_1', 'output_2']`

    Args:
      tensors: `Model`'s outputs or inputs.
      prefix: 'output_' for outputs, 'input_' for inputs.

    Returns:
      Flattened list of pseudo names.
    """
    def _shift(element):
        # Use 1-based indices: "output_1" rather than "output_0".
        return element + 1 if isinstance(element, int) else element

    raw_paths = list(nest.yield_flat_paths(tensors))
    shifted_paths = nest.map_structure(_shift, raw_paths)
    pseudo_names = []
    for path in shifted_paths:
        if not path:
            # Single output/input: no structural path at all.
            pseudo_names.append(prefix + '1')
            continue
        joined = '_'.join(str(part) for part in path)
        # Only prepend the prefix when the path starts with a list index.
        pseudo_names.append(prefix + joined if isinstance(path[0], int) else joined)
    return pseudo_names
| 5,343,497
|
def search_for_breakpoint(db_name, ids):
    """
    Retrieve the ID of the last calculated grid node to continue an
    interrupted grid calculation.

    :param db_name: str; path to the sqlite database file
    :param ids: numpy.array; list of grid node ids to calculate in this batch
    :return: int; index within `ids` from which to resume the calculation
             (0 when no breakpoint is stored)
    :raise ValueError: if the stored breakpoint id is not present in `ids`
    """
    conn = sqlite3.connect(db_name, detect_types=sqlite3.PARSE_DECLTYPES)
    try:
        # Flatten each single-column row to its bare value.
        conn.row_factory = lambda cursor, row: row[0]
        cursor = conn.cursor()
        last_idx = np.array(
            cursor.execute("SELECT last_index FROM auxiliary").fetchall())
    finally:
        # The original placed close() after return/raise, so it never ran and
        # the connection leaked; always release it here instead.
        conn.close()
    if last_idx.size == 0:
        return 0
    if last_idx[0] in ids:
        return np.where(last_idx[0] == ids)[0][0]
    raise ValueError('IDs of already calculated objects do not correspond to the generated ID. Breakpoint cannot '
                     'be generated.')
| 5,343,498
|
def reddit_client_secret() -> str:
    """Client secret of the reddit app.

    Raises ValueError when the REDDIT_CLIENT_SECRET environment variable is
    unset or empty.
    """
    secret = os.getenv("REDDIT_CLIENT_SECRET")
    if secret:
        return secret
    raise ValueError("REDDIT_CLIENT_SECRET environment variable not set")
| 5,343,499
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.