| content | id |
|---|---|
def distance_along_glacier(nx, map_dx):
"""Calculates the distance along the glacier in km.
Parameters
----------
nx : int
number of grid points
map_dx : int
grid point spacing in meters
Returns
-------
ndarray
distance along the glacier in km.
"""
return np.linspace(0, nx, nx) * map_dx * 1e-3
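# Usage sketch: 200 grid points spaced 100 m apart span 0 to 20 km along the flowline
# (numpy is assumed to be imported as np at module level, as the function body requires).
import numpy as np
dists = distance_along_glacier(200, 100)
print(dists[0], dists[-1])  # 0.0 20.0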
| 5,342,800
|
def make_docs():
"""Create documentation"""
with lcd("docs"):
## generate Sphinx HTML documentation, including API docs
try:
remove("docs/fab_support.rst")
except FileNotFoundError:
pass
try:
remove("docs/modules.rst")
except FileNotFoundError:
pass
local("sphinx-apidoc -o . ../fab_support")
# $(MAKE) -C docs clean
local("make html")
# $(BROWSER) docs/_build/html/index.html
| 5,342,801
|
def p_entity_body_outer(p: YaccProduction) -> None:
"""entity_body_outer : mls entity_body END"""
p[0] = (p[1], p[2])
| 5,342,802
|
def make_postdict_to_fetch_token(token_endpoint: str, grant_type: str,
code: str, client_id: str,
client_secret: str,
redirect_uri: str) -> dict:
"""POST dictionary is the API of the requests library"""
return {'url': token_endpoint,
'data': {
'grant_type': grant_type,
'code': code,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': redirect_uri,
},
'headers': {
'Content-Type': 'application/x-www-form-urlencoded',
}}
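# Usage sketch: the returned keys (url, data, headers) match the keyword arguments of
# requests.post(), so the dict can be unpacked directly. Endpoint and credential values
# below are placeholders.
import requests
post_kwargs = make_postdict_to_fetch_token(
token_endpoint='https://auth.example.com/token', grant_type='authorization_code',
code='AUTH_CODE', client_id='CLIENT_ID', client_secret='CLIENT_SECRET',
redirect_uri='https://app.example.com/callback')
token_response = requests.post(**post_kwargs)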
| 5,342,803
|
def _haxe_std_lib(ctx):
"""
_haxe_std_lib implementation.
Args:
ctx: Bazel context.
"""
toolchain = ctx.toolchains["@rules_haxe//:toolchain_type"]
build_source_file = ctx.actions.declare_file("StdBuild.hx")
toolchain.create_std_build(
ctx,
ctx.attr.target,
build_source_file,
)
hxml = create_hxml_map(ctx, toolchain, for_std_build = True)
hxml["classpaths"].append(build_source_file.dirname)
hxml["args"].append("--dce no")
# Handle the case where we're building in an external directory.
if hxml["external_dir"] != "":
ext_idx = build_source_file.path.find("external/")
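# The slice below strips the trailing "StdBuild.hx" (11 characters), keeping only the external directory prefix.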
hxml["external_dir"] = build_source_file.path[ext_idx:-11]
build_file = ctx.actions.declare_file("{}-std-build.hxml".format(ctx.attr.name))
create_build_hxml(ctx, toolchain, hxml, build_file, suffix = "-intermediate")
intermediate = ctx.actions.declare_directory(hxml["output_dir"])
# Do the compilation.
runfiles = [build_source_file] + find_direct_sources(ctx) + find_direct_resources(ctx)
toolchain.compile(
ctx,
hxml = build_file,
runfiles = runfiles,
out = intermediate,
)
# Post process the output file.
output = ctx.actions.declare_file(hxml["output_dir"].replace("-intermediate", ""))
output_file = ctx.actions.declare_file("{}/{}".format(ctx.attr.name, hxml["output_file"])) if "output_file" in hxml else None
if hxml["target"] == "java":
toolchain.create_final_jar(
ctx,
find_direct_sources(ctx),
intermediate,
output,
hxml["output_file"],
False,
output_file = output_file,
)
else:
inputs = [intermediate]
hxcpp_include_dir = None
if hxml["target"] == "cpp":
hxcpp_include_dir = ctx.actions.declare_directory("hxcpp_includes")
toolchain.copy_cpp_includes(ctx, hxcpp_include_dir)
inputs.append(hxcpp_include_dir)
cmd = "mkdir -p {} && cp -r {}/* {}".format(output.path, intermediate.path, output.path)
if hxcpp_include_dir != None:
cmd += " && cp -r {}/* {}/{}/include".format(hxcpp_include_dir.path, output.path, hxml["name"])
ctx.actions.run_shell(
outputs = [output, output_file] if output_file != None else [output],
inputs = inputs,
command = cmd,
use_default_shell_env = True,
)
return calc_provider_response(ctx, toolchain, hxml, output, output_file = output_file, library_name = "StdBuild")
| 5,342,804
|
def detect_vswhere_path():
"""
Attempt to detect the location of vswhere, which is used to query the installed visual studio tools (version 2017+)
:return: The validated path to vswhere
"""
# Find VS Where
path_program_files_x86 = os.environ['ProgramFiles(x86)']
if not path_program_files_x86 or not os.path.isdir(path_program_files_x86):
raise Errors.WafError("Unable to determine folder 'Program Files (x86)'")
path_visual_studio_installer = os.path.normpath(os.path.join(path_program_files_x86, 'Microsoft Visual Studio\\Installer\\'))
if not os.path.isdir(path_visual_studio_installer):
raise Errors.WafError("Unable to locate Visual Studio Installer.")
path_vswhere = os.path.normpath(os.path.join(path_visual_studio_installer, 'vswhere.exe'))
if not os.path.isfile(path_vswhere):
raise Errors.WafError("Unable to locate 'vswhere' in path '{}'.".format(path_visual_studio_installer))
return path_vswhere
| 5,342,805
|
def spread_match_network(expr_df_in, node_names_in):
"""
Matches S (spreadsheet of gene expressions) and N (network)
The function returns expr_df_out which is formed by reshuffling columns of
expr_df_in. Also, node_names_out is formed by reshuffling node_names_in. The
intersection of node_names_out and the column names of expr_df_out is placed at
the beginning of both lists.
Input:
expr_df_in: A pandas dataframe corresponding to gene expression
node_names_in: Name of the nodes in the network
Output:
expr_df_out: Reorganized dataframe of gene expressions
nodes_names_out: Reordered node names
nodes_genes_intersect: Sorted list of shared genes
"""
node_names_in_set = set(node_names_in)
gene_names_in_set = set(expr_df_in.columns.values)
nodes_genes_intersect = sorted(list(gene_names_in_set & node_names_in_set))
nodes_minus_genes = sorted(list(node_names_in_set - gene_names_in_set))
genes_minus_nodes = sorted(list(gene_names_in_set - node_names_in_set))
genes_names_out = nodes_genes_intersect + genes_minus_nodes
nodes_names_out = nodes_genes_intersect + nodes_minus_genes
expr_df_out = expr_df_in[genes_names_out]
return(expr_df_out, nodes_names_out, nodes_genes_intersect)
| 5,342,806
|
def _InstallNanny():
"""Installs the nanny program."""
# We need to copy the nanny sections to the registry to ensure the
# service is correctly configured.
new_config = config.CONFIG.MakeNewConfig()
new_config.SetWriteBack(config.CONFIG["Config.writeback"])
for option in _NANNY_OPTIONS:
new_config.Set(option, config.CONFIG.Get(option))
new_config.Write()
args = [
config.CONFIG["Nanny.binary"], "--service_key",
config.CONFIG["Client.config_key"], "install"
]
logging.debug("Calling %s", (args,))
output = subprocess.check_output(
args, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
logging.debug("%s", output)
| 5,342,807
|
def top_level(url, data):
"""Read top level names from compressed file."""
sb = io.BytesIO(data)
txt = None
with Archive(url, sb) as archive:
file = None
for name in archive.names:
if name.lower().endswith('top_level.txt'):
file = name
break
if file:
txt = archive.read(file).decode('utf-8')
sb.close()
return [name.replace('/', '.') for name in txt.splitlines()] if txt else []
| 5,342,808
|
def get_timeseries_data(request):
"""
AJAX Controller for getting time series data.
"""
return_obj = {}
# -------------------- #
# VERIFIES REQUEST #
# -------------------- #
if not (request.is_ajax() and request.method == "POST"):
return_obj["error"] = "Unable to establish a secure connection."
return JsonResponse(return_obj)
# -------------------------- #
# GETS DATA FROM REQUEST #
# -------------------------- #
layer_code = request.POST.get("layer_code")
site_code = request.POST.get("site_code")
variable_code = request.POST.get("var_code")
site_name = request.POST.get("site_name")
variable_name = request.POST.get("var_name")
# ------------------------- #
# GETS TIME SERIES DATA #
# ------------------------- #
network_id = layer_code.split(":")[0].split("-")[1]
database_id = ":".join(layer_code.split(":")[1:])
request_url = f"{hydroserver_url}/wof/{network_id}/{database_id}/values/"
params = {
"site_code": site_code,
"variable_code": variable_code
}
response = requests.get(request_url, params=params)
waterml = etree.fromstring(response.content)
no_data_value = waterml.find("{http://www.cuahsi.org/waterML/1.1/}timeSeries").find("{http://www.cuahsi.org/waterML/1.1/}variable").find("{http://www.cuahsi.org/waterML/1.1/}noDataValue").text
try:
unit_name = waterml.find("{http://www.cuahsi.org/waterML/1.1/}timeSeries").find("{http://www.cuahsi.org/waterML/1.1/}variable").find("{http://www.cuahsi.org/waterML/1.1/}unit").find("{http://www.cuahsi.org/waterML/1.1/}unitAbbreviation").text
except Exception:
unit_name = None
timeseries_data = [[
x.get('dateTime'),
x.text if x.text != no_data_value else None
] for x in waterml.find("{http://www.cuahsi.org/waterML/1.1/}timeSeries").find("{http://www.cuahsi.org/waterML/1.1/}values").iter("{http://www.cuahsi.org/waterML/1.1/}value")]
# -------------------------- #
# RETURNS DATA TO CLIENT #
# -------------------------- #
return_obj["timeseries_data"] = timeseries_data
return_obj["no_data_value"] = no_data_value
return_obj["site_name"] = site_name
return_obj["variable_name"] = variable_name
return_obj["unit_name"] = unit_name
return_obj["variable_code"] = variable_code
return_obj["site_code"] = site_code
return_obj["layer_code"] = layer_code
return JsonResponse(return_obj)
| 5,342,809
|
def line_col(lbreaks: List[int], pos: int) -> Tuple[int, int]:
"""
Returns the position within a text as (line, column)-tuple based
on a list of all line breaks, including -1 and EOF.
"""
if not lbreaks and pos >= 0:
return 0, pos
if pos < 0 or pos > lbreaks[-1]: # one character behind EOF is still an allowed position!
raise ValueError('Position %i outside text of length %s !' % (pos, lbreaks[-1]))
line = bisect.bisect_left(lbreaks, pos)
column = pos - lbreaks[line - 1]
return line, column
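# Usage sketch (bisect and typing.List/Tuple are assumed to be imported at module level, as
# the definition above requires). lbreaks holds every line-break index plus -1 and the text
# length, per the docstring; line and column come out 1-based.
text = "alpha\nbeta"
lbreaks = [-1] + [i for i, ch in enumerate(text) if ch == "\n"] + [len(text)]
print(line_col(lbreaks, 7))  # (2, 2) -> the 'e' in "beta"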
| 5,342,810
|
def show_img_augmentation(X, Y, sess):
"""This function is to visulize the result of data augmentation for testing"""
i = 0 # show the i-th image
img_ori, img_aug, hm_ori, hm_aug = sess.run([x_in, x_batch, y_in, hm_target_batch], feed_dict={x_in: X, y_in: Y})
img_aug = reshape_img(img_aug, i)
img_ori = reshape_img(img_ori, i)
hm_ori = reshape_hm(hm_ori, i)
hm_aug = reshape_hm(hm_aug, i)
# plt.figure(1)
# plt.imshow(img_ori)
# plt.savefig('img/img_orig.png', dpi=300)
# plt.clf()
plt.figure(2)
plt.imshow(img_aug)
plt.savefig('img/img_augm.png', dpi=300)
plt.clf()
# plt.figure(3)
# plt.imshow(hm_ori)
# plt.savefig('img/hm_orig.png', dpi=300)
# plt.clf()
plt.figure(4)
plt.imshow(hm_aug)
plt.savefig('img/hm_augm.png', dpi=300)
plt.clf()
| 5,342,811
|
def init_setup(dbhost, dbname, dbusername, dbpassword, projectpath):
"""
This function will check/create the config.json in the project root,
then check/create the revision table in the database.
"""
cwd = get_cwd()
json_dict = { "host": dbhost, "database": dbname, "user": dbusername, "password": dbpassword }
try:
CMobj = ConfigManager()
CMobj.init(json_dict, projectpath)
except Exception as e:
print(e)
custom_exit(1, "Issues while creating migration directory.", e)
config = CMobj.get_config(cwd)
db = DBManager(config.get("database"))
db.create_version_table()
| 5,342,812
|
def add_scrollbars_with_tags(outer, InnerType, *inner_args, **inner_kw):
""" Wrapper around `add_scrollbars`. Returns tuple of InnerType instance
and scroll tag. Scroll tag should be added to all `inner` child widgets that
affect scrolling.
"""
scrolltag = "tag_" + str(next(tags_count))
inner = add_scrollbars(outer, InnerType, *inner_args, **inner_kw)
inner.bindtags((scrolltag, ) + inner.bindtags())
canvas = inner.master
if OS == "Linux" :
def _on_mousewheel(event):
if event.num == 4:
canvas.yview("scroll", -1, "units")
elif event.num == 5:
canvas.yview("scroll", 1, "units")
inner.bind_class(scrolltag,
"<ButtonPress-4>", _on_mousewheel, '+'
)
inner.bind_class(scrolltag,
"<ButtonPress-5>", _on_mousewheel, '+'
)
elif OS == "Windows":
def _on_mousewheel(event):
canvas.yview("scroll", -event.delta // 120, "units")
inner.bind_class(scrolltag,
"<MouseWheel>", _on_mousewheel, '+'
)
else:
stderr.write("add_scrollbars_with_tags: OS %s not supported" % (OS))
return inner, scrolltag
| 5,342,813
|
def sample_from_script(script_path, num_lines, chars_per_line):
"""Sample num_lines from a script.
Parameters
----------
script_path : str
Path to the script
num_lines : int
Number of lines to sample.
chars_per_line : int
Number of consecutive characters considered a line.
Returns
-------
lines : List
All the sampled lines.
"""
script = read_script(script_path)
script = split_n_lines(script, num_chars=chars_per_line)
# sample with replacement since some scripts are sparse.
lines = np.random.choice(script, num_lines, replace=True)
return lines
| 5,342,814
|
def is_valid_uuid(x):
"""Determine whether this is a valid hex-encoded uuid."""
if not x or len(x) != 36:
return False
return parse_uuid(x) is not None
| 5,342,815
|
def wait_for_needle_list(
loops: int,
needle_list: list[tuple[str, tuple[int, int, int, int]]],
sleep_range: tuple[int, int],
):
"""
Works like vision.wait_for_needle(), except multiple needles can be
searched for simultaneously.
Args:
loops: The number of tries to look for each needle in needle_list.
needle_list: A list of filepaths to the needles to look for. Each
item in the list is a 2-tuple containing:
- The filepath to the needle.
- The region in which to search for that needle.
sleep_range: A 2-tuple containing the minimum and maximum number
of milliseconds to wait after each loop.
Returns:
If a needle in needle_list is found, returns a 2-tuple containing
the ltwh dimensions of the needle and the index of the needle in
needle_list (This is so the function knows which needle was found).
Returns False if no needle in needle_list could be found.
"""
for _ in range(loops):
for index, (needle, region) in enumerate(needle_list):
needle_found = Vision(
region=region, needle=needle, loop_num=1
).wait_for_needle(get_tuple=True)
if needle_found:
return needle_found, index
misc.sleep_rand(sleep_range[0], sleep_range[1])
return False
| 5,342,816
|
def get_word():
"""Returns random word."""
words = ['Charlie', 'Woodstock', 'Snoopy', 'Lucy', 'Linus',
'Schroeder', 'Patty', 'Sally', 'Marcie']
return random.choice(words).upper()
| 5,342,817
|
def write_out_tf_examples(objects, path):
"""Writes out list of objects out as a single tf_example
Args:
objects (list): list of objects to put into the tf_example
path (Path): directory to write this tf_example to, encompassing the name
"""
num_shards = (len(objects) // 1000) + 1
with open(str(path) + '.numexamples', 'w') as output:
output.write(str(len(objects)))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, str(path.absolute()), num_shards)
for index, object_item in enumerate(objects):
tf_example = object_item.export_as_TFExample()
output_shard_index = index % num_shards
output_tfrecords[output_shard_index].write(
tf_example.SerializeToString())
| 5,342,818
|
async def test_get_scm(client):
"""Test case for get_scm
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = await client.request(
method='GET',
path='/blue/rest/organizations/{organization}/scm/{scm}'.format(organization='organization_example', scm='scm_example'),
headers=headers,
)
assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')
| 5,342,819
|
def is_admin(user):
"""Check if the user is administrator"""
admin_user = current_app.config['ADMIN_USER']
if user.email == admin_user or user.email.replace('@cern.ch', '') == admin_user:
current_app.logger.debug('User {user} is admin'.format(user=user.email))
return True
return False
| 5,342,820
|
def test_task_to_data_relationship(dbsession):
"""Test data integrity across relationship between tables."""
get_id = uuid4()
g = GetTickerTask(
id=get_id, ticker='AUD_JPY', price='M', granularity='M15',
_from=d.strptime('2018-02-01 13:00:00', '%Y-%m-%d %H:%M:%S'),
to=d.strptime('2018-04-30 13:00:00', '%Y-%m-%d %H:%M:%S'))
s = [SubTickerTask(batch_id=get_id, _from=d.strptime(
'2018-02-01 13:00:00', '%Y-%m-%d %H:%M:%S'),
to=d.strptime(
'2018-03-01 12:45:00', '%Y-%m-%d %H:%M:%S')),
SubTickerTask(batch_id=get_id, _from=d.strptime(
'2018-03-01 13:00:00', '%Y-%m-%d %H:%M:%S'),
to=d.strptime(
'2018-04-01 12:45:00', '%Y-%m-%d %H:%M:%S')),
SubTickerTask(batch_id=get_id, _from=d.strptime(
'2018-04-01 13:00:00', '%Y-%m-%d %H:%M:%S'),
to=d.strptime(
'2018-04-30 13:00:00', '%Y-%m-%d %H:%M:%S'))]
entries = [g] + s
dbsession.add_all(entries)
assert len(dbsession.query(GetTickerTask).get(get_id).sub_tasks) == 3
| 5,342,821
|
def test_get_port_from_range(port_range):
"""Test getting random port from given range."""
assert get_port(port_range) in list(range(2000, 3000 + 1))
| 5,342,822
|
def parsed_json_to_dict(parsed):
"""
Convert a parsed dict into a dict with Python built-in types.
:param parsed: dict produced by the json decoder
"""
new_bangumi = {}
new_bangumi['name'] = parsed['name']
new_bangumi['start_date'] = datetime.strptime(
parsed['start_date'], '%Y-%m-%d').date()
if 'translation_team' in parsed:
new_bangumi['translation_team'] = parsed['translation_team']
else:
new_bangumi['translation_team'] = []
if 'total_ep' in parsed:
new_bangumi['total_ep'] = int(parsed['total_ep'])
else:
new_bangumi['total_ep'] = 99
if 'dled_ep' in parsed:
new_bangumi['dled_ep'] = int(parsed['dled_ep'])
else:
new_bangumi['dled_ep'] = 0
if 'keyword' in parsed:
new_bangumi['keyword'] = parsed['keyword']
else:
new_bangumi['keyword'] = new_bangumi['name']
new_bangumi['folder'] = parsed['folder'] if 'folder' in parsed and parsed[
'folder'] != '' else new_bangumi['name']
new_bangumi['offset'] = int(parsed['offset']) if 'offset' in parsed else 0
return new_bangumi
| 5,342,823
|
def download_osmnx_graph(): # pragma: no cover
"""Load a simple street map from Open Street Map.
Generated from:
.. code:: python
>>> import osmnx as ox # doctest:+SKIP
>>> address = 'Holzgerlingen DE' # doctest:+SKIP
>>> graph = ox.graph_from_address(address, dist=500, network_type='drive') # doctest:+SKIP
>>> pickle.dump(graph, open('osmnx_graph.p', 'wb')) # doctest:+SKIP
Returns
-------
networkx.classes.multidigraph.MultiDiGraph
An osmnx graph of the streets of Holzgerlingen, Germany.
Examples
--------
>>> from pyvista import examples
>>> graph = examples.download_osmnx_graph() # doctest:+SKIP
See :ref:`open_street_map_example` for a full example using this dataset.
"""
import pickle
try:
import osmnx # noqa
except ImportError:
raise ImportError('Install `osmnx` to use this example')
filename, _ = _download_file('osmnx_graph.p')
return pickle.load(open(filename, 'rb'))
| 5,342,824
|
def plot_shift_type_by_frequency(tidy_schedule: pandas.DataFrame) -> tuple:
"""
Plots a bar graph of shift type frequencies.
:param tidy_schedule: A pandas data frame containing a schedule,
as loaded by load_tidy_schedule().
:type tidy_schedule: pandas.DataFrame
:return: A tuple with a figure and an axis containing a matplotlib bar
graph.
:rtype: tuple
"""
return_data = (
tidy_schedule
.groupby('shift_type')
.agg({'shift_type': 'count'})
.query('shift_type > 0')
.rename_axis(None)
.sort_values(by='shift_type', ascending=False)
)
dates = medinetparsepy.get_min_max_dates.get_min_max_dates(tidy_schedule)
fig, ax = matplotlib.pyplot.subplots()
ax.bar(return_data.index, return_data['shift_type'])
ax.set_xlabel('Shift Type')
ax.set_ylabel('Frequency')
ax.set_title(f'Shift Type by Frequency\nBetween {dates[0]} and {dates[1]}')
return (fig, ax)
| 5,342,825
|
def main(args):
"""Main function.
Parses the raw uri file and generates a new file without
length number at the beginning of the line
If the line is empty a warning log will be shown
:param args: command-line retrieved arguments
:type args: ArgumentParser.parse_args()
:raises FileNotFoundError: if file does not exist
"""
log.info(LOG_INFO_MAIN)
output_file_name = output_file_def(args.input, args.output)
file_out = open(output_file_name, 'w')
file_warn = open(WARN_FILE, 'w')
exist_warnings = False
try:
with open(args.input, 'r', encoding='ISO-8859-1', errors='ignore') as file:
count = 1
raw_cp = get_raw_uri_file_compiled_pattern()
for line in file:
result = raw_cp.search(line)
uri = ''
if result is not None:
uri_first_index = result.span('uri')[0]
uri = uri_fixer(line[uri_first_index:])
elif len(line) > 0:
uri = uri_fixer(line)
else:
exist_warnings = True
file_warn.write(URI_WARN.format(count, line))
if len(uri) > 0: file_out.write(uri)
count += 1
file.close()
file_out.close()
file_warn.close()
if exist_warnings:
log.warning(URI_LOG_WARN)
except FileNotFoundError:
log.error(FILE_NOT_EXISTS_ERROR % args.input)
log.info(LOG_INFO_END % output_file_name)
| 5,342,826
|
def experiment(variant):
"""
This is a job script for running NPG/DAPG on hand tasks and other gym envs.
Note that DAPG generalizes PG and BC init + PG finetuning.
With appropriate settings of parameters, we can recover the full family.
"""
import mj_envs
job_data = default_job_data.copy()
job_data.update(variant)
env_params = ENV_PARAMS[variant['env_class']]
job_data.update(env_params)
assert 'algorithm' in job_data.keys()
assert any([job_data['algorithm'] == a for a in ['NPG', 'BCRL', 'DAPG']])
JOB_DIR = logger.get_snapshot_dir()
# ===============================================================================
# Train Loop
# ===============================================================================
seed = int(job_data['seedid'])
e = GymEnv(job_data['env_id'])
policy = MLP(e.spec, hidden_sizes=job_data['policy_size'], seed=seed)
baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=job_data['vf_batch_size'],
epochs=job_data['vf_epochs'], learn_rate=job_data['vf_learn_rate'])
# Get demonstration data if necessary and behavior clone
if job_data['algorithm'] != 'NPG':
print("========================================")
print("Collecting expert demonstrations")
print("========================================")
demo_paths = load_local_or_remote_file(job_data['demo_file'], 'rb')
bc_agent = BC(demo_paths, policy=policy, epochs=job_data['bc_epochs'], batch_size=job_data['bc_batch_size'],
lr=job_data['bc_learn_rate'], loss_type='MSE', set_transforms=False)
in_shift, in_scale, out_shift, out_scale = bc_agent.compute_transformations()
bc_agent.set_transformations(in_shift, in_scale, out_shift, out_scale)
bc_agent.set_variance_with_data(out_scale)
ts = timer.time()
print("========================================")
print("Running BC with expert demonstrations")
print("========================================")
bc_agent.train()
print("========================================")
print("BC training complete !!!")
print("time taken = %f" % (timer.time() - ts))
print("========================================")
if job_data['eval_rollouts'] >= 1:
score = e.evaluate_policy(policy, num_episodes=job_data['eval_rollouts'], mean_action=True)
print("Score with behavior cloning = %f" % score[0][0])
if job_data['algorithm'] != 'DAPG':
# We throw away the demo data when training from scratch or fine-tuning with RL without explicit augmentation
demo_paths = None
# ===============================================================================
# RL Loop
# ===============================================================================
rl_agent = DAPG(e, policy, baseline, demo_paths,
normalized_step_size=job_data['rl_step_size'],
lam_0=job_data['lam_0'], lam_1=job_data['lam_1'],
seed=seed, save_logs=True
)
print("========================================")
print("Starting reinforcement learning phase")
print("========================================")
ts = timer.time()
train_agent(job_name=JOB_DIR,
agent=rl_agent,
seed=seed,
niter=job_data['rl_num_iter'],
gamma=job_data['rl_gamma'],
gae_lambda=job_data['rl_gae'],
num_cpu=job_data['num_cpu'],
sample_mode='trajectories',
num_traj=job_data['rl_num_traj'],
save_freq=job_data['save_freq'],
evaluation_rollouts=job_data['eval_rollouts'])
print("time taken = %f" % (timer.time()-ts))
| 5,342,827
|
def load_image(path, size=None):
"""
Load the image from the given file-path and resize it to the given size if not None.
Eg: size = (width, height)
"""
img = Image.open(path)
if size is not None and size != '':
img = img.resize(size=size, resample=Image.LANCZOS)
img = np.array(img)
# Scale image-pixels so they fall between 0.0 and 1.0
# img = img / 255.0
# Convert 2-dim gray-scale array to 3-dim RGB array.
if (len(img.shape) == 2):
img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
return np.array(img)
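# Usage sketch ('photo.jpg' is a placeholder path; PIL.Image and numpy as np are assumed to
# be imported at module level, as the function body requires):
img = load_image('photo.jpg', size=(224, 224))
print(img.shape)  # (224, 224, 3) for an RGB or grayscale source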
| 5,342,828
|
def test_indexer():
"""
Test Indexer class
"""
assert dataclasses.asdict(IdrDex) == {
'Ed25519_Sig': 'A',
'ECDSA_256k1_Sig': 'B',
'Ed448_Sig': '0A',
'Label': '0B'
}
assert IdrDex.Ed25519_Sig == 'A' # Ed25519 signature.
assert IdrDex.ECDSA_256k1_Sig == 'B' # ECDSA secp256k1 signature.
assert Indexer.Codex == IdrDex
# first character of code with hard size of code
assert Indexer.Hards == {
'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1, 'G': 1, 'H': 1, 'I': 1,
'J': 1, 'K': 1, 'L': 1, 'M': 1, 'N': 1, 'O': 1, 'P': 1, 'Q': 1, 'R': 1,
'S': 1, 'T': 1, 'U': 1, 'V': 1, 'W': 1, 'X': 1, 'Y': 1, 'Z': 1,
'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1, 'f': 1, 'g': 1, 'h': 1, 'i': 1,
'j': 1, 'k': 1, 'l': 1, 'm': 1, 'n': 1, 'o': 1, 'p': 1, 'q': 1, 'r': 1,
's': 1, 't': 1, 'u': 1, 'v': 1, 'w': 1, 'x': 1, 'y': 1, 'z': 1,
'0': 2, '1': 2, '2': 2, '3': 2, '4': 3, '5': 4
}
# Codes table with sizes of code (hard) and full primitive material
assert Indexer.Sizes == {
'A': Sizage(hs=1, ss=1, fs=88, ls=0),
'B': Sizage(hs=1, ss=1, fs=88, ls=0),
'0A': Sizage(hs=2, ss=2, fs=156, ls=0),
'0B': Sizage(hs=2, ss=2, fs=None, ls=0)
}
assert Indexer.Sizes['A'].hs == 1 # hard size
assert Indexer.Sizes['A'].ss == 1 # soft size
assert Indexer.Sizes['A'].fs == 88 # full size
assert Indexer.Sizes['A'].ls == 0 # lead size
# verify first hs Sizes matches hs in Codes for same first char
for ckey in Indexer.Sizes.keys():
assert Indexer.Hards[ckey[0]] == Indexer.Sizes[ckey].hs
# verify all Codes have hs > 0 and ss > 0 and fs >= hs + ss if fs is not None
for val in Indexer.Sizes.values():
assert val.hs > 0 and val.ss > 0
if val.fs is not None:
assert val.fs >= val.hs + val.ss
# Bards maps the byte (sextet) of the decoded first character of a code to the hard size of that code
# verify equivalence of items in Hards and Bards
for skey, sval in Indexer.Hards.items():
ckey = b64ToB2(skey)
assert Indexer.Bards[ckey] == sval
with pytest.raises(EmptyMaterialError):
indexer = Indexer()
# Test signatures
sig = (b"\x99\xd2<9$$0\x9fk\xfb\x18\xa0\x8c@r\x122.k\xb2\xc7\x1fp\x0e'm\x8f@"
b'\xaa\xa5\x8c\xc8n\x85\xc8!\xf6q\x91p\xa9\xec\xcf\x92\xaf)\xde\xca'
b'\xfc\x7f~\xd7o|\x17\x82\x1d\xd4<o"\x81&\t')
assert len(sig) == 64
sig64b = encodeB64(sig)
sig64 = sig64b.decode("utf-8")
assert len(sig64) == 88
assert sig64 == 'mdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ=='
# replace pad "==" with code "AA"
qsc = IdrDex.Ed25519_Sig + intToB64(0, l=1)
assert qsc == 'AA'
qscb = qsc.encode("utf-8")
qsig64 = qsc + sig64[:-2]
assert qsig64 == 'AAmdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ'
assert len(qsig64) == 88
qsig64b = qsig64.encode("utf-8")
qsig2b = decodeB64(qsig64b)
assert len(qsig2b) == 66
assert qsig2b == (b'\x00\t\x9d#\xc3\x92BC\t\xf6\xbf\xb1\x8a\x08\xc4\x07!#"\xe6\xbb,q\xf7'
b'\x00\xe2v\xd8\xf4\n\xaaX\xcc\x86\xe8\\\x82\x1fg\x19\x17\n\x9e\xcc'
b'\xf9*\xf2\x9d\xec\xaf\xc7\xf7\xedv\xf7\xc1x!\xddC\xc6\xf2(\x12`\x90')
indexer = Indexer(raw=sig)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 0
assert indexer.qb64 == qsig64
assert indexer.qb2 == qsig2b
# test wrong size of raw
longsig = sig + bytes([10, 11, 12])
indexer = Indexer(raw=longsig)
shortsig = sig[:-3]
with pytest.raises(RawMaterialError):
indexer = Indexer(raw=shortsig)
indexer = Indexer(qb64b=qsig64b) # test with bytes not str
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 0
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
indexer = Indexer(qb64=qsig64) # test with str not bytes
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 0
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
# test truncates extra bytes from qb64 parameter
longqsig64 = qsig64 + "ABCD"
indexer = Indexer(qb64=longqsig64)
assert len(indexer.qb64) == Indexer.Sizes[indexer.code].fs
# test raises ShortageError if not enough bytes in qb64 parameter
shortqsig64 = qsig64[:-4] # too short
with pytest.raises(ShortageError):
indexer = Indexer(qb64=shortqsig64)
indexer = Indexer(qb2=qsig2b) # test with qb2
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 0
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
# test truncates extra bytes from qb2 parameter
longqsig2b = qsig2b + bytearray([1, 2, 3, 4, 5]) # extra bytes in size
indexer = Indexer(qb2=longqsig2b)
assert indexer.qb2 == qsig2b
assert len(indexer.qb64) == Indexer.Sizes[indexer.code].fs
# test raises ShortageError if not enough bytes in qb2 parameter
shortqsig2b = qsig2b[:-4] # too few bytes in size
with pytest.raises(ShortageError):
indexer = Indexer(qb2=shortqsig2b)
# test with non-zero index=5
# replace pad "==" with code "AF"
qsc = IdrDex.Ed25519_Sig + intToB64(5, l=1)
assert qsc == 'AF'
qscb = qsc.encode("utf-8")
qsig64 = qsc + sig64[:-2]
assert qsig64 == 'AFmdI8OSQkMJ9r-xigjEByEjIua7LHH3AOJ22PQKqljMhuhcgh9nGRcKnsz5KvKd7K_H9-1298F4Id1DxvIoEmCQ'
assert len(qsig64) == 88
qsig64b = qsig64.encode("utf-8")
qsig2b = (b'\x00Y\x9d#\xc3\x92BC\t\xf6\xbf\xb1\x8a\x08\xc4\x07!#"\xe6\xbb,q\xf7'
b'\x00\xe2v\xd8\xf4\n\xaaX\xcc\x86\xe8\\\x82\x1fg\x19\x17\n\x9e\xcc'
b'\xf9*\xf2\x9d\xec\xaf\xc7\xf7\xedv\xf7\xc1x!\xddC\xc6\xf2(\x12`\x90')
indexer = Indexer(raw=sig, code=IdrDex.Ed25519_Sig, index=5)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
indexer = Indexer(qb2=qsig2b)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
indexer = Indexer(qb64=qsig64)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
# Label Code (variable length)
label = b'Hello_World_Peep'
index = len(label) // 4
assert not len(label) % 4
assert index == 4
lraw = decodeB64(label)
assert len(lraw) == len(label) * 3 // 4
assert lraw == b'\x1d\xe9e\xa3\xf5\xa8\xaeW\x7f=\xe7\xa9'
ltext = encodeB64(lraw)
assert ltext == b'Hello_World_Peep' == label
qsc = IdrDex.Label + intToB64(index, l=2)
assert qsc == '0BAE'
qscb = qsc.encode("utf-8")
lq64b = qscb + label
assert lq64b == b"0BAEHello_World_Peep"
lq64 = lq64b.decode("utf-8")
indexer = Indexer(raw=lraw, code=IdrDex.Label, index=index)
assert indexer.raw == lraw
assert indexer.code == IdrDex.Label
assert indexer.index == index
assert indexer.qb64b == lq64b
assert indexer.qb64 == lq64
assert indexer.qb2 == b'\xd0\x10\x04\x1d\xe9e\xa3\xf5\xa8\xaeW\x7f=\xe7\xa9'
# index zero for empty label
indexer = Indexer(raw=lraw, code=IdrDex.Label, index=0)
assert indexer.raw == b''
assert indexer.code == IdrDex.Label
assert indexer.index == 0
assert indexer.qb64b == b'0BAA'
assert indexer.qb64 == '0BAA'
assert indexer.qb2 == b'\xd0\x10\x00'
# Test ._bexfil
indexer = Indexer(qb64=qsig64) #
raw = indexer.raw
code = indexer.code
index = indexer.index
qb2 = indexer.qb2
indexer._bexfil(qb2)
assert indexer.raw == raw
assert indexer.code == code
assert indexer.index == index
assert indexer.qb64 == qsig64
assert indexer.qb2 == qb2
# Test ._binfil
test = indexer._binfil()
assert test == qb2
# test strip ims
# strip ignored if qb64
indexer = Indexer(qb64=qsig64)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
ims = bytearray(qsig64b)
indexer = Indexer(qb64b=ims, strip=True)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
assert not ims
ims = bytearray(qsig2b)
indexer = Indexer(qb2=ims, strip=True)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
assert not ims
# test extra bytes in ims qb64b
extra = bytearray(b"ABCD")
ims = bytearray(qsig64b) + extra
indexer = Indexer(qb64b=ims, strip=True)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
assert ims == extra
# test extra bytes in ims qb2
extra = bytearray([1, 2, 3, 4, 5])
ims = bytearray(qsig2b) + extra
indexer = Indexer(qb2=ims, strip=True)
assert indexer.raw == sig
assert indexer.code == IdrDex.Ed25519_Sig
assert indexer.index == 5
assert indexer.qb64 == qsig64
assert indexer.qb64b == qsig64b
assert indexer.qb2 == qsig2b
assert ims == extra
""" Done Test """
| 5,342,829
|
def train_linkpred(model,
optimizer,
train_loader_generator,
validation_loader_generator
):
"""
:param model:
:param optimizer:
:param train_loader_generator:
:param validation_loader_generator:
:return:
"""
for epoch in range(3):
count = 0
time_start = time.time()
train_loader = train_loader_generator.get_edge_loader()
for g in train_loader:
for step, (input_nodes, positive_graph, negative_graph, blocks) in enumerate(g):
pos_score = model(positive_graph)
neg_score = model(positive_graph, negative_graph=negative_graph)
score = torch.cat([pos_score, neg_score])
label = torch.cat([torch.ones_like(pos_score), torch.zeros_like(neg_score)])
loss = F.binary_cross_entropy_with_logits(score, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
count += len(input_nodes)
# if True or not count % 50:
# print(count, loss.item(), time.time() - time_start)
print(f"EPOCH {epoch}, time for the epoch : {time.time() - time_start:2f}, last loss {loss.item():2f}")
aucs = []
count = 0
model.eval()
validation_loader = validation_loader_generator.get_edge_loader()
for i, g in enumerate(validation_loader):
print("val graph ", i)
for input_nodes, positive_graph, negative_graph, blocks in g:
with torch.no_grad():
pos_score = model(positive_graph)
neg_score = model(positive_graph, negative_graph=negative_graph)
score = torch.cat([pos_score, neg_score]).detach().numpy()
label = torch.cat([torch.ones_like(pos_score), torch.zeros_like(neg_score)])
label = label.detach().numpy()
# print(score, label)
aucs.append(roc_auc_score(label, score))
count += 1
print('Time used : ', time.time() - time_start)
print("AUC", np.mean(aucs))
pass
| 5,342,830
|
def call_wifi(label):
"""Wifi connect function
Parameters
----------
label : str
Output label
Returns
-------
None on success; False if the connection or ping attempt fails
"""
try:
# Setup wifi and connection
print(wifi.radio.connect(secrets['ssid'], secrets['password']))
print('ip', wifi.radio.ipv4_address)
show_text("ip: {}".format(wifi.radio.ipv4_address), label)
ipv4 = ipaddress.ip_address('8.8.8.8')
ping_result = wifi.radio.ping(ipv4)
print('ping', ping_result)
show_text("ping: {}".format(ping_result), label)
except Exception:
return False
| 5,342,831
|
def load_normalized_face_landmarks():
"""
Loads the normalized locations of each of the 68 face landmarks
:return: np.float32 array of shape (68, 2) with normalized (x, y) coordinates
"""
normalized_face_landmarks = np.float32([
(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
(0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
(0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
(0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
(0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
(0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
(0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
(0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
(0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
(0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
(0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
(0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
(0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
(0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
(0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
(0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
(0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
(0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
(0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
(0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
(0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
(0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
(0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
(0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
(0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
(0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
(0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
(0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
(0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
(0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
(0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
(0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
(0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
(0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
return normalized_face_landmarks
| 5,342,832
|
def vacuum_vessel(shot):
"""
Get the coordinates of the Tore Supra / WEST vacuum vessel
R_wall, Z_wall = vacuum_vessel(shot)
Arguments:
- shot: Tore Supra or WEST shot number
Returns:
- R_wall: radius of the vacuum chamber walls [m]
- Z_wall: height of the vacuum chamber walls [m]
TODO: once WEST will have started, get the final vacuum vessel coordinates
"""
if (shot <= 0) or (not isinstance(shot, int)):
raise ValueError('Shot number should be a positive integer')
elif shot < 50000: # Tore Supra vacuum chamber profile
wall = pw.tsmat(shot, 'APOLO;+Parametres;Paroi')
R_wall = wall[:,0]
Z_wall = wall[:,1]
else: # WEST vacuum chamber profile
# get the absolute path of the filename, in order to work even if launched from other dir
filename = os.path.dirname(__file__) + os.sep + 'WEST_vacuum_vessel.txt'
R_wall, Z_wall = np.loadtxt(filename, skiprows=1, unpack=True)
return R_wall, Z_wall
| 5,342,833
|
def backup_file(file):
"""Create timestamp'd backup of a file
Args:
file (str): filepath
Returns:
backupfile(str)
"""
current_time = datetime.now()
time_stamp = current_time.strftime("%b-%d-%y-%H.%M.%S")
backupfile = file + '.bkp_' + time_stamp
copyfile(file, backupfile)
return backupfile
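# Usage sketch ('settings.ini' is a placeholder path; datetime and shutil.copyfile are
# assumed to be imported at module level, as the function body requires):
backup_path = backup_file('settings.ini')
print(backup_path)  # e.g. settings.ini.bkp_Jan-05-24-13.45.10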
| 5,342,834
|
def svn_client_mergeinfo_log_eligible(*args):
"""
svn_client_mergeinfo_log_eligible(char path_or_url, svn_opt_revision_t peg_revision,
char merge_source_path_or_url, svn_opt_revision_t src_peg_revision,
svn_log_entry_receiver_t receiver,
svn_boolean_t discover_changed_paths,
apr_array_header_t revprops, svn_client_ctx_t ctx,
apr_pool_t pool) -> svn_error_t
"""
return _client.svn_client_mergeinfo_log_eligible(*args)
| 5,342,835
|
def share_nodes_sockets():
"""
Create a shared node layout where the simulation and analysis ranks share
compute nodes. Furthermore, they share sockets of the node.
"""
shared_sockets = SummitNode()
for i in range(10):
shared_sockets.cpu[i] = "simulation:{}".format(i)
shared_sockets.cpu[21+i] = "simulation:{}".format(10+i)
for i in range(10):
shared_sockets.cpu[10+i] = "pdf_calc:{}".format(i)
shared_sockets.cpu[21+10+i] = "pdf_calc:{}".format(10+i)
return [shared_sockets]
| 5,342,836
|
def imlist(img_dir, valid_exts=None, if_recursive=False):
"""
List images under directory
:param img_dir:
:param valid_exts:
:param if_recursive:
:return:
"""
from glob import glob
if is_str(valid_exts):
valid_exts = [valid_exts.strip(".")]
valid_exts = list(valid_exts) if is_seq(valid_exts) else ["jpg", "jpeg", "bmp", "tif", "gif", "png"]
images = []
for ext in valid_exts:
images.extend(glob(os.path.join(img_dir, "**", "*.%s" % ext), recursive=if_recursive))
return images
| 5,342,837
|
def add_assignment_to_db(db, assignment):
"""
Adds an assignment to the database
:param db: (string) The database to connect to
:param assignment: (Feature) The assignment to add
:return:
"""
conn = sqlite3.connect(db)
c = conn.cursor()
params = (
assignment.global_id,
)
c.execute("INSERT INTO assignments VALUES (?)",
params)
conn.commit()
conn.close()
| 5,342,838
|
def _rle_decode(data):
"""
Decodes run-length-encoded `data`.
"""
if not data:
return data
new = b''
last = b''
for cur in data:
if last == b'\0':
new += last * cur
last = b''
else:
new += last
last = bytes([cur])
return new + last
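# Behaviour sketch, traced from the loop above: a zero byte followed by a count byte expands
# into that many zero bytes, while every other byte passes through unchanged.
assert _rle_decode(b'ab\x00\x03c') == b'ab\x00\x00\x00c'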
| 5,342,839
|
def blend_and_save(freq_dict, filename, colormap='viridis', dots=600):
"""Plot a heatmap, upscale it to the keyboard and save a blended image."""
# Clear the heatmap plot and axes
plt.clf()
plt.xticks([])
plt.yticks([])
plt.axis('off')
# Display the data on the heatmap
heatmap_data = get_frequencies(freq_dict)
plt.imshow(
heatmap_data, interpolation='lanczos', zorder=1, cmap=colormap
)
# Save the heatmap plot
plt.savefig(
'obj/heatmap.png',
dpi=dots,
pad_inches=0,
transparent=True,
bbox_inches='tight'
)
# Open the heatmap image
heatmap = Image.open(
'obj/heatmap.png'
)
# Resize the heatmap to the keyboard's size, with antialiasing
heatmap = heatmap.resize(keyboard.size, Image.LANCZOS)
heatmap.save(
'obj/heatmap.png'
)
# Blend the images, and save
blended = ImageChops.darker(keyboard, heatmap)
blended.save(
'{0}/{1}'.format(os.getcwd(), filename)
)
| 5,342,840
|
def flatten(lst):
"""Helper function used to massage the raw tweet data."""
for el in lst:
if (isinstance(el, collections.abc.Iterable) and
not isinstance(el, str)):
for sub in flatten(el):
yield sub
else:
yield el
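# Usage sketch: nested sequences are flattened while strings are yielded whole
# (collections.abc must be importable, as the isinstance check above requires).
import collections.abc
print(list(flatten([1, [2, [3, 'ab']], (4, 5)])))  # [1, 2, 3, 'ab', 4, 5]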
| 5,342,841
|
def setup(app):
"""
Set up the sphinx extension.
"""
app.add_role('f', f_role)
app.add_role('ipa', ipa_role)
| 5,342,842
|
def find_shift_between_two_models(model_1,model_2,shift_range=5,number_of_evaluations=10,rotation_angles=[0.,0.,0.],
cropping_model=0,initial_guess=[0.,0.,0.], method='brute_force',full_output=False):
"""
Find the correct shift alignment in 3D by using a different optimization algorithms to minimise the distance between the two models.
Args:
:model_1(float ndarray): 3d ndarray of the fixed object
:model_2(float ndarray): 3d ndarray ot the rotatable model
Kwargs:
:shift_range(float): absolute value of the range in which the brute should be applied
:number_of_evaluations(int): number of grid points on which the brute force optimises
:rotation_angles(list): set of euler angles for rotating model_2 before applying the shift
:method(str): the optimisation method used to minimise the difference, default = brute_force, other option fmin_l_bfgs_b
:full_output(bool): returns full output as a dictionary, default = False
"""
def shifting(x,model_1,model_2):
x0, x1, x2 = x
#model_2 = nutcracker.utils.rotate.rotation_based_on_euler_angles(model_2, rotation_angles)
model_2 = ndimage.interpolation.shift(model_2, shift=(x0, x1, x2), order=0, mode='wrap')
#model_2 = np.roll(np.roll(np.roll(model_2,int(x0),axis=0), int(x1), axis=1), int(x2), axis=2)
return np.sum(np.abs(model_1 - model_2) ** 2)
model_2 = nutcracker.utils.rotate.rotation_based_on_euler_angles(model_2, rotation_angles)
# cropping the model
if cropping_model:
model_1 = model_1[cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2]
model_2 = model_2[cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2,cropping_model//2:-cropping_model//2]
args = (model_1, model_2)
if method == 'brute_force':
# set parameters
r = slice(-float(shift_range),float(shift_range),2.*shift_range/number_of_evaluations)
ranges = [r,r,r]
# shift retrieval brute force
shift = optimize.brute(shifting, ranges=ranges, args=args, full_output=True, finish=optimize.fmin_bfgs)
shift = np.array(shift)
elif method == 'fmin_l_bfgs_b':
#parameter for fmin_l_bfgs_b
x0 = np.array(initial_guess)
# fmin_l_bfgs_b optimisation
shift = optimize.fmin_l_bfgs_b(shifting, x0, args=args, approx_grad=True)
shift = np.array(shift)
shift_values = shift[0]
if full_output:
if method == 'brute_force':
out = {'shift_values':shift[0],
'shift_fvalues':shift[1],
'shift_grid':shift[2],
'shift_jout':shift[3]}
elif method == 'fmin_l_bfgs_b':
out = {'shift_values':shift[0],
'shift_fvalues':shift[1]}
return out
else:
return shift_values
| 5,342,843
|
def read_cmd(frame, data):
"""Get the return of the cmd sent to the MicroPython card
:param data: The command sent
:type data: str
:return: the return of the command sent
:rtype: str
"""
b = frame.serial.read(frame.serial.in_waiting)
frame.is_data = False
if b:
frame.is_data = True
b = b.replace(b'\r\n', b'\n')
serial_read_data(frame, b)
frame.result = GetCmdReturn(frame.last_cmd_red, data)
| 5,342,844
|
def get_url(
url: str,
stream: bool = False,
session: Optional[requests.Session] = None
) -> requests.Response:
"""Call requests.get() on a url and return the requests.Response."""
if not session:
session = retry_session()
resp = session.get(url, stream=stream)
resp.raise_for_status()
return resp
| 5,342,845
|
def uploadFromPath(localFilePath: str,
resource,
bucketName: str,
fileID: str,
headerArgs: Optional[dict] = None,
partSize: int = 50 << 20):
"""
Uploads a file to s3, using multipart uploading if applicable
:param str localFilePath: Path of the file to upload to s3
:param S3.Resource resource: boto3 resource
:param str bucketName: name of the bucket to upload to
:param str fileID: the name of the file to upload to
:param dict headerArgs: http headers to use when uploading - generally used for encryption purposes
:param int partSize: max size of each part in the multipart upload, in bytes
:return: version of the newly uploaded file
"""
if headerArgs is None:
headerArgs = {}
client = resource.meta.client
file_size, file_time = fileSizeAndTime(localFilePath)
version = uploadFile(localFilePath, resource, bucketName, fileID, headerArgs, partSize)
info = client.head_object(Bucket=bucketName, Key=compat_bytes(fileID), VersionId=version, **headerArgs)
size = info.get('ContentLength')
assert size == file_size
# Make reasonably sure that the file wasn't touched during the upload
assert fileSizeAndTime(localFilePath) == (file_size, file_time)
return version
| 5,342,846
|
def scale_log2lin(value):
"""
Scale value from log10 to linear scale: 10**(value/10)
Parameters
----------
value : float or array-like
Value or array to be scaled
Returns
-------
float or array-like
Scaled value
"""
return 10**(value/10)
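# Usage sketch: 10**(value/10) is the usual decibel-to-linear power conversion.
import numpy as np
print(scale_log2lin(30))                         # 1000.0
print(scale_log2lin(np.array([0., 10., 20.])))   # [  1.  10. 100.]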
| 5,342,847
|
async def test_key_error(opp):
"""Test Connect returning empty string."""
flow = config_flow.SomaFlowHandler()
flow.opp = opp
with patch.object(SomaApi, "list_devices", return_value={}):
result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "connection_error"
| 5,342,848
|
def _cal_tgt_match_pct_worker(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp):
"""
Description:
Worker function to calculate match percents in target populations.
Arguments:
in_queue multiprocessing.Queue: multiprocessing.Queue instance to receive parameters from the manager.
out_queue multiprocessing.Queue: multiprocessing.Queue instance to send results back to the manager.
mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome.
tgt_data dict: Genotype data from target populations.
src_data dict: Genotype data from source populations.
src_samples list: List containing sample information for source populations.
ref_match_pct dict: Match percents in reference populations as the null distribution.
sample_size int: Number of individuals analyzed.
query_ref_match_pct func: Function used to query match percentages from reference populations.
mapped_len_esp float: Increment of the length of the mapped region.
len_esp float: Increment of the length of the haplotype.
var_esp float: Increment of the number of derived alleles on the haplotype.
sfs_esp float: Increment of mean site frequency spectrum.
"""
while True:
index, data = in_queue.get()
res = _cal_pvalue_ind(data, index, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)
out_queue.put("\n".join(res))
| 5,342,849
|
def train(
train_length:Union[int, TrainLength], model:nn.Module, dls:DataLoaders, loss_func:LossFunction,
opt:torch.optim.Optimizer, sched=None, metric:Optional[Metric]=None,
device=None, clip_grad:ClipGradOptions=None, callbacks:List[TrainingCallback]=None
) -> TrainingStats:
"""
Train `model` with the data given by `dls.train` to minimize `loss_func`.
Args
train_length: if it's an int, number of training epochs; if it's a TrainLength's subclass instance, training
won't stop until `train_length.must_stop(...)`, which is called at the end of each epoch, returns `True`.
model: module to train.
dls: dataloaders that iterates over the training and validation data. If you don't want to evaluate `model`
using a validation set, `dls.valid` can be `None`.
loss_func: loss function to minimize. We assume that this loss function applies reduction over the batch, i.e.,
it only returns one value.
opt: Pytorch optimizer
sched: scheduler with a method `step` that will be executed once per step.
metric: function that receives a model, a DataLoader `dl` and a `metric_fn` function, computes the metric
`metric_fn` for every batch of `dl` and returns the average.
device: device, in Pytorch format, where the model and data should be placed to train and calculate metrics.
clip_grad: if not None, the gradients of `clip_grad` are clipped to be at most `clip_grad.max_norm` right
before each optimizer step.
callbacks: list of callbacks that must be called every time an event (end of step, end of epoch, ...) occurs.
Returns: statistics of the training run, like a history of the losses/metrics by epoch
"""
if isinstance(train_length, int):
train_length = TrainLengthNEpochs(train_length)
assert dls.train is not None
if device is None: device = get_best_available_device()
if callbacks is None: callbacks = []
n_steps = 0
n_epochs_completed = 0
train_loss_history = []
train_metric_history = []
valid_metric_history = []
while True:
model.train()
train_losses_epoch = None
n_examples_epoch = 0
for x, y, *extra_xs in dls.train:
x, y = x.to(device), y.to(device)
opt.zero_grad()
preds = model(x, *extra_xs)
loss = loss_func(preds, y)
loss.backward()
if clip_grad is not None:
torch.nn.utils.clip_grad_norm_(clip_grad.params, clip_grad.max_norm)
opt.step()
n_steps += 1
if sched is not None: sched.step()
with torch.no_grad():
actual_bs = x.shape[0]
n_examples_epoch += actual_bs
detached_loss = loss.detach()[None] * actual_bs
train_losses_epoch = (
detached_loss if train_losses_epoch is None else torch.cat((train_losses_epoch, detached_loss))
)
for cb in callbacks:
cb.on_step_end(loss, model, opt)
#losses.append(loss.detach().cpu().item())
#print('Train loss = ', loss.detach())
#print('Epoch completed')
model.eval()
train_metric, valid_metric, metric_name = None, None, ''
if metric is not None:
metric_name = metric.name
train_metric = metric(model, dls.train, device=device)
train_metric_history.append(train_metric)
if dls.valid is not None:
valid_metric = metric(model, dls.valid, device=device)
valid_metric_history.append(valid_metric)
avg_train_loss = ((train_losses_epoch.sum()) / n_examples_epoch).item()
train_loss_history.append(avg_train_loss)
n_epochs_completed += 1
epoch_stats = EpochTrainingStats(avg_train_loss, train_metric, valid_metric, n_epochs_completed, metric_name)
for cb in callbacks:
cb.on_epoch_end(epoch_stats, model, opt)
if train_length.must_stop(epoch_stats):
break
#valid_metric_str = f'{valid_metric:.4f}' if dls.valid is not None else 'N/A'
#last_iter_train_loss = loss.detach().item()
#print(f'Avg train loss = {avg_train_loss:.4f}, Last iter train loss = {last_iter_train_loss:.4f}')
#print(f'Train metric (f1) = {train_metric}')
#print(f'Valid metric (f1) = {valid_metric}')
return TrainingStats(
np.array(train_loss_history),
np.array(train_metric_history),
np.array(valid_metric_history),
n_epochs_completed,
n_steps,
)
| 5,342,850
|
def _error_to_level(error):
"""Convert a boolean error field to 'Error' or 'Info' """
if error:
return 'Error'
else:
return 'Info'
| 5,342,851
|
def add_review(status):
"""
Adds the flags on the tracker document.
Input: tracker document.
Output: sum of the switches.
"""
cluster = status['cluster_switch']
classify = status['classify_switch']
replace = status['replace_switch']
final = status['final_switch']
finished = status['finished_switch']
num = cluster + classify + replace + final + finished
return num
| 5,342,852
|
def test_pyramid_conv_encoder_forward_invalid_mask(config, images, masks):
"""Test PyramidConvEncoder.forward handles some invalid masks."""
encoder = encoders.PyramidConvEncoder(config=config, pretrained=False)
masks[-2:] = 0
actual = encoder(images, masks)
assert actual.shape == (BATCH_SIZE, *encoder.feature_shape)
assert actual[-2:].eq(0).all()
assert not actual[:-2].eq(0).all()
assert not torch.isnan(actual).any()
| 5,342,853
|
def n_floordiv(a, b):
"""safe floordiv"""
return np.where(b != 0, o.floordiv(a, b), 1)
| 5,342,854
|
def load_encoder_inputs(encoder_np_vecs='train_body_vecs.npy'):
"""
Load variables & data that are inputs to encoder.
Parameters
----------
encoder_np_vecs : str
filename of serialized numpy.array of encoder input (issue title)
Returns
-------
encoder_input_data : numpy.array
The issue body
doc_length : int
The standard document length of the input for the encoder after padding
the shape of this array will be (num_examples, doc_length)
"""
vectorized_body = np.load(encoder_np_vecs)
# Encoder input is simply the body of the issue text
encoder_input_data = vectorized_body
doc_length = encoder_input_data.shape[1]
print('Shape of encoder input: {}'.format(encoder_input_data.shape))
return encoder_input_data, doc_length
| 5,342,855
|
def test_parse_args():
"""Test argument parser of cli."""
argv = ['hourly', '--startyear', '2008',
'--variables', 'total_precipitation', '--statistics',
'--endyear', '2008', '--ensemble', '--land']
args = cli._parse_args(argv)
assert args.command == 'hourly'
assert args.days == list(range(1, 32))
assert args.endyear == 2008
assert args.ensemble
assert args.format == 'netcdf'
assert args.hours == list(range(0, 24))
assert args.levels == ref.PLEVELS
assert args.months == list(range(1, 13))
assert args.outputprefix == 'era5'
assert not args.merge
assert args.startyear == 2008
assert args.statistics
assert not args.threads
assert args.variables == ['total_precipitation']
assert args.land
assert not args.area
| 5,342,856
|
def save_groups(portal, groups, path, f='json', cls=None, **kw):
""" Save groups in the portal to disk. """
serializer = _select_serializer(f, cls, **kw)
serializer.serialize_groups(groups, path, portal)
| 5,342,857
|
def parse_row(row):
"""Create an Event object from a data row
Args:
row: Tuple of input data.
Returns:
Event object.
"""
# Ignore either 1 or 2 columns that preceed year
if len(row) > 6:
row = row[2:]
else:
row = row[1:]
# Remove occasional 'r' or 'x' character prefix from year,
# I'm not sure what these specify.
year = row[0]
if not year[0].isdigit():
year = year[1:]
return Event(year=int(year),
latitude=float(row[1]),
longitude=float(row[2]),
depth=float(row[3]),
magnitude=float(row[4]))
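# Usage sketch with made-up values: a 6-column row drops one leading column and strips the
# 'r'/'x' prefix from the year (Event is assumed to expose the fields it is built with).
event = parse_row(('ISC', 'r2011', '38.3', '142.4', '29.0', '9.1'))
print(event.year, event.magnitude)  # 2011 9.1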
| 5,342,858
|
def vertical_log_binning(p, data):
"""Create vertical log_binning. Used for peak sale."""
import math
import operator
index, value = zip(*sorted(data.items(), key=operator.itemgetter(1)))
bin_result = []
value = list(value)
bin_edge = [min(value)]
i = 1
while len(value) > 0:
num_to_bin = int(math.ceil(p * len(value)))
# print num_to_bin
edge_value = value[num_to_bin - 1]
bin_edge.append(edge_value)
to_bin = list(filter(lambda x: x <= edge_value, value))
bin_result += [i] * len(to_bin)
value = list(filter(lambda x: x > edge_value, value))
# print len(bin_result) + len(value)
i += 1
# print '\n'
bin_result_dict = dict(zip(index, bin_result))
bin_distri = Counter(bin_result_dict.values())
# print len(index), len(bin_result)
return bin_result_dict, bin_edge, bin_distri
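# Usage sketch (Counter is assumed to be imported from collections at module level, as the
# function body requires): with p=0.5, the smallest half of the values still unbinned is
# assigned to each successive bin.
bins, edges, distri = vertical_log_binning(0.5, {'a': 1, 'b': 2, 'c': 3, 'd': 4})
print(bins)   # {'a': 1, 'b': 1, 'c': 2, 'd': 3}
print(edges)  # [1, 2, 3, 4]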
| 5,342,859
|
def get_main_name(ext="", prefix=""):
"""Returns the base name of the main script. Can optionally add an
extension or prefix."""
return prefix + op.splitext(op.basename(__main__.__file__))[0] + ext
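# Usage sketch for get_main_name; only meaningful when running as a script
# (it reads __main__.__file__), and `op` is assumed to be os.path as above.
_log_name = get_main_name(ext=".log")       # e.g. "train.log" when run via train.py
_dbg_name = get_main_name(prefix="debug_")  # e.g. "debug_train"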
| 5,342,860
|
def _grad_block_to_band(op, grad):
"""
Gradient associated to the ``block_to_band`` operator.
"""
grad_block = banded_ops.band_to_block(
grad, op.get_attr("block_size"), symmetric=op.get_attr("symmetric"), gradient=True
)
return grad_block
| 5,342,861
|
def LSTM(nO, nI):
"""Create an LSTM layer. Args: number out, number in"""
weights = LSTM_weights(nO, nI)
gates = LSTM_gates(weights.ops)
return Recurrent(RNN_step(weights, gates))
| 5,342,862
|
def test_setup_polynomial_wrong_weight_shape(small_data):
"""Ensures that an exception is raised if input weights and data are different shapes."""
weights = np.ones(small_data.shape[0] + 1)
with pytest.raises(ValueError):
_algorithm_setup._setup_polynomial(small_data, weights=weights)
| 5,342,863
|
def test_mse_multiple_hidden_units():
"""Test the MSE loss w/ 2 hidden units."""
_check_ae(MSE_MAX_SCORE, hidden_units=(2,))
| 5,342,864
|
def get_token(host, port, headers, auth_data):
"""Return token for a user.
"""
url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data)
return r.json().get('AccessToken')
| 5,342,865
|
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
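# Usage sketch for generateDateTime: the argument is a millisecond timestamp
# (string or number); the exact strings depend on the local timezone because
# datetime.fromtimestamp is used.
_date, _time = generateDateTime("1577836800000")
# e.g. _date -> "20200101", _time -> "00:00:00.000000" in UTC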
| 5,342,866
|
def get_activation_function():
"""
Returns tf.nn activation function
"""
return ACTIVATION_FUNCTION
| 5,342,867
|
def doi_and_title_from_citation(citation):
"""
    Gets the DOI and title from a plaintext citation.
    Uses a search against CrossRef.org to retrieve the paper DOI.
Parameters
----------
citation : str
Full journal article citation.
Example: Senís, Elena, et al. "CRISPR/Cas9‐mediated genome
engineering: An adeno‐associated viral (AAV) vector
toolbox. Biotechnology journal 9.11 (2014): 1402-1412.
Returns
-------
    (doi, title) : tuple of str
        Returns None instead if the citation could not be resolved to a DOI.
"""
# Encode raw citation
citation = urllib_quote(citation)
# Search for citation on CrossRef.org to try to get a DOI link
api_search_url = 'http://search.labs.crossref.org/dois?q=' + citation
try:
response = requests.get(api_search_url).json()
except json.decoder.JSONDecodeError:
return None
resp = response[0]
doi = resp.get('doi')
title = resp.get('title')
if doi is None:
return None
# If crossref returns a http://dx.doi.org/ link, retrieve the doi from it
# and save the URL to pass to doi_to_info
if 'http://dx.doi.org/' in doi:
doi = doi.replace('http://dx.doi.org/', '')
doi = doi.strip()
return doi, title
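# Usage sketch for doi_and_title_from_citation; this performs a live HTTP
# request against the CrossRef labs search endpoint, so it is guarded here.
try:
    _result = doi_and_title_from_citation(
        'Senis, Elena, et al. "CRISPR/Cas9-mediated genome engineering" (2014)'
    )
except requests.exceptions.RequestException:
    _result = None
if _result is not None:
    _doi, _title = _result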
| 5,342,868
|
def prep_ciphertext(ciphertext):
"""Remove whitespace."""
message = "".join(ciphertext.split())
print("\nciphertext = {}".format(ciphertext))
return message
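# Usage sketch for prep_ciphertext: all whitespace, including newlines, is removed.
_clean = prep_ciphertext("XKCD QRST\nUVWX")  # -> "XKCDQRSTUVWX"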
| 5,342,869
|
def setup_sample_file(base_filename, args, num_threads=1):
"""
Return a sample data file, the ancestors file, a corresponding recombination rate
(a single number or a RateMap), a prefix to use for files, and None
"""
gmap = args.genetic_map
sd = tsinfer.load(base_filename + ".samples")
anc = tsinfer.generate_ancestors(
sd,
num_threads=num_threads,
path=base_filename + ".ancestors",
)
logger.info("GA done")
inference_pos = anc.sites_position[:]
match = re.search(r'(chr\d+)', base_filename)
if match or gmap is not None:
if gmap is not None:
try:
rho=float(gmap)
logger.info(f"Using rate {gmap} for the recombination rate")
except ValueError:
rho = intervals.read_hapmap(gmap)
logger.info(f"Using file from {gmap} for the recombination map")
else:
chr = match.group(1)
logger.info(f"Using {chr} from HapMapII_GRCh37 for the recombination map")
gmap = stdpopsim.get_species("HomSap").get_genetic_map(id="HapMapII_GRCh37")
if not gmap.is_cached():
gmap.download()
filename = os.path.join(gmap.map_cache_dir, gmap.file_pattern.format(id=chr))
rho = intervals.read_hapmap(filename)
else:
        rho = 1e-8  # shouldn't matter what this is - it is relative to the mismatch
#if np.any(d==0):
# w = np.where(d==0)
# raise ValueError("Zero recombination rates at", w, inference_pos[w])
return sd.path, anc.path, rho, "", None
| 5,342,870
|
def main():
"""Runs dir()."""
call = PROCESS_POOL.submit(call_dir)
while True:
if call.done():
result = call.result().decode()
print("Results: \n\n{}".format(result))
return result
| 5,342,871
|
def bacthing_predict_SVGPVAE_rotated_mnist(test_data_batch, vae, svgp,
qnet_mu, qnet_var, aux_data_train):
"""
Get predictions for test data. See chapter 3.3 in Casale's paper.
This version supports batching in prediction pipeline (contrary to function predict_SVGPVAE_rotated_mnist) .
:param test_data_batch: batch of test data
:param vae: fitted (!) VAE object
:param svgp: fitted (!) SVGP object
:param qnet_mu: precomputed encodings (means) of train dataset (N_train, L)
:param qnet_var: precomputed encodings (vars) of train dataset (N_train, L)
:param aux_data_train: train aux data (N_train, 10)
:return:
"""
images_test_batch, aux_data_test_batch = test_data_batch
_, w, h, _ = images_test_batch.get_shape()
# get latent samples for test data from GP posterior
p_m, p_v = [], []
for l in range(qnet_mu.get_shape()[1]): # iterate over latent dimensions
p_m_l, p_v_l, _, _ = svgp.approximate_posterior_params(index_points_test=aux_data_test_batch,
index_points_train=aux_data_train,
y=qnet_mu[:, l], noise=qnet_var[:, l])
p_m.append(p_m_l)
p_v.append(p_v_l)
p_m = tf.stack(p_m, axis=1)
p_v = tf.stack(p_v, axis=1)
epsilon = tf.random.normal(shape=tf.shape(p_m), dtype=tf.float64)
latent_samples = p_m + epsilon * tf.sqrt(p_v)
# predict (decode) latent images.
# ===============================================
# Since this is generation (testing pipeline), could add \sigma_y to images
recon_images_test_logits = vae.decode(latent_samples)
# Gaussian observational likelihood, no variance
recon_images_test = recon_images_test_logits
# Bernoulli observational likelihood
# recon_images_test = tf.nn.sigmoid(recon_images_test_logits)
# Gaussian observational likelihood, fixed variance \sigma_y
# recon_images_test = recon_images_test_logits + tf.random.normal(shape=tf.shape(recon_images_test_logits),
# mean=0.0, stddev=0.04, dtype=tf.float64)
# MSE loss for CGEN (here we do not consider MSE loss, ince )
recon_loss = tf.reduce_sum((images_test_batch - recon_images_test_logits) ** 2)
# report per pixel loss
K = tf.cast(w, dtype=tf.float64) * tf.cast(h, dtype=tf.float64)
recon_loss = recon_loss / K
# ===============================================
return recon_images_test, recon_loss
| 5,342,872
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""
Set up Strava Home Assistant config entry initiated through the HASS-UI.
"""
hass.data.setdefault(DOMAIN, {})
# OAuth Stuff
try:
implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass=hass, config_entry=entry
)
except ValueError:
implementation = config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
entry.data[CONF_CLIENT_ID],
entry.data[CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
OAuth2FlowHandler.async_register_implementation(hass, implementation)
oauth_websession = config_entry_oauth2_flow.OAuth2Session(
hass, entry, implementation
)
await oauth_websession.async_ensure_token_valid()
# webhook view to get notifications for strava activity updates
def strava_update_event_factory(data, event_type=CONF_STRAVA_DATA_UPDATE_EVENT):
hass.bus.fire(event_type, data)
strava_webhook_view = StravaWebhookView(
oauth_websession=oauth_websession,
event_factory=strava_update_event_factory,
host=get_url(hass, allow_internal=False, allow_ip=False),
hass=hass,
)
hass.http.register_view(strava_webhook_view)
# event listeners
async def strava_startup_functions():
await renew_webhook_subscription(
hass=hass, entry=entry, webhook_view=strava_webhook_view
)
await strava_webhook_view.fetch_strava_data()
return True
def ha_start_handler(event):
"""
called when HA rebooted
i.e. after all webhook views have been registered and are available
"""
hass.async_create_task(strava_startup_functions())
def component_reload_handler(event):
"""called when the component reloads"""
hass.async_create_task(strava_startup_functions())
async def async_strava_config_update_handler():
"""called when user changes sensor configs"""
await strava_webhook_view.fetch_strava_data()
return
def strava_config_update_handler(event):
hass.async_create_task(async_strava_config_update_handler())
def core_config_update_handler(event):
"""
handles relevant changes to the HA core config.
In particular, for URL and Unit System changes
"""
if "external_url" in event.data.keys():
hass.async_create_task(
renew_webhook_subscription(
hass=hass, entry=entry, webhook_view=strava_webhook_view
)
)
if "unit_system" in event.data.keys():
hass.async_create_task(strava_webhook_view.fetch_strava_data())
# register event listeners
hass.data[DOMAIN]["remove_update_listener"] = []
# if hass.bus.async_listeners().get(EVENT_HOMEASSISTANT_START, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_START, ha_start_handler)
)
# if hass.bus.async_listeners().get(EVENT_CORE_CONFIG_UPDATE, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, core_config_update_handler)
)
if hass.bus.async_listeners().get(CONF_STRAVA_RELOAD_EVENT, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(CONF_STRAVA_RELOAD_EVENT, component_reload_handler)
)
if hass.bus.async_listeners().get(CONF_STRAVA_CONFIG_UPDATE_EVENT, 0) < 1:
hass.data[DOMAIN]["remove_update_listener"].append(
hass.bus.async_listen(
CONF_STRAVA_CONFIG_UPDATE_EVENT, strava_config_update_handler
)
)
hass.data[DOMAIN]["remove_update_listener"] = [
entry.add_update_listener(strava_config_update_helper)
]
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
| 5,342,873
|
def normalize_boxes(boxes: List[Tuple], img_shape: Union[Tuple, List]) -> List[Tuple]:
"""
Transform bounding boxes back to yolo format
"""
img_height = img_shape[1]
img_width = img_shape[2]
boxes_ = []
for i in range(len(boxes)):
x1, y1, x2, y2 = boxes[i]
width = x2 - x1
height = y2 - y1
x_mid = x1 + 0.5 * width
y_mid = y1 + 0.5 * height
box = [
x_mid / img_width,
y_mid / img_height,
width / img_width,
height / img_height,
]
boxes_.append(box)
return boxes_
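# Usage sketch for normalize_boxes: corner boxes (x1, y1, x2, y2) in pixels are
# converted to normalized YOLO (x_center, y_center, width, height); note the
# image shape is interpreted as (channels, height, width).
_boxes = [(10, 20, 110, 220)]
_yolo = normalize_boxes(_boxes, (3, 400, 640))
# -> [[0.09375, 0.3, 0.15625, 0.5]]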
| 5,342,874
|
def _make_reference_filters(filters, ref_dimension, offset_func):
"""
Copies and replaces the reference dimension's definition in all of the filters applied to a dataset query.
This is used to shift the dimension filters to fit the reference window.
:param filters:
:param ref_dimension:
:param offset_func:
:return:
"""
reference_filters = []
for ref_filter in filters:
if ref_filter.field is ref_dimension:
# NOTE: Important to apply the offset function to the start and stop properties because the date math can
# become expensive over many rows
ref_filter = copy.copy(ref_filter)
ref_filter.start = offset_func(ref_filter.start)
ref_filter.stop = offset_func(ref_filter.stop)
reference_filters.append(ref_filter)
return reference_filters
| 5,342,875
|
def test_division():
"""
Divide two units.
"""
pc_cgs = cm_per_pc
km_cgs = cm_per_km
# Create symbols
pc_sym = Symbol("pc", positive=True)
km_sym = Symbol("km", positive=True)
s_sym = Symbol("s", positive=True)
# Create units
u1 = Unit("pc")
u2 = Unit("km * s")
u3 = u1 / u2
assert_true(u3.expr == pc_sym / (km_sym * s_sym))
assert_allclose_units(u3.base_value, pc_cgs / km_cgs, 1e-12)
assert_true(u3.dimensions == 1 / time)
| 5,342,876
|
def detect():
"""
Detects the shell the user is currently using. The logic is picked from
Docker Machine
https://github.com/docker/machine/blob/master/libmachine/shell/shell.go#L13
"""
shell = os.getenv("SHELL")
if not shell:
return None
if os.getenv("__fish_bin_dir"):
return "fish"
return os.path.basename(shell)
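# Usage sketch for detect: returns e.g. "bash", "zsh" or "fish", or None when
# the SHELL environment variable is not set (e.g. on Windows).
_shell = detect()
if _shell is None:
    print("could not detect the current shell")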
| 5,342,877
|
def test_sanity_pass_declarative(ini_settings, dbsession):
"""See database sanity check understands about relationships and don't deem them as missing column."""
engine = engine_from_config(ini_settings, 'sqlalchemy.')
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
Base, DeclarativeTestModel = gen_declarative()
try:
Base.metadata.drop_all(engine, tables=[DeclarativeTestModel.__table__])
except sqlalchemy.exc.NoSuchTableError:
pass
Base.metadata.create_all(engine, tables=[DeclarativeTestModel.__table__])
try:
assert is_sane_database(Base, session) is True
finally:
Base.metadata.drop_all(engine)
| 5,342,878
|
def load_plane_dataset(name, num_points, flip_axes=False):
"""Loads and returns a plane dataset.
Args:
name: string, the name of the dataset.
num_points: int, the number of points the dataset should have,
flip_axes: bool, flip x and y axes if True.
Returns:
A Dataset object, the requested dataset.
Raises:
        ValueError: If `name` is an unknown dataset.
"""
try:
return {
'gaussian': GaussianDataset,
'crescent': CrescentDataset,
'crescent_cubed': CrescentCubedDataset,
'sine_wave': SineWaveDataset,
'abs': AbsDataset,
'sign': SignDataset,
'four_circles': FourCircles,
'diamond': DiamondDataset,
'two_spirals': TwoSpiralsDataset,
'checkerboard': CheckerboardDataset,
'rings': RingsDataset,
            '8-gaussians': EightGaussiansDataset
}[name](num_points=num_points, flip_axes=flip_axes)
except KeyError:
raise ValueError('Unknown dataset: {}'.format(name))
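# Usage sketch for load_plane_dataset; the dataset classes in the mapping are
# assumed to be defined in this module.
_train = load_plane_dataset('two_spirals', num_points=1000)
# An unknown name raises ValueError, e.g. load_plane_dataset('nope', num_points=10)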
| 5,342,879
|
def get_toxic(annotated_utterance, probs=True, default_probs=None, default_labels=None):
"""Function to get toxic classifier annotations from annotated utterance.
Args:
annotated_utterance: dictionary with annotated utterance, or annotations
probs: return probabilities or not
        default_probs: default probabilities to return; if None, an empty dict is used
        default_labels: default labels to return; if None, an empty list is used
    Returns:
        dictionary with toxic probabilities if probs == True, or toxic labels otherwise
"""
default_probs = {} if default_probs is None else default_probs
default_labels = [] if default_labels is None else default_labels
return _get_etc_model(
annotated_utterance,
"toxic_classification",
probs=probs,
default_probs=default_probs,
default_labels=default_labels,
)
| 5,342,880
|
def compute_energy_lapkmode(X,C,l,W,sigma,bound_lambda):
"""
compute Laplacian K-modes energy in discrete form
"""
    e_dist = ecdist(X, C, squared=True)
g_dist = np.exp(-e_dist/(2*sigma**2))
pairwise = 0
Index_list = np.arange(X.shape[0])
for k in range(C.shape[0]):
        tmp = np.asarray(np.where(l == k))
        if tmp.size != 1:
tmp = tmp.squeeze()
else:
tmp = tmp[0]
# print('length of tmp ', len(tmp))
# pairwise = pairwise - W[tmp,:][:,tmp].sum() # With potts values -1/0
nonmembers = np.in1d(Index_list,tmp,invert =True) # With potts values 0/1
pairwise = pairwise + W[tmp,:][:,nonmembers].sum()
E_kmode = compute_km_energy(l,g_dist.T)
print(E_kmode)
E = (bound_lambda)*pairwise + E_kmode
return E
| 5,342,881
|
def set_openid_cookie(response, openid):
"""Utility method to consistently set the openid cookie."""
    print('SETTING openid cookie to: %s' % openid)
response.set_cookie('openid', openid,
expires=(datetime.datetime.now() + datetime.timedelta(days=3650)), # expires in 10 years
httponly=True)
| 5,342,882
|
def calc_2d_wave_map(wave_grid, x_dms, y_dms, tilt, oversample=2, padding=10, maxiter=5, dtol=1e-2):
"""Compute the 2D wavelength map on the detector.
:param wave_grid: The wavelength corresponding to the x_dms, y_dms, and tilt values.
:param x_dms: the trace x position on the detector in DMS coordinates.
:param y_dms: the trace y position on the detector in DMS coordinates.
:param tilt: the trace tilt angle in degrees.
:param oversample: the oversampling factor of the input coordinates.
:param padding: the native pixel padding around the edge of the detector.
:param maxiter: the maximum number of iterations used when solving for the wavelength at each pixel.
:param dtol: the tolerance of the iterative solution in pixels.
:type wave_grid: array[float]
:type x_dms: array[float]
:type y_dms: array[float]
:type tilt: array[float]
:type oversample: int
:type padding: int
:type maxiter: int
:type dtol: float
:returns: wave_map_2d - an array containing the wavelength at each pixel on the detector.
:rtype: array[float]
"""
os = np.copy(oversample)
xpad = np.copy(padding)
ypad = np.copy(padding)
# No need to compute wavelengths across the entire detector, slightly larger than SUBSTRIP256 will do.
dimx, dimy = 2048, 300
y_dms = y_dms + (dimy - 2048) # Adjust y-coordinate to area of interest.
# Generate the oversampled grid of pixel coordinates.
x_vec = np.arange((dimx + 2*xpad)*os)/os - (os - 1)/(2*os) - xpad
y_vec = np.arange((dimy + 2*ypad)*os)/os - (os - 1)/(2*os) - ypad
x_grid, y_grid = np.meshgrid(x_vec, y_vec)
# Iteratively compute the wavelength at each pixel.
delta_x = 0.0 # A shift in x represents a shift in wavelength.
for niter in range(maxiter):
# Assume all y have same wavelength.
wave_iterated = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1]) # Invert arrays to get increasing x.
# Compute the tilt angle at the wavelengths.
tilt_tmp = np.interp(wave_iterated, wave_grid, tilt)
# Compute the trace position at the wavelengths.
x_estimate = np.interp(wave_iterated, wave_grid, x_dms)
y_estimate = np.interp(wave_iterated, wave_grid, y_dms)
# Project that back to pixel coordinates.
x_iterated = x_estimate + (y_grid - y_estimate)*np.tan(np.deg2rad(tilt_tmp))
# Measure error between requested and iterated position.
delta_x = delta_x + (x_iterated - x_grid)
# If the desired precision has been reached end iterations.
if np.all(np.abs(x_iterated - x_grid) < dtol):
break
# Evaluate the final wavelength map, this time setting out-of-bounds values to NaN.
wave_map_2d = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1], left=np.nan, right=np.nan)
# Extend to full detector size.
tmp = np.full((os*(dimx + 2*xpad), os*(dimx + 2*xpad)), fill_value=np.nan)
tmp[-os*(dimy + 2*ypad):] = wave_map_2d
wave_map_2d = tmp
return wave_map_2d
| 5,342,883
|
def is_pipeline_variable(var: object) -> bool:
"""Check if the variable is a pipeline variable
Args:
var (object): The variable to be verified.
Returns:
bool: True if it is, False otherwise.
"""
# Currently Expression is on top of all kinds of pipeline variables
# as well as PipelineExperimentConfigProperty and PropertyFile
# TODO: We should deprecate the Expression and replace it with PipelineVariable
return isinstance(var, Expression)
| 5,342,884
|
def scan(infile: str = 'scanner_domains.txt', outfile: str = 'scanner_log.txt', signature: str = 'None'):
"""Scans an input list for metadata and, optionally, for the presence of a given signature and sends the results to
be written to a file.
:param infile: An optional string containing the path to the input data to use for this scan.
:param outfile: An optional string containing the path to the output file to write results to.
:param signature: An optional string containing the signature to check each input item for.
"""
pool = Pool(cpu_count() * 10)
batch = read_infile_threaded(infile)
batch_counter = 0
    batch_count = len(batch)
for item in batch:
batch_counter += 1
print('Batch #{0} | Batches remaining: {1} | {2}% complete'.format(
batch_counter,
batch_count - batch_counter,
round(100 * (batch_counter / batch_count), 3),
))
responses = pool.map(get, item)
for response in responses:
sig_detect = 'N/A'
if response[0]:
doc_html = response[2]
doc_text = response[3]
metadata = metadata_parse(doc_html)
if signature != 'None':
sig_detect = signature_parse(doc_text, signature)
write_outfile(
'{0}, {1}, {2}, {3}'.format(response[1], metadata.get('title'), metadata.get('desc'), sig_detect),
outfile
)
else:
write_outfile('{0}, \'{1}\''.format(response[1], response[2]), outfile)
| 5,342,885
|
def export_data():
"""Exports data to a file"""
data = {}
data['adgroup_name'] = request.args.get('name')
if data['adgroup_name']:
data['sitelist'] = c['adgroups'].find_one({'name':data['adgroup_name']}, {'sites':1})['sites']
return render_template("export.html", data=data)
| 5,342,886
|
def thresh_bin(img, thresh_limit=60):
""" Threshold using blue channel """
b, g, r = cv2.split(img)
# mask = get_salient(r)
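    # NOTE: thresh_limit is currently unused; the threshold value below is hard-coded to 50.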
mask = cv2.threshold(b, 50, 255, cv2.THRESH_BINARY_INV)[1]
return mask
| 5,342,887
|
def get_aux():
"""Get the entire auxiliary stack. Not commonly used."""
@parser
def g(c: Cursor, a: Any):
return a, c, a
return g
| 5,342,888
|
def _destupidize_dict(mylist):
"""The opposite of _stupidize_dict()"""
output = {}
for item in mylist:
output[item['key']] = item['value']
return output
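# Usage sketch for _destupidize_dict: a list of {'key': ..., 'value': ...}
# records is collapsed back into a plain dict.
_records = [{'key': 'region', 'value': 'us-east-1'}, {'key': 'tier', 'value': 'prod'}]
_config = _destupidize_dict(_records)  # -> {'region': 'us-east-1', 'tier': 'prod'}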
| 5,342,889
|
def test_J5(i):
""" Test a property of J from result 2 of the paper with a log base """
d = SD([1 / i] * i)
d.set_base('e')
assert J(d) == pytest.approx((i - 1) * (np.log(i) - np.log(i - 1)))
| 5,342,890
|
def print_on_stderr(msg, *param):
"""Print msg on stderr"""
print(msg.format(*param), file=sys.stderr)
| 5,342,891
|
def equipment_add(request, type_, id_=None):
"""Adds an equipment."""
template = {}
if request.method == 'POST':
form = EquipmentForm(request.POST)
if form.is_valid():
form.save(request.user, id_)
return redirect('settings_equipment')
template['form'] = form
elif id_:
template['form'] = EquipmentForm(instance=Equipment.objects.get(pk=id_))
else:
template['form'] = EquipmentForm()
return render(request, 'settings/equipment_add.html', template)
| 5,342,892
|
def sync_app(search_app, batch_size=None, post_batch_callback=None):
"""Syncs objects for an app to ElasticSearch in batches of batch_size."""
model_name = search_app.es_model.__name__
batch_size = batch_size or search_app.bulk_batch_size
logger.info(f'Processing {model_name} records, using batch size {batch_size}')
read_indices, write_index = search_app.es_model.get_read_and_write_indices()
num_source_rows_processed = 0
num_objects_synced = 0
total_rows = search_app.queryset.count()
it = search_app.queryset.values_list('pk', flat=True).iterator(chunk_size=batch_size)
batches = slice_iterable_into_chunks(it, batch_size)
for batch in batches:
objs = search_app.queryset.filter(pk__in=batch)
num_actions = sync_objects(
search_app.es_model,
objs,
read_indices,
write_index,
post_batch_callback=post_batch_callback,
)
emit_progress = (
(num_source_rows_processed + num_actions) // PROGRESS_INTERVAL
- num_source_rows_processed // PROGRESS_INTERVAL
> 0
)
num_source_rows_processed += len(batch)
num_objects_synced += num_actions
if emit_progress:
logger.info(
f'{model_name} rows processed: {num_source_rows_processed}/{total_rows} '
f'{num_source_rows_processed*100//total_rows}%',
)
logger.info(f'{model_name} rows processed: {num_source_rows_processed}/{total_rows} 100%.')
if num_source_rows_processed != num_objects_synced:
logger.warning(
f'{num_source_rows_processed - num_objects_synced} deleted objects detected while '
f'syncing model {model_name}',
)
| 5,342,893
|
def _parse_text(val, **options):
"""
:return: Parsed value or value itself depends on 'ac_parse_value'
"""
if val and options.get('ac_parse_value', False):
return parse_single(val)
return val
| 5,342,894
|
async def test_zeroconf_host_already_exists(hass: HomeAssistant) -> None:
"""Test hosts already exists from zeroconf."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
"host": "1.1.1.1",
"name": "Envoy",
"username": "test-username",
"password": "test-password",
},
title="Envoy",
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.enphase_envoy.config_flow.EnvoyReader.getData",
return_value=True,
), patch(
"homeassistant.components.enphase_envoy.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
properties={"serialnum": "1234"},
host="1.1.1.1",
),
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert config_entry.unique_id == "1234"
assert config_entry.title == "Envoy 1234"
assert len(mock_setup_entry.mock_calls) == 1
| 5,342,895
|
def test_get_networkx_graph(dummy_project):
"""test_get_networkx_graph."""
dummy_project.get_networkx_graph()
# TODO
# assert...
| 5,342,896
|
def output_path(model, model_set):
"""Return path to model output directory
Parameters
----------
model : str
model_set : str
"""
path = model_path(model, model_set=model_set)
return os.path.join(path, 'output')
| 5,342,897
|
def create_balcony_ungrouped(bm, faces, prop):
"""Make a balcony on each face selection"""
for f in faces:
f.select = False
if not valid_ngon(f):
ngon_to_quad(bm, f)
normal = f.normal.copy()
split_faces = create_balcony_split(bm, f, prop)
for f in split_faces:
add_faces_to_map(bm, [f], FaceMap.BALCONY)
_, top = extrude_balcony(bm, f, prop.depth, normal)
if prop.has_railing:
prop.rail.show_extra_props = True
add_railing_to_balcony(bm, top, normal, prop)
bmesh.ops.delete(bm, geom=[f], context="FACES_ONLY")
| 5,342,898
|
def get_rotation_matrix(rotation_angles):
"""Get the rotation matrix from euler's angles
Parameters
-----
rotation_angles: array-like or list
Three euler angles in the order [sai, theta, phi] where
sai = rotation along the x-axis
theta = rotation along the y-axis
phi = rotation along the z-axis
Returns
-----
A rotation matrix of shape (3, 3)
    References
-----
Computing Euler angles from a rotation matrix by Gregory G. Slabaugh
https://www.gregslabaugh.net/publications/euler.pdf
"""
sai = rotation_angles[0] # s
theta = rotation_angles[1] # t
phi = rotation_angles[2] # p
# find all the required sines and cosines
cs = np.cos(sai)
ct = np.cos(theta)
cp = np.cos(phi)
ss = np.sin(sai)
st = np.sin(theta)
sp = np.sin(phi)
    # construct the full rotation matrix R = Rz(phi) @ Ry(theta) @ Rx(sai);
    # the third row depends on sai and theta only, per the Slabaugh reference.
    rotation_matrix = np.array([
        [ct*cp, ss*st*cp-cs*sp, cs*st*cp+ss*sp],
        [ct*sp, ss*st*sp+cs*cp, cs*st*sp-ss*cp],
        [  -st,          ss*ct,          cs*ct]
    ])
return rotation_matrix
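# Usage sketch for get_rotation_matrix: a pure 90-degree rotation about the
# z-axis, i.e. sai = theta = 0 and phi = pi/2.
import numpy as np

_R = get_rotation_matrix([0.0, 0.0, np.pi / 2])
# _R is approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
_rotated = _R @ np.array([1.0, 0.0, 0.0])  # -> approximately [0, 1, 0]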
| 5,342,899
|