content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def use_scratch_dir(original_wf, scratch_dir):
    """
    For all RunVaspCustodian tasks, add the desired scratch dir.
    Args:
        original_wf (Workflow)
        scratch_dir (path): Path to the scratch dir to use. Supports env_chk
    Returns:
        Workflow
    """
    matches = get_fws_and_tasks(original_wf, task_name_constraint="RunVaspCustodian")
    for fw_index, task_index in matches:
        task = original_wf.fws[fw_index].tasks[task_index]
        task["scratch_dir"] = scratch_dir
    return original_wf
from typing import cast
import tqdm
def infer_tests_wrapper(
    weight_path: str, model_name: str, dataset: COCO_Dataset, test_name: str,
    handler_constructor: type,
    data_dump_dir: str=None, video_dump_dir: str=None, img_dump_dir: str=None,
    skip_if_data_dump_exists: bool=False,
    show_preview: bool=False,
    show_pbar: bool=True
):
    """
    Usage:
        infer_tests_wrapper(*args, **kwargs)(infer_func)(*infer_func_args, **infer_func_kwargs)
    The following are reserved parameters:
        'weight_path', 'model_name', 'dataset', 'test_name',
        'accumulate_pred_dump', 'stream_writer', 'leave_stream_writer_open'
    These reserved parameters should be keyword parameters in infer_func, but not specified
    in infer_func_kwargs.
    Ideally these reserved parameters should be last in the list of parameters in infer_func without a default value.
    This is to allow freedom for remaining parameters to be specified as non-keyward arguments.
    TODO: Add functionality for calculating error during inference. (Will need to pass filtered GT handler.)
    """
    def _wrapper(infer_func):
        def _wrapper_inner(*args, **kwargs):
            # Check/Adjust Parameters: normalize scalar arguments to lists so
            # single-model / single-dataset calls share the multi-run code path.
            if isinstance(weight_path, (str, dict)):
                weight_paths = [weight_path]
            elif isinstance(weight_path, (tuple, list)):
                assert all([type(part) in [str, dict] for part in weight_path])
                for part in weight_path:
                    if isinstance(part, dict):
                        for key, val in part.items():
                            assert isinstance(val, str)
                weight_paths = weight_path
            else:
                raise TypeError
            if isinstance(model_name, str):
                model_names = [model_name]
            elif isinstance(model_name, (tuple, list)):
                assert all([type(part) is str for part in model_name])
                model_names = model_name
            else:
                raise TypeError
            assert len(weight_paths) == len(model_names)
            if isinstance(dataset, COCO_Dataset):
                datasets = [dataset]
            elif isinstance(dataset, (tuple, list)):
                assert all([isinstance(part, COCO_Dataset) for part in dataset])
                datasets = dataset
            else:
                raise TypeError
            if isinstance(test_name, str):
                test_names = [test_name]
            elif isinstance(test_name, (tuple, list)):
                assert all([type(part) is str for part in test_name])
                test_names = test_name
            else:
                raise TypeError
            assert len(datasets) == len(test_names)
            # Prepare Dump Directory
            if data_dump_dir is not None:
                make_dir_if_not_exists(data_dump_dir)
                # delete_all_files_in_dir(data_dump_dir, ask_permission=True)
            if video_dump_dir is not None:
                make_dir_if_not_exists(video_dump_dir)
                # delete_all_files_in_dir(video_dump_dir, ask_permission=True)
            if img_dump_dir is not None:
                make_dir_if_not_exists(img_dump_dir)
                # delete_all_files_in_dir(img_dump_dir, ask_permission=True)
            stream_writer = cast(StreamWriter, None)
            # Accumulate/Save Inference Data On Tests
            total_images = sum([len(dataset.images) for dataset in datasets])
            # Bug fix: module is imported as `import tqdm`, so the progress-bar
            # class is tqdm.tqdm — calling the module itself raised TypeError.
            test_pbar = tqdm.tqdm(total=total_images*len(model_names), unit='image(s)', leave=True) if show_pbar else None
            reserved_params = [
                'weight_path', 'model_name', 'dataset', 'test_name',
                'accumulate_pred_dump', 'stream_writer', 'leave_stream_writer_open'
            ]
            for param in reserved_params:
                assert param not in kwargs, f'{param} already exists in kwargs'
                assert param in infer_func.__annotations__, f"{infer_func.__name__} needs to accept a {param} keyword argument to be wrapped by infer_tests_wrapper"
            for weight_path0, model_name0 in zip(weight_paths, model_names):
                video_save_path = f'{video_dump_dir}/{model_name0}.avi' if video_dump_dir is not None else None
                data_dump_save = f'{data_dump_dir}/{model_name0}.json' if data_dump_dir is not None else None
                # Skip this model entirely when its dump already exists and the
                # caller asked not to redo existing work.
                if data_dump_save is not None and file_exists(data_dump_save) and skip_if_data_dump_exists:
                    if test_pbar is not None:
                        for dataset0, test_name0 in zip(datasets, test_names):
                            test_pbar.update(len(dataset0.images))
                    continue
                # Lazily create one StreamWriter and reuse it across models.
                if stream_writer is None:
                    stream_writer = StreamWriter(
                        show_preview=show_preview,
                        video_save_path=video_save_path,
                        dump_dir=img_dump_dir
                    )
                elif video_save_path is not None:
                    stream_writer.video_writer._save_path = video_save_path
                if img_dump_dir is not None:
                    model_img_dump_dir = f'{img_dump_dir}/{model_name0}'
                    make_dir_if_not_exists(model_img_dump_dir)
                else:
                    model_img_dump_dir = None
                data = handler_constructor()
                assert isinstance(data, BasicLoadableHandler)
                assert hasattr(data, '__add__')
                # if video_dump_dir is not None:
                #     video_save_path = f'{video_dump_dir}/{model_name0}.avi'
                # else:
                #     video_save_path = None
                for dataset0, test_name0 in zip(datasets, test_names):
                    if test_pbar is not None:
                        test_pbar.set_description(f'{model_name0} {test_name0}')
                    if img_dump_dir is not None:
                        test_img_dump_dir = f'{model_img_dump_dir}/{test_name0}'
                        make_dir_if_not_exists(test_img_dump_dir)
                        stream_writer.dump_writer._save_dir = test_img_dump_dir
                    # Inject the reserved parameters for this (model, test) pair.
                    kwargs['weight_path'] = weight_path0
                    kwargs['model_name'] = model_name0
                    kwargs['dataset'] = dataset0
                    kwargs['test_name'] = test_name0
                    kwargs['accumulate_pred_dump'] = data_dump_dir is not None
                    kwargs['stream_writer'] = stream_writer
                    kwargs['leave_stream_writer_open'] = True
                    if data_dump_dir is not None:
                        data0 = infer_func(*args, **kwargs)
                        assert isinstance(data0, handler_constructor), f"Encountered dump data of type {type(data0).__name__}. Expected {handler_constructor.__name__}."
                        data += data0
                    else:
                        infer_func(*args, **kwargs)
                    if test_pbar is not None:
                        test_pbar.update(len(dataset0.images))
                if data_dump_dir is not None:
                    data.save_to_path(data_dump_save, overwrite=True)
                # Close the per-model video recorder so the next model starts fresh.
                if stream_writer is not None and stream_writer.video_writer is not None and stream_writer.video_writer.recorder is not None:
                    stream_writer.video_writer.recorder.close()
                    stream_writer.video_writer.recorder = None
            if test_pbar is not None:
                test_pbar.close()
            if stream_writer is not None:
                del stream_writer
        return _wrapper_inner
    return _wrapper
def auto_peak_finder(prominence, x_data, y_data):
    """
    Interactively find peaks: detect peaks at a user-supplied prominence, show
    them on a plot, and repeat until the user accepts the result.
    :param prominence: height from noise to tip of peaks
    :param x_data: x_data as np array
    :param y_data: y_data as np array
    :return: (peak_info, prominence) where peak_info is a dict describing the
        detected peaks (e.g. centers) and prominence is the value finally used
    """
    while True:
        peak_info = find_cents(prominence, y_data, find_all=True)
        plt.plot(x_data, y_data)
        # Mark each detected peak center with a vertical line.
        for center_x in (x_data[idx] for idx in peak_info['center_indices']):
            plt.axvline(x=center_x)
        print(
            f"Peak finder requires user input, please look at the following plot with prominence={prominence}")
        plt.show()
        ans = input(
            "If you are happy with the plot, type y. If not then please type a new prominence ")
        if ans == 'y':
            break
        try:
            prominence = float(ans)
        except ValueError:
            print("You entered an incorrect answer! Trying again...")
        plt.close()
    return peak_info, prominence
from typing import OrderedDict
def dataframe_reg_as_panel(df,against='index',regfunc='OLS'):
    """
    Perform regression for each of the columns of the dataframe, and return
    the regression result as panel.
    Parameters:
    -----------
    against: the variable used as the independent variable in the regression,
        default is the index values of the dataframe.
    regfunc: the regression function used to do the regression.
    """
    # Bug fix: `typing.OrderedDict` is only a type alias and cannot be
    # instantiated; the runtime class lives in collections.
    from collections import OrderedDict
    if against == 'index':
        against = df.index.values
    if regfunc == 'OLS':
        # Default: OLS slope/intercept via the project helper; keep only the
        # fit result (first element of the returned tuple).
        regfunc = lambda yxlist: linreg_OLS_2varyx(yxlist[0], yxlist[1])[0]
    dic = OrderedDict()
    for varname in df.columns:
        dic[varname] = regfunc([df[varname].values, against])
    return pa.Panel(dic)
import threading
def serve_pyr(name, url, root_path, config='config.ini'):
    """Serve the Pyramid app locally in a separate thread that forks a
    subprocess that invokes the pserve executable. Return a function that stops
    serving the Pyramid app.
    """
    server_process = _fork_server_process(url, root_path, config=config)
    monitor = threading.Thread(
        target=_monitor_server_process,
        kwargs={'process': server_process, 'name': name},
        daemon=True,
    )
    monitor.start()
    logger.info('%s should be being served at %s', name, url)

    def stop_serving():
        # Closure used by the caller to terminate the forked server.
        logger.info('Shutting down %s at %s.', name, url)
        server_process.terminate()
        logger.info('%s at %s should be shut down.', name, url)
    return stop_serving
def order_line_needs_automatic_fulfillment(line_data: OrderLineInfo) -> bool:
    """Check if given line is digital and should be automatically fulfilled."""
    settings = get_default_digital_content_settings()
    auto_by_default = settings["automatic_fulfillment"]
    content = line_data.digital_content
    # Non-digital lines are never auto-fulfilled.
    if not content:
        return False
    # Auto-fulfill when either the global default applies or the content
    # opts in explicitly.
    return bool(
        (auto_by_default and content.use_default_settings)
        or content.automatic_fulfillment
    )
def docker_setup(project_path: str) -> bool:
    """ Tries to find evidence of a docker setup in the project. """
    # Either a Dockerfile or a docker-compose file counts as evidence.
    patterns = ('[Dd]ockerfile', '[Dd]ocker-compose.yml')
    return any(
        search_filename(
            base_folder=project_path,
            file_name=pattern,
            recursive_flag=True,
        )
        for pattern in patterns
    )
def prac_q_count(year, month, day, hour, qtemplate):
    """ Fetch the practice count for the given time/qtemplate or return
        False if not found. Can be used when deciding if to INSERT or UPDATE.
        Note: may return 0 if count is zero, is not same as not exist (False)
    """
    # Bug fix: the query previously selected "qtemplate" as the first column,
    # so res[0][0] returned the template id, not the practice count.
    # Select only the count column ("number").
    sql = """SELECT "number"
               FROM stats_prac_q_course
              WHERE hour=%s
                AND month=%s
                AND day=%s
                AND year=%s
                AND qtemplate=%s;"""
    params = (hour, month, day, year, qtemplate)
    res = DB.run_sql(sql, params)
    if not res or len(res) == 0:
        return False
    return int(res[0][0])
import getpass
import base64
def get_headers(gargs):
    """Get the required headers.

    Builds the HTTP headers for an API call. Unless ``gargs.no_passwd`` is
    set, a Basic Authorization header is added using, in order of priority:
    a password file (``gargs.passwdfile``), a preformatted ``user:password``
    string (``gargs.auth``), or an interactive password prompt.
    """
    headers = {
        'Content-type': 'application/json'
    }
    if gargs.no_passwd:
        return headers
    if gargs.passwdfile is not None:
        # Bug fix: close the password file (previously leaked the handle).
        with open(gargs.passwdfile, "r") as passwd_file:
            passwd = passwd_file.read().strip()
        auth_str = "%s:%s" % (gargs.user, passwd)
    elif gargs.auth is not None:
        auth_str = gargs.auth
    else:
        passwd = getpass.getpass("Password: ")
        auth_str = "%s:%s" % (gargs.user, passwd)
    auth = base64.b64encode(auth_str.encode('utf-8'))
    headers['Authorization'] = 'Basic %s' % auth.decode('utf-8')
    return headers
import torch
def highest_prob_class(y_predicted: torch.Tensor, y_true: torch.Tensor, **kwargs) -> _TensorOrTensors:
    """
    Reduce class probabilities to predicted labels: take the index of the
    highest-probability class along the last dimension and return it together
    with the (shape-checked) targets.
    """
    _values, predicted_labels = y_predicted.max(dim=-1)
    assert y_true.shape == predicted_labels.shape
    return predicted_labels, y_true
def vectorized_local_axes(three_atoms_coords):
    """
    Takes as an argument a Nx3x3 block of reference atom coordinates to construct N local axes systems (Nx3x3)
    """
    u12 = vectorized_unit_vector(three_atoms_coords[:, [0,1], :])
    u23 = vectorized_unit_vector(three_atoms_coords[:, [1,2], :])
    # Bug fix: the comparison belongs INSIDE np.any. The original
    # `np.any(...) > 1.0` compared a boolean to 1.0 and never fired.
    # NOTE(review): dot products of unit vectors only exceed 1.0 through
    # floating-point error; a tolerance like `>= 1.0 - eps` may be the
    # intended co-linearity test — confirm with the author.
    dots = np.einsum('ij,ij->i', u12, u23)
    if np.any(dots > 1.0):
        print("co-linear atoms detected")
    u23_x_u12 = vectorized_unit_cross_product(u23, u12)
    u12_x_u23_x_u12 = vectorized_unit_cross_product(u12, u23_x_u12)
    # Right-handed frame: z along bond 1->2, y in the 1-2-3 plane, x normal.
    z = u12
    y = u12_x_u23_x_u12
    x = vectorized_unit_cross_product(y, z)
    local_axes = np.transpose(np.array([x, y, z]), (1,0,2))
    return local_axes
def set_option(option, value):
    """Set a single option using the flat format, i.e ``section.option``
    Parameters
    ----------
    option: str
        Option name in the ``section.option`` format
    value:
        Value to set
    Example
    -------
    .. ipython:: python
        @suppress
        from xoa import set_option
        set_option('plot.cmapdiv', 'cmo.balance');
    """
    # Delegate to the multi-option setter with a single-entry mapping.
    single = {option: value}
    return set_options(None, **single)
def dict_to_capabilities(caps_dict):
    """Convert a dictionary into a string with the capabilities syntax."""
    # Render each pair as "key:value" and join with commas.
    rendered = ("%s:%s" % (key, value) for key, value in caps_dict.items())
    return ','.join(rendered)
import numpy
def delta(flag, F, K, t, r, sigma):
    """Returns the Black delta of an option.
    :param flag: 'c' or 'p' for call or put.
    :type flag: str
    :param F: underlying futures price
    :type F: float
    :param K: strike price
    :type K: float
    :param t: time to expiration in years
    :type t: float
    :param r: annual risk-free interest rate
    :type r: float
    :param sigma: volatility
    :type sigma: float
    :returns: float
    >>> F = 49
    >>> K = 50
    >>> r = .05
    >>> t = 0.3846
    >>> sigma = 0.2
    >>> flag = 'c'
    >>> v1 = delta(flag, F, K, t, r, sigma)
    >>> v2 = 0.45107017482201828
    >>> abs(v1-v2) < .000001
    True
    """
    # Discounted N(d1) for calls; -N(-d1) discounted for puts.
    discount = numpy.exp(-r * t)
    D1 = d1(F, K, t, r, sigma)
    if flag == 'p':
        return -discount * cnd(-D1)
    return discount * cnd(D1)
def main(X, Y, X0, Y0, depth, omegaX, omegaY, omegaZ, ax, ay, az, opening, nu, verbose=False):
    """
    Test CDM with test input parameters
    X0, Y0, depth: define the position of the dislocation
    omegaX, omegaY, omegaZ: define the orientation (clockwise rotations) of the dislocation
    ax, ay, az: define the semi-axes of the dislocation in the "body fixed" coordinates
    length of the tensile component of the Burgers Vector (length of the dislocation)

    Returns 0 on success; prints and stops at the first sample whose relative
    difference from the reference exceeds 1e-6.

    NOTE(review): `mde`, `mdn`, `mdv` (the matlab reference arrays) are never
    defined in this function — the "open the matlab generated file" step below
    is missing, so this raises NameError as written. TODO: load the reference
    data before the comparison loop.
    """
    if verbose:
        print("X = ", X)
        print("Y = ", Y)
        print("X0, Y0, depth = ", X0, Y0, depth)
        print("omegaX, omegaY, omegaZ = ", omegaX, omegaY, omegaZ)
        print("ax, ay, az = ", ax, ay, az)
        print("opening = ", opening)
        print("nu = ", nu)
    # Call CDM with test parameters; return displacements (de, dn, dv) and error status for
    de, dn, dv, ierr = CDM(X, Y, X0, Y0, depth, omegaX, omegaY, omegaZ, ax, ay, az, opening, nu)
    # compare output against the matlab generated output
    # open the matlab generated file
    if verbose:
        print("Differences from reference")
        print(" X Y de dn dv")
    for idx in range(len(X)):
        # Reference (matlab) east/north/vertical displacements for this sample.
        m = [mde[idx], mdn[idx], mdv[idx]]
        if verbose:
            print("{0: 5.2f} {1: 5.2f} {2:12.6e} {3:12.6e} {4:12.6e}".format(
                X[idx], Y[idx], abs((m[0]-de[idx])/m[0]), abs((m[1]-dn[idx])/m[1]),
                abs((m[2]-dv[idx])/m[2])
            ))
        # Relative-error tolerance of 1e-6 on each displacement component.
        if( (abs((m[0] - de[idx])/m[0]) > 1.e-6) or
            (abs((m[1] - dn[idx])/m[1]) > 1.e-6) or
            (abs((m[2] - dv[idx])/m[2]) > 1.e-6)
            ):
            # if there are any differences print them otherwise be silent
            print(idx, X[idx], Y[idx], m[0]-de[idx], m[1]-dn[idx], m[2]-dv[idx])
            break
    return 0
def extend_view(response, fetch_page_data, fetch_multimedia, raise_errors):
    """
    Extends view query results with pronunciation URLs, multimedia URLs, and extended hanja
    information by scraping the dictionary website.
    This function modifies the response in-place, and returns the modified object.
    See the [documentation](https://krdictpy.readthedocs.io/en/stable/return_types/#viewresponse)
    for details.
    - ``response``: The word search results to extend.
    - ``fetch_page_data``: Whether page data (URLs and hanja information) should be scraped.
    - ``fetch_multimedia``: Whether multimedia URLs should be scraped.
    - ``raise_errors``: Whether errors that occur during scraping should be raised or ignored.
    """
    if fetch_page_data:
        scrape_results = send_extend_request('view', response, raise_errors)
        _extend_response(*scrape_results)
    if fetch_multimedia:
        _extend_view_multimedia(response, raise_errors)
    return response
def mean(vector):
    """
    Calculates the arithmetic mean of the given vector.
    Args:
    -----
    vector : list
        A non-empty list/array of numbers to be averaged.
    Returns:
    --------
    mean : float
        The arithmetic mean of the given vector.
    """
    count = len(vector)
    total = sum(vector)
    return total / count
import os
import subprocess
def main():
    """
    Composes a BLAST command from a (DataTable) AJAX request, forwarded from
    molmod endpoint /blast_run, and executes the command using subprocess.

    Returns a Flask response: JSON hit data on success, or an
    (error-string, 500) tuple on any failure.
    """
    #
    # Format the BLAST command - return an error if the input form is missing
    # required values.
    #
    try:
        form = request.json
        # Output columns requested from BLAST (tabular outfmt 6).
        field_names = ['qacc', 'stitle', 'pident', 'qcovhsp', 'evalue']
        # Collect BLAST cmd items into list
        cmd = ['blastn']
        cmd += ['-perc_identity', unlist(form['min_identity'])]
        # Query cover per High-Scoring Pair
        cmd += ['-qcov_hsp_perc', unlist(form['min_qry_cover'])]
        cmd += ['-db', os.path.join('/blastdbs', form['db'])]
        cmd += ['-outfmt', f'6 {" ".join(field_names)}']
        # Only report best High Scorting Pair per query/subject pair
        cmd += ['-max_hsps', '1']
        cmd += ['-num_threads', '4']
    except KeyError as err:
        # Missing form field: report which key was absent.
        # pylint: disable=no-member
        APP.logger.error(f'Command formatting resulted in: {err}')
        return str(err), 500
    #
    # Execute BLAST search using subprocess.
    #
    # Track concurrent jobs so workers can be monitored; decremented in
    # the `finally` clause below regardless of outcome.
    APP.jobs += 1
    APP.logger.debug(f'Job count is {APP.jobs}')
    try:
        with subprocess.Popen(cmd, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as process:
            # Send seq from sform to stdin, read output & error until 'eof'
            stdout, stderr = process.communicate(
                input="\n".join(form['sequence']).encode()
            )
    # Make sure to catch everything so that workers can keep working
    # pylint: disable=broad-except
    except Exception as ex:
        # pylint: disable=no-member
        APP.logger.error("Subprocess error: %s", ex)
        return str(ex), 500
    else:
        # If BLAST returns success response
        if process.returncode == 0:
            # pylint: disable=no-member
            APP.logger.debug('BLAST success')
            #
            # Format results as JSON, to make it easier to parse.
            #
            raw = stdout.decode()
            results = []
            for row in raw.split('\n'):
                row = row.strip()
                if not row:
                    continue
                # Format as dictionary using list of field names,
                # transforming numerical strings into numbers
                result = {}
                for i, field in enumerate(row.split()):
                    try:
                        value = float(field)
                    except ValueError:
                        value = field
                    result[field_names[i]] = value
                results += [result]
            return jsonify(data=results)
        # If BLAST returns error (even if subprocess worked),
        # e.g. 2: Error in BLAST database
        err = stderr.decode()
        # pylint: disable=no-member
        APP.logger.error("%s", err.strip())
        return err, 500
    # Decrease job count, irrespective of success or failure
    finally:
        APP.jobs -= 1
        APP.logger.debug(f'Final job count is {APP.jobs}')
import math
def get_hamming_window(w, h, rot, sx, sy, out_size_w, out_size_h):
    """
    A hamming window map
    :param w: init ellipse w
    :param h: init ellipse h
    :param rot: rotation angle(rad)
    :param sx: center x coordinate of the window
    :param sy: center y-coordinate of the window
    :param out_size_w: output window size width
    :param out_size_h: output window size height
    :return: the rotated/translated hamming window rendered into an
        (out_size_h, out_size_w) array
    """
    alpha = 1
    w = math.floor(w * alpha)
    h = math.floor(h * alpha)
    # Separable 2-D Hamming window from the outer product of two 1-D windows.
    ham_window = np.outer(np.hamming(h), np.hamming(w))
    sx -= w / 2
    sy -= h / 2
    aa = w / 2
    bb = h / 2  # rot center
    cc = math.cos(rot)
    ss = math.sin(rot)
    # """
    # we first do the rotation before the original _crop_hwc
    # [ cos(c), -sin(c), a - a*cos(c) + b*sin(c)]   [ 1, 0, a][ cos(c), -sin(c), 0][ 1, 0, -a]
    # [ sin(c),  cos(c), b - b*cos(c) - a*sin(c)] = [ 0, 1, b][ sin(c),  cos(c), 0][ 0, 1, -b]
    # [      0,       0,                       1]   [ 0, 0, 1][      0,       0, 1][ 0, 0,  1]
    # """
    # Bug fix: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement (same dtype, float64).
    mapping_rot = np.array([[cc, -ss, aa - aa * cc + bb * ss + sx],
                            [ss, cc, bb - bb * cc - aa * ss + sy],
                            [0, 0, 1]]).astype(float)
    project = np.array([[1, 0, 0],
                        [0, 1, 0]]).astype(float)
    new_ham_window = cv2.warpAffine(ham_window, project @ mapping_rot, (out_size_w, out_size_h),
                                    )
    # plt.close('all')
    # plt.imshow(new_ham_window)
    # plt.show()
    # plt.close('all')
    return new_ham_window
def reconstructimage(lpyramid, f):
    """ Reconstruct image from Laplacian pyramid
    Args:
        lpyramid: Laplacian pyramid
        f: 2d filter kernel
    Returns:
        Reconstructed image as (H, W) np.array clipped to [0, 1]
    """
    # Start from the coarsest level (last entry) and repeatedly upsample,
    # adding back each finer level's detail.
    image = lpyramid[-1]
    for detail in lpyramid[-2::-1]:
        image = upsample2(image, f) + detail
    return image
def convert_to_pressure_levels(mv, plevs, dataset, var, season):
    """
    Given either test or reference data with a z-axis,
    convert to the desired pressure levels.
    """
    level_axis = mv.getLevel()
    long_name = level_axis.long_name.lower()
    # var(time,lev,lon,lat): hybrid sigma levels need the hyam/hybm/PS fields
    # to reconstruct pressure before interpolation.
    if 'hybrid' in long_name:
        hyam, hybm, ps = dataset.get_extra_variables_only(
            var, season, extra_vars=['hyam', 'hybm', 'PS'])
        return hybrid_to_plevs(mv, hyam, hybm, ps, plevs)
    # Already on pressure (isobaric) levels: interpolate directly.
    if 'pressure' in long_name or 'isobaric' in long_name:
        return pressure_to_plevs(mv, plevs)
    raise RuntimeError(
        "Vertical level is neither hybrid nor pressure. Aborting.")
def matrix2xzy_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
    """
    Recover extrinsic x-z-y Euler angles (degrees) from rotation matrices.
    Ry(k3) @ Rz(k2) @ Rx(k1) = [[c2c3, -c1s2c3+s1s3, s1s2c3+c1s3],
                                [s2, c1c2, -s1c2],
                                [-c2s3, c1s2s3+s1c3, -s1s2s3+c1c3]]
    """
    mats = rotation_matrices.reshape((-1, 3, 3))
    angles = np.zeros((mats.shape[0], 3))
    # Middle angle comes straight from element (1, 0) = sin(k2).
    angles[:, 1] = np.arcsin(mats[:, 1, 0])
    # Gimbal lock: cos(k2) ~ 0 makes element (0, 0) = c2c3 vanish.
    eps = 1e-4
    locked = np.abs(mats[:, 0, 0]) < eps
    # In the locked case only k1 is recoverable; set k3 = 0 by convention.
    angles[locked, 0] = np.arctan2(mats[locked, 2, 1], mats[locked, 2, 2])
    angles[locked, 2] = 0
    # Regular case: both remaining angles from their own 2x2 sub-blocks.
    regular = ~locked
    angles[regular, 0] = np.arctan2(-mats[regular, 1, 2], mats[regular, 1, 1])
    angles[regular, 2] = np.arctan2(-mats[regular, 2, 0], mats[regular, 0, 0])
    return np.rad2deg(angles)
from datetime import datetime
def do_auth():
    """
    perform authentication using API_KEY,
    stores token and stored timestamp in integration context,
    retrieves new token when expired (tokens are reused for 4 hours)
    """
    auth = demisto.getIntegrationContext()
    # Portable epoch seconds; strftime('%s') is platform-specific (not on Windows).
    now_epoch = int(datetime.today().timestamp())
    # Bug fix: require BOTH keys before reading them (was `or`, which could
    # raise KeyError when only one of token/stored was present).
    if "token" in auth and "stored" in auth and int(auth['stored']) + (60 * 60 * 4) > now_epoch:
        # Cached token is still valid.
        return auth['token']
    # Fetch a new token and cache it with the current timestamp.
    r = http_request('GET', 'IdToken', token=API_KEY)
    if r.get('token') is None:
        return_error("Authorization failed")
    demisto.setIntegrationContext({
        'token': r['token'],
        'stored': now_epoch
    })
    return r['token']
async def write_credential_def(controller, schema_id):
    """
    Writes Credential Definition to the ledger
    Parameters:
    ----------
    controller: AriesController
        The aries_cloudcontroller object
    schema_id
        Schema id
    Returns:
    -------
    write_cred_response :dict
    """
    response = await controller.definitions.write_cred_def(schema_id)
    if response:
        return response
    # Empty/falsy response means the ledger write did not happen.
    raise HTTPException(
        status_code=404,
        detail="Something went wrong. Could not write credential definition to the ledger",
    )
def guess_cloudwatch_log_group(alarm_name):
    """
    Guess the name of the CloudWatch log group most likely to contain
    logs about the error.
    """
    # Fixed prefix -> log group mappings, checked in order.
    static_groups = {
        "loris-": "platform/loris",
        "catalogue-api-romulus": "ecs/catalogue_api_gw-romulus",
        "catalogue-api-remus": "ecs/catalogue_api_gw-remus",
    }
    for prefix, group in static_groups.items():
        if alarm_name.startswith(prefix):
            return group
    # Lambda error alarms embed the function name between fixed affixes,
    # e.g. lambda-ecs_ec2_instance_tagger-errors
    if alarm_name.startswith("lambda-") and alarm_name.endswith("-errors"):
        lambda_name = alarm_name[len("lambda-") : -len("-errors")]
        return f"/aws/lambda/{lambda_name}"
    raise ValueError(f"Unable to guess log group name for alarm name={alarm_name!r}")
def update_attrs(orig, keys, override):
    """Utility function for altering and adding the specified attributes to a particular repository rule invocation.
    This is used to make a rule reproducible.
    Args:
        orig: dict of actually set attributes (either explicitly or implicitly)
            by a particular rule invocation
        keys: complete set of attributes defined on this rule
        override: dict of attributes to override or add to orig
    Returns:
        dict of attributes with the keys from override inserted/updated
    """
    result = {}
    for key in keys:
        # Idiom fix: identity comparison with None (`!= None` invokes __eq__,
        # which custom attribute values may override).
        value = getattr(orig, key)
        if value is not None:
            result[key] = value
    result["name"] = orig.name
    result.update(override)
    return result
def GetCreateLabelsFlag(extra_message='', labels_name='labels',
                        validate_values=True):
  """Makes the base.Argument for --labels flag."""
  # Values are only validated when requested; keys always are.
  value_validator = VALUE_FORMAT_VALIDATOR if validate_values else None
  format_notes = [KEY_FORMAT_HELP]
  if validate_values:
    format_notes.append(VALUE_FORMAT_HELP)
  help_parts = ['List of label KEY=VALUE pairs to add.',
                ' '.join(format_notes)]
  if extra_message:
    help_parts.append(extra_message)
  return base.Argument(
      '--{}'.format(labels_name),
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(
          key_type=KEY_FORMAT_VALIDATOR, value_type=value_validator),
      action=arg_parsers.UpdateAction,
      help=('\n\n'.join(help_parts)))
import six
def unflatten(d, splitter='tuple', inverse=False):
    """Unflatten dict-like object.
    Parameters
    ----------
    d : dict-like object
        The dict that will be unflattened.
    splitter : {'tuple', 'path', Callable}
        The key splitting method. If a Callable is given, the Callable will be
        used to split.
        'tuple': Use each element in the tuple key as the key of the unflattened dict.
        'path': Use `pathlib.Path.parts` to split keys.
    inverse : bool
        Whether you want to invert the key and value before flattening.
    Returns
    -------
    unflattened_dict : dict
    """
    # Resolve named splitters to their callables; callables pass through.
    split_func = SPLITTER_DICT[splitter] if isinstance(splitter, str) else splitter
    unflattened_dict = {}
    for key, value in six.viewitems(d):
        if inverse:
            key, value = value, key
        nested_set_dict(unflattened_dict, split_func(key), value)
    return unflattened_dict
def get_mapped_to_elements(mapper):
    """
    The mapper list contains all the element names that have been mapped to by
    other elements.

    Returns the unique target names in first-seen order.
    """
    # Perf fix: membership testing against a growing list was O(n^2);
    # a set makes each check O(1) while the list preserves order.
    mapper_list = []
    seen = set()
    for element in mapper:
        for list_element in mapper[element]:
            if list_element not in seen:
                seen.add(list_element)
                mapper_list.append(list_element)
    return mapper_list
def psd(time, rate, norm='leahy'):
    """
    Returns power spectral density from a (real) time series
    with Leahy normalization.
    Args:
        time: array of times (evenly binned).
        rate: array of rate in counts/s.
    Kwargs:
        norm: Normalization (only Leahy for now).
    Returns:
        f: array of frequencies.
        p: power spectrum.
    Raises:
        ValueError: if `norm` is not a supported normalization.
    History:
        v1: Initial python implementation. Riccardo Campana, 2014.
    """
    # Sanity checks
    assert len(time) == len(rate), 'ERROR: Time and rate arrays with different dimensions'
    # Check that time array is evenly spaced (also taking into account rounding errors)
    assert np.std(np.diff(time)) <= 1e-12, 'ERROR: Time array is not evenly spaced'
    # Bug fix: an unsupported norm previously fell through to an
    # UnboundLocalError at the return; fail fast and explicitly instead.
    if norm != 'leahy':
        raise ValueError("Unsupported normalization: {!r}".format(norm))
    dt = time[1] - time[0]
    # Total number of photons
    counts = rate * dt
    n_phot = np.sum(counts)
    print("Total number of photons: {}".format(n_phot))
    # Number of points
    n = len(counts)
    # Fourier transform
    f = np.fft.rfftfreq(n, d=dt)
    a = np.fft.rfft(counts)
    # Leahy normalization: 2 |a|^2 / N_phot. (Renamed from `psd`, which
    # shadowed the function name.)
    power = 2. / n_phot * np.abs(a) ** 2
    # Drop the zero-frequency (DC) bin.
    return f[1:], power[1:]
async def session_fetch(
    handle: SessionHandle, category: str, name: str, for_update: bool = False
) -> EntrySetHandle:
    """Fetch a row from the Store."""
    # Encode strings for the FFI boundary before dispatching.
    encoded_category = encode_str(category)
    encoded_name = encode_str(name)
    result = await do_call_async(
        "askar_session_fetch",
        handle,
        encoded_category,
        encoded_name,
        c_int8(for_update),
        return_type=EntrySetHandle,
    )
    return result
def login(hostname, username, password):
    """Login to the switch and return the console with the prompt at `#`"""
    # Allow exactly one reconnect if the peer resets the connection.
    try_again = True
    # Expect-pattern indices: 0 = enable prompt '#' (success, exits loop),
    # 1 = permission denied, 2 = password prompt, 3 = user prompt '>',
    # 4 = CLI selector, 5 = connection reset.
    alternatives = [r'[\r\n]+.+#',
                    "Permission denied",
                    "[Pp]assword: *",
                    ">",
                    "(ibmnos-cli/iscli):*",
                    "Connection reset by peer\r\r\n"]
    console = pexpect.spawn("ssh {}@{}".format(username, hostname))
    outcome = console.expect(alternatives, timeout=60)
    # Loop until outcome == 0, i.e. the enable ('#') prompt appears.
    while outcome:
        if outcome == 1:
            console.close()
            raise ConnectionDenied("Permission denied")
        elif outcome == 2:
            # Password prompt: supply the password.
            console.sendline(password)
        elif outcome == 3:
            # User-level prompt: elevate to enable mode.
            console.sendline("enable")
        elif outcome == 4:
            # Shell selector: pick the ISCLI shell.
            console.sendline("iscli")
            console.sendline()
        elif outcome == 5 and try_again:
            # Connection reset: retry the SSH session once.
            console = pexpect.spawn("ssh {}@{}".format(username, hostname))
            try_again = False
        elif outcome == 5 and not try_again:
            console.close()
            raise ConnectionFailed("connectivity/network issue")
        outcome = console.expect(alternatives, timeout=60)
    return console
def _ComputeImageDiff(failure_image, golden_image):
  """Compute mask showing which pixels are different between two images."""
  # Pixel-wise absolute difference, collapsed to grayscale, then binarized:
  # any nonzero difference becomes 255, identical pixels stay 0.
  diff = ImageChops.difference(failure_image, golden_image)
  grayscale = diff.convert('L')
  return grayscale.point(lambda value: 255 if value else 0)
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.

    The file may define ``update_parser(parser)`` (called if present) and
    ``CustomPyTester`` (returned instead of the default ``PyTester``).
    """
    namespace = {}
    # Bug fix: close the config file (previously the handle was leaked).
    with open(path, 'rb') as conf_file:
        source = conf_file.read()
    # NOTE: executes arbitrary project-local configuration code.
    exec(source, namespace)
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    return namespace.get('CustomPyTester', PyTester)
def svn_client_import3(*args):
  """
  svn_client_import3(svn_commit_info_t commit_info_p, char path, char url,
      svn_depth_t depth, svn_boolean_t no_ignore,
      svn_boolean_t ignore_unknown_node_types, apr_hash_t revprop_table,
      svn_client_ctx_t ctx,
      apr_pool_t pool) -> svn_error_t
  """
  # Bug fix: the `apply()` builtin was removed in Python 3; argument
  # unpacking is the equivalent (and Python-2 compatible) form.
  return _client.svn_client_import3(*args)
def labs(**kwargs):
    """
    Change plot title, axis labels and legend titles.
    Parameters
    ----------
    kwargs:
        A list of new names in the form aesthetic='new name',
        e.g. title='Plot title' or aes-name='Scale label'
    Returns
    --------
    Axis label specification.
    Note
    -----
    Change axis labels and legend titles.
    Examples
    ---------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy.stats import multivariate_normal
    >>> N = 100
    >>> M = 3
    >>> mean = np.zeros(M)
    >>> cov = np.eye(M)
    >>> X = multivariate_normal.rvs(mean, cov, N)
    >>> X = X.astype(int)  # comment this line to make variables continuous back
    >>> dat = pd.DataFrame(X)
    >>> dat = pd.melt(dat)
    >>> ggplot(dat, aes(x='value', group='variable', fill='variable')) +
    ...     geom_bar(stat='bin', position=position_dodge(width=5.0), width=10, alpha=0.8) +
    ...     labs(title='New plot title', x='New x axis label', y='New y axis label')
    """
    specs = []
    for aes_name, label in kwargs.items():
        # 'title' maps to ggtitle; everything else is a scale label.
        if aes_name == 'title':
            spec = FeatureSpec('ggtitle', name=None, text=label)
        else:
            spec = _scale(aesthetic=aes_name, name=label)
        specs.append(spec)
    # A single spec is returned bare; multiple specs are bundled.
    return specs[0] if len(specs) == 1 else FeatureSpecArray(*specs)
def _filter(paths, cgroups, rgroups):
    """Select paths whose cgroup/rgroup names pass the given filters.

    A filter of ``None`` accepts every path for that dimension.
    """
    return [
        path
        for path in paths
        if (cgroups is None or _cname(path) in cgroups)
        and (rgroups is None or _rname(path) in rgroups)
    ]
from contextlib import contextmanager
from importlib import import_module


@contextmanager
def patch(target, new):
    """Simplified module monkey patching via context manager.

    The docstring promised a context manager but the function was a bare
    generator (no ``@contextmanager``), so ``with patch(...)`` raised
    AttributeError; the decorator fixes that.

    Args:
        target: Target to replace: a dotted-path string (``"pkg.mod.attr"``)
            or an ``(object, attribute_name)`` pair.
        new: Object or value to replace the target with.

    Yields:
        Nothing; the original attribute is restored on exit, even when the
        managed block raises.
    """
    def _import_module(target):
        # Walk the dotted path; attribute access is tried first, and only
        # a missing attribute triggers importing the next submodule.
        components = target.split('.')
        import_path = components.pop(0)
        module = import_module(import_path)
        for comp in components:
            try:
                module = getattr(module, comp)
            except AttributeError:
                import_path += ".%s" % comp
                module = import_module(import_path)
        return module

    def _get_target(target):
        # Normalize either form of *target* into an (owner, attr) pair.
        if isinstance(target, str):
            try:
                module, attr = target.rsplit('.', 1)
            except (TypeError, ValueError):
                raise TypeError(f'invalid target: {target!r}')
            module = _import_module(module)
            return module, attr
        try:
            obj, attr = target
        except (TypeError, ValueError):
            raise TypeError(f'invalid target: {target!r}')
        return obj, attr

    obj, attr = _get_target(target)
    orig_attr = getattr(obj, attr)
    setattr(obj, attr, new)
    try:
        yield
    finally:
        # Always restore the original attribute.
        setattr(obj, attr, orig_attr)
def _load_with_pydub(filename, audio_format):
    """Decode a compressed audio file using pydub. If a video file is
    passed, its audio track(s) are extracted and loaded.

    This is an internal helper; use :func:`from_file` instead.

    :Parameters:
        `filename`:
            path to the audio file.
        `audio_format`:
            string, audio file format (e.g. raw, webm, wav, ogg)
    """
    # Formats with a dedicated pydub constructor; anything else falls
    # back to the generic loader.
    openers = {
        "mp3": AudioSegment.from_mp3,
        "ogg": AudioSegment.from_ogg,
        "flv": AudioSegment.from_flv,
    }
    opener = openers.get(audio_format, AudioSegment.from_file)
    decoded = opener(filename)
    return BufferAudioSource(
        data=decoded.raw_data,
        sampling_rate=decoded.frame_rate,
        sample_width=decoded.sample_width,
        channels=decoded.channels,
    )
from typing import Optional
def get_storage(store: Optional[StorageEngine] = None) -> StorageEngine:
    """Return the storage engine to use.

    Prefers the explicitly supplied *store*; otherwise falls back to the
    engine on top of the storage stack. Raises RuntimeError when neither
    is available.
    """
    if store is not None:
        return store
    top: StorageEngine = _storage_stack.top
    if top is None:
        raise RuntimeError("No Storage instance available.")
    return top
def UniformDot(d=100, p=100, tol=1e-2):
    """
    Let U be a random `d` x `p` matrix with i.i.d. uniform
    entries. Then Sigma = ``cov2corr``(U^T U)
    """
    rand_mat = np.random.uniform(size=(d, p))
    # Gram matrix of the columns, rescaled to a correlation matrix.
    gram = cov2corr(np.dot(rand_mat.T, rand_mat))
    # Nudge onto the PSD cone before the final correlation rescale.
    return cov2corr(shift_until_PSD(gram, tol=tol))
def get_supported_eline_list(*, lines=None):
    """
    Returns the list of the emission lines supported by ``scikit-beam``

    Parameters
    ----------
    lines : list(str)
        tuple or list of strings selecting which line series to include
        (e.g. ``("K",)`` or ``("L", "M")``). ``None`` (default) selects
        K, L and M lines.

    Returns
    -------
    the list of supported emission lines, formatted like ``"Fe_K"`` or
    ``"Mg_M"``.
    """
    selected = ("K", "L", "M") if lines is None else lines
    result = []
    # Preserve the K, L, M ordering of the combined list.
    for series, series_lines in (("K", K_LINE), ("L", L_LINE), ("M", M_LINE)):
        if series in selected:
            result = result + series_lines
    return result
def visualize_test(test_data_full, test_data, thetas):
    """
    Visualize Test for Testing Results

    :param test_data_full: the test data set (full) with labels and data
    :param test_data: the feature matrix used for prediction
    :param thetas: model parameters (the last entry is used)
    :return: fig
    """
    fig, axis = plt.subplots()
    heights = test_data_full["Height"]
    # Observed points in blue, fitted line (last theta iterate) in red.
    axis.scatter(heights, test_data_full["Weight"], color='blue')
    axis.plot(heights, predict(test_data, thetas[-1]), color='red', linewidth=2)
    plt.show()
    return fig
def normalization(data, dmin=0, dmax=1, save_centering=False):
    """
    Normalization in [a, b] interval or with saving centering

    x` = (b - a) * (xi - min(x)) / (max(x) - min(x)) + a

    Args:
        data (np.ndarray): data for normalization
        dmin (float): left interval
        dmax (float): right interval
        save_centering (bool): if True -- will save data centering and just normalize by lowest data

    Returns:
        np.ndarray: normalized data

    Raises:
        ValueError: if ``dmin`` is not strictly less than ``dmax``
    """
    # Use the idiomatic exception type for a bad argument (was a bare
    # ``Exception``); callers catching Exception still catch this.
    if dmin >= dmax:
        raise ValueError("Left interval 'dmin' must be fewer than right interval 'dmax'")
    if save_centering:
        # NOTE(review): divides by |min(data)|; assumes min(data) != 0 -- confirm.
        return data / abs(min(data))
    min_x = min(data)
    max_x = max(data)
    # NOTE(review): constant input (max_x == min_x) divides by zero -- confirm intent.
    return (data - min_x) * (dmax - dmin) / (max_x - min_x) + dmin
import os
def relpath(path: str, start: str = os.curdir) -> str:
    """Return *path* expressed relative to *start*.

    Falls back to returning *path* unchanged when no relative form exists
    (e.g. paths on different drives on Windows).
    """
    try:
        relative = os.path.relpath(path, start)
    except ValueError:
        return path
    return relative
def normalize_adjacency_matrix(adjacency_matrix):
    """
    Helper function for denoise_predictions.

    Symmetrically normalizes an adjacency matrix:

        M = D^(-1/2) * A * D^(-1/2)

    where D is the diagonal degree matrix of A.

    Arguments:
        adjacency_matrix: A sparse h*w x h*w adjacency matrix
            (https://en.wikipedia.org/wiki/Adjacency_matrix). For this
            normalization it should be symmetric, i.e. from an undirected
            graph.

    Returns:
        normalized_adjacency_matrix: each entry encodes whether the source
            and destination nodes are connected, scaled down by how
            connected each endpoint is. Symmetric input yields symmetric
            output.
    """
    # The matrix is square, so its side length is all we need.
    side = adjacency_matrix.shape[0]
    # Row sums give node degrees; for a symmetric matrix column sums
    # would be identical.
    node_degrees = np.array(adjacency_matrix.sum(axis=1)).flatten()
    # Put the degrees on the diagonal of a sparse matrix of the same shape.
    diag_idx = np.arange(side)
    degree_mat = csr_matrix((node_degrees, (diag_idx, diag_idx)),
                            shape=(side, side))
    # The degree matrix is diagonal, so elementwise power equals the true
    # matrix power; csr_matrix.power computes D^(-1/2) cheaply.
    inv_sqrt_degree = degree_mat.power(-1. / 2)
    # D^(-1/2) * A * D^(-1/2)
    return inv_sqrt_degree.dot(adjacency_matrix.dot(inv_sqrt_degree))
import six
import struct
def dl_parse_bsd_lo(link_packet):
    """Parse a BSD loopback packet.

    Returns ``(network_protocol, payload)`` or ``(None, None)`` when the
    packet is too short or the protocol family is unsupported.
    """
    if len(link_packet) < 4:
        return None, None
    # The 4-byte header is the protocol family in host byte order; its
    # value is always < 256, so leading zero bytes reveal big-endian.
    if six.byte2int(link_packet) == 0 and six.indexbytes(link_packet, 1) == 0:
        family, = struct.unpack(b'>I', link_packet[:4])
    else:
        family, = struct.unpack(b'<I', link_packet[:4])
    if family > 0xFF:
        return None, None
    if family == 2:
        return NetworkProtocol.IP, link_packet[4:]
    if family == 10:
        return NetworkProtocol.IPV6, link_packet[4:]
    return None, None
def energycalc(spcode0, climatezone, dbh_orig, height, azimuth, distance, vintage, shade_reduction, lu_conversion_shade, lu_conversion_climate, eqpt_cooling_potential, eqpt_heating_potential):
    """Calculates avoided emissions values given tree and building relationship data.
    Args:
    spcode0 - species code
    climatezone - climate region code
    dbh_orig - tree dbh in inches
    height - tree height in feet
    azimuth - direction to the shaded building in degrees
    distance - distance tree is from building in feet
    vintage - the year the building was built
    shade_reduction - shade reduction factor, accounting for the reduction
    in tree shading effects due to overlapping trees.
    lu_conversion_shade - land use conversion shading factor. Multi-use
    buildings that share walls (e.g. multi-family residences) have reduced
    sensitivity to temperature reductions from trees. This factor varies
    by land use type, and represents the trees that are within the shading
    distance of 60 feet.
    lu_conversion_climate - land use conversion climate factor. Multi-use
    buildings that share walls (e.g. multi-family residences) have reduced
    sensitivity to temperature reductions from trees. This factor varies
    by land use type, and represents the trees that are beyond the shading
    distance of 60 feet.
    eqpt_cooling_potential - equivalent cooling potential. This is a factor
    reducing the cooling transfer function by the degree of saturation
    of air conditioning in a particular combination of land use and
    climate zones.
    eqpt_heating_potential - equivalent heating potential. This is a factor
    reducing the heating transfer function by the degree of saturation
    of heating equipment in a particular combination of land use
    and climate zones.
    Return values (given in results tuple):
    spcode - species code
    climatezone - geographic region
    dbh_orig - tree DBH in inches
    dbh_calc - tree DBH in inches used in calculation
    height - tree height in feet
    max_dbh - maximum tree DBH in inches
    distance - distance tree is from building in feet
    building_lookup_class - distance class building falls within
    azimuth_lookup_class - cardinal direction to building
    vintage_lookup_class - building vintage, one of 3 classes
    ef_cooling_subset.co2_avg_emis_factor_kg_kwh_field - CO2 emissions factor relating
    kWh to CO2 equivalent
    cooling_total_ctcc - total cooling emissions reductions in kWh without including
    reduction factors (CTCC=CUFR Tree Carbon Calculator)
    cooling_total - total cooling emissions reductions in kWh including all reduction
    factors (e.g. shadereduc)
    heating_total_ctcc - total heating emissions reductions in MBtu without including
    reduction factors
    heating_total - total heating emissions reductions in MBtu including all
    reduction factors
    all_cooling_emis_ctcc - total cooling emissions reductions in kg CO2 equivalents
    without including reduction factors.. Note that these incorporate methane and nitrous oxide CO2 equivalents in addition to actual CO2 emissions)
    all_heating_emis_ctcc - total cooling emissions reductions in kg CO2 equivalents
    including all reduction factors
    all_heating_emis_ctcc - total heating emissions reductions in kg CO2 equivalents
    without including reduction factors
    all_heating_emis - total heating emissions reductions in kg CO2 equivalents
    including all reduction factors
    all_emis_ctcc - total emissions reductions in kg CO2 equivalents without
    including reduction factors
    all_emis - total emissions reductions in kg CO2 equivalents including
    all reduction factors
    """
    # NOTE(review): a flat 0.7 "error" reduction is applied to every final
    # output at the bottom of this function; the origin of this constant is
    # not shown here -- confirm against the methodology documentation.
    error_factor = 0.7
    # Coerce all numeric inputs up front so callers may pass strings.
    dbh_orig = float(dbh_orig)
    height = float(height)
    azimuth = float(azimuth)
    distance = float(distance)
    vintage = float(vintage)
    shade_reduction = float(shade_reduction)
    lu_conversion_shade = float(lu_conversion_shade)
    lu_conversion_climate = float(lu_conversion_climate)
    eqpt_cooling_potential = float(eqpt_cooling_potential)
    eqpt_heating_potential = float(eqpt_heating_potential)
    # Forgot we have to do species assignments for many of the species out there
    spcode = SpeciesMaster.objects.filter(Q(speciescode=spcode0) & Q(region=climatezone))[0].growth_assign
    # Need a double lookup for the species with initial growth assignments like 'BEM OTHER'
    if 'OTHER' in spcode:
        spcode = SpeciesMaster.objects.filter(Q(speciescode=spcode) & Q(region=climatezone))[0].growth_assign
    # Palm species are measured by height, so height stands in for DBH in
    # all subsequent lookups.
    palmlist = [palm.sp_code for palm in Palms.objects.all()]
    if spcode in palmlist:
        dbh_calc = height
    else:
        dbh_calc = dbh_orig
    # import ipdb; ipdb.set_trace()
    # Every DB lookup below lives inside one try-block; any missing row
    # aborts the whole calculation via the except at the bottom.
    try:
        dbh_lookup_class_min = DbhClassesInterp.objects.filter(Q(midlowfake__lte=dbh_calc) & Q(midhighfake__gt=dbh_calc))[0].class_low
        dbh_lookup_class_max = DbhClassesInterp.objects.filter(Q(midlowfake__lte=dbh_calc) & Q(midhighfake__gt=dbh_calc))[0].class_high
        dbh_low = DbhClassesInterp.objects.filter(Q(midlowfake__lte=dbh_calc) & Q(midhighfake__gt=dbh_calc))[0].midlow
        dbh_high = DbhClassesInterp.objects.filter(Q(midlowfake__lte=dbh_calc) & Q(midhighfake__gt=dbh_calc))[0].midhigh
        # North wraps around 0 degrees, so it is special-cased.
        if azimuth <= 22.5 or azimuth >= 337.5:
            azimuth_lookup_class = "N"
        else:
            azimuth_lookup_class = AzimuthClasses.objects.filter(Q(degree_start__lte=azimuth) & Q(degree_end__gte=azimuth))[0].direction
        building_lookup_class = BuildingClasses.objects.filter(Q(distance_start__lte=distance) & Q(distance_end__gte=distance))[0].class_desc
        if vintage < 1950:
            vintage_lookup_class = "pre1950"
        elif vintage >= 1950 and vintage <= 1980:
            vintage_lookup_class = "1950-1980"
        else:
            vintage_lookup_class = "post1980"
        # import ipdb; ipdb.set_trace()
        # lookup all cooling energy reduction values (values are in "energy_reduction") #
        # for each tree, there will be a climate value (corresponding to clim in the building.lookup.class)
        # for any tree within 60ft of a building, there will also be a shade value (corresponding to adj, near, or far in the building lookup class)
        q1 = Q(cz=climatezone) & Q(benefit_type="cool") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_min) & Q(azimuth=azimuth_lookup_class) & Q(dist="Clim") & Q(vintage=vintage_lookup_class)
        try:
            cooling_climate_low = Energylong2.objects.filter(q1)[0].energy_reduction
        except IndexError:
            cooling_climate_low = 0  # set to 0 per lines 219-226 in avoided_emissions_calculator.R
        q1 = Q(cz=climatezone) & Q(benefit_type="cool") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_max) & Q(azimuth=azimuth_lookup_class) & Q(dist="Clim") & Q(vintage=vintage_lookup_class)
        try:
            cooling_climate_high = Energylong2.objects.filter(q1)[0].energy_reduction
        except IndexError:
            cooling_climate_high = 0
        q1 = Q(cz=climatezone) & Q(benefit_type="cool") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_min) & Q(azimuth=azimuth_lookup_class) & Q(dist=building_lookup_class) & Q(vintage=vintage_lookup_class)
        try:
            cooling_shade_low = Energylong2.objects.filter(q1)[0].energy_reduction
        except IndexError:
            cooling_shade_low = 0
        q1 = Q(cz=climatezone) & Q(benefit_type="cool") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_max) & Q(azimuth=azimuth_lookup_class) & Q(dist=building_lookup_class) & Q(vintage=vintage_lookup_class)
        try:
            cooling_shade_high = Energylong2.objects.filter(q1)[0].energy_reduction
        except IndexError:
            cooling_shade_high = 0
        # Heating lookups mirror the cooling ones; the 0.001 factor
        # presumably converts the stored units to MBtu -- confirm.
        q1 = Q(cz=climatezone) & Q(benefit_type="heat") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_min) & Q(azimuth=azimuth_lookup_class) & Q(dist="Clim") & Q(vintage=vintage_lookup_class)
        try:
            heating_climate_low = Energylong2.objects.filter(q1)[0].energy_reduction * 0.001
        except IndexError:
            heating_climate_low = 0
        q1 = Q(cz=climatezone) & Q(benefit_type="heat") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_max) & Q(azimuth=azimuth_lookup_class) & Q(dist="Clim") & Q(vintage=vintage_lookup_class)
        try:
            heating_climate_high = Energylong2.objects.filter(q1)[0].energy_reduction * 0.001
        except IndexError:
            heating_climate_high = 0
        q1 = Q(cz=climatezone) & Q(benefit_type="heat") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_min) & Q(azimuth=azimuth_lookup_class) & Q(dist=building_lookup_class) & Q(vintage=vintage_lookup_class)
        try:
            heating_shade_low = Energylong2.objects.filter(q1)[0].energy_reduction * 0.001
        except IndexError:
            heating_shade_low = 0
        q1 = Q(cz=climatezone) & Q(benefit_type="heat") & Q(species=spcode) & Q(dbh_class=dbh_lookup_class_max) & Q(azimuth=azimuth_lookup_class) & Q(dist=building_lookup_class) & Q(vintage=vintage_lookup_class)
        try:
            heating_shade_high = Energylong2.objects.filter(q1)[0].energy_reduction * 0.001
        except IndexError:
            heating_shade_high = 0
        # Cap the working DBH (or height, for palms) at the species/zone maximum.
        if spcode in palmlist:
            max_dbh = AppsMax.objects.filter(Q(sp_code=spcode) & Q(cz=climatezone))[0].height_ft
        else:
            max_dbh = AppsMax.objects.filter(Q(sp_code=spcode) & Q(cz=climatezone))[0].dbh_in
        if dbh_calc > max_dbh:
            dbh_calc = max_dbh
        # calculate cooling (kWh) & heating (MBtu)
        #### interpolate the high and low values ##
        cooling_climate_interp = cooling_climate_low + (dbh_calc - dbh_low)*(cooling_climate_high-cooling_climate_low)/(dbh_high-dbh_low)
        cooling_shade_interp = cooling_shade_low + (dbh_calc - dbh_low)*(cooling_shade_high-cooling_shade_low)/(dbh_high-dbh_low)
        heating_climate_interp = heating_climate_low + (dbh_calc - dbh_low)*(heating_climate_high-heating_climate_low)/(dbh_high-dbh_low)
        heating_shade_interp = heating_shade_low + (dbh_calc - dbh_low)*(heating_shade_high-heating_shade_low)/(dbh_high-dbh_low)
        # check to see if it is surpassing max cooling contribution; not applicable for heating.
        max_cool_contrib = MaxCoolHeat.objects.filter(Q(cz=climatezone) & Q(type_max="cool") & Q(vintage=vintage_lookup_class))[0].max
        # jim's calculator totals both the climate and the shade
        if cooling_climate_interp > max_cool_contrib:
            cooling_climate_interp = max_cool_contrib
        else:
            cooling_climate_interp = cooling_climate_interp
        if cooling_shade_interp > max_cool_contrib:
            cooling_shade_interp = max_cool_contrib
        else:
            cooling_shade_interp = cooling_shade_interp
        #### apply shade reductions ####
        cooling_shade_interp2 = cooling_shade_interp * shade_reduction
        heating_shade_interp2 = heating_shade_interp * shade_reduction
        cooling_climate_interp2 = cooling_climate_interp
        heating_climate_interp2 = heating_climate_interp
        #### apply land use conversions ####
        cooling_shade_interp3 = cooling_shade_interp2 * lu_conversion_shade
        heating_shade_interp3 = heating_shade_interp2 * lu_conversion_shade
        cooling_climate_interp3 = cooling_climate_interp2 * lu_conversion_climate
        heating_climate_interp3 = heating_climate_interp2 * lu_conversion_climate
        # distinguish between trees w/ only climate effects and those with shade and climate effects
        if distance>60.0001:
            cooling_total_ctcc = cooling_climate_interp
            heating_total_ctcc = heating_climate_interp
            cooling_total_reducts = cooling_climate_interp3
            heating_total_reducts = heating_climate_interp3
        else:
            cooling_total_ctcc = cooling_climate_interp + cooling_shade_interp
            heating_total_ctcc = heating_climate_interp + heating_shade_interp
            cooling_total_reducts = cooling_climate_interp3 + cooling_shade_interp3
            heating_total_reducts = heating_climate_interp3 + heating_shade_interp3
        cooling_total = cooling_total_reducts * eqpt_cooling_potential
        heating_total = heating_total_reducts * eqpt_heating_potential
        ##### calculate emission equivalents (kg) for cooling & heating
        # compile emission reductions in kg
        # first, find climate zone-dependent cooling emissions values and subset them
        ef_cooling_subset = EmisFactorsCooling.objects.filter(cz=climatezone)[0]
        # lookup emissions factors cooling #
        # must convert emissions to co2 equivalents - emis.factors.cooling.subset[1] & so forth represent co2, methane, and nitrous oxide emission factors and
        # multiply by global warming potential (co2=1,methane=23,no=296#
        # also calculate for ctcc to compare with carbon calculator
        # ctcc
        co2_cooling_emis_ctcc = cooling_total_ctcc*(ef_cooling_subset.co2_avg_emis_factor_kg_kwh_field)
        methane_cooling_emis_ctcc = cooling_total_ctcc*(ef_cooling_subset.methane_avg_emis_factor_kg_kwh_field*23)
        no_cooling_emis_ctcc = cooling_total_ctcc*(ef_cooling_subset.nitrous_oxide_avg_emis_factor_kg_kwh_field*296)
        all_cooling_emis_ctcc = co2_cooling_emis_ctcc + methane_cooling_emis_ctcc + no_cooling_emis_ctcc
        # new results w/multi tree & eqpt reductions
        co2_cooling_emis = cooling_total*(ef_cooling_subset.co2_avg_emis_factor_kg_kwh_field)
        methane_cooling_emis = cooling_total*(ef_cooling_subset.methane_avg_emis_factor_kg_kwh_field*23)
        no_cooling_emis = cooling_total*(ef_cooling_subset.nitrous_oxide_avg_emis_factor_kg_kwh_field*296)
        all_cooling_emis = co2_cooling_emis + methane_cooling_emis + no_cooling_emis
        # lookup emissions factors heating; unlike cooling, heating efs not climate-zone dependent #
        ef_heating_subset = EmisFactorsHeating.objects.filter(fuel_type="Natural Gas")[0]  # was fuel oil ignored because we were in Calif?
        # ctcc
        co2_heating_emis_ctcc = heating_total_ctcc*(ef_heating_subset.co2_emis_factor_kg_mbtu_field)
        methane_heating_emis_ctcc = heating_total_ctcc*(ef_heating_subset.methane_emis_factor_kg_mbtu_field*23)
        no_heating_emis_ctcc = heating_total_ctcc*(ef_heating_subset.nitrous_oxide_emis_factor_kg_mbtu_field*296)
        all_heating_emis_ctcc = co2_heating_emis_ctcc + methane_heating_emis_ctcc + no_heating_emis_ctcc
        # new results w/multi tree & eqpt reductions
        co2_heating_emis = heating_total*(ef_heating_subset.co2_emis_factor_kg_mbtu_field)
        methane_heating_emis = heating_total*(ef_heating_subset.methane_emis_factor_kg_mbtu_field*23)
        no_heating_emis = heating_total*(ef_heating_subset.nitrous_oxide_emis_factor_kg_mbtu_field*296)
        all_heating_emis = co2_heating_emis + methane_heating_emis + no_heating_emis
        # ctcc
        all_emis_ctcc = all_cooling_emis_ctcc + all_heating_emis_ctcc
        # ours
        all_emis = all_cooling_emis + all_heating_emis
        # error_reductions
        cooling_total_ctcc = cooling_total_ctcc * error_factor
        cooling_total = cooling_total * error_factor
        heating_total_ctcc = heating_total_ctcc * error_factor
        heating_total = heating_total * error_factor
        all_cooling_emis_ctcc = all_cooling_emis_ctcc * error_factor
        all_cooling_emis = all_cooling_emis * error_factor
        all_heating_emis_ctcc = all_heating_emis_ctcc * error_factor
        all_heating_emis = all_heating_emis* error_factor
        all_emis_ctcc = all_emis_ctcc * error_factor
        all_emis = all_emis * error_factor
        # form results tuple
        # I take out a bunch of id stuff. Can argue it doesn't really belong in this function.
        results = (spcode, climatezone, dbh_orig, dbh_calc, max_dbh, height, distance, building_lookup_class, azimuth_lookup_class, vintage_lookup_class, ef_cooling_subset.co2_avg_emis_factor_kg_kwh_field, cooling_total_ctcc, cooling_total,heating_total_ctcc, heating_total, all_cooling_emis_ctcc, all_cooling_emis, all_heating_emis_ctcc, all_heating_emis, all_emis_ctcc, all_emis)
    except IndexError:
        # NOTE(review): returns an error *string* instead of raising --
        # callers must check the type of the result.
        return ("Database lookup error")
    else:
        #return (dbh_lookup_class_min,dbh_lookup_class_max, dbh_low, dbh_high, azimuth_lookup_class, building_lookup_class, vintage_lookup_class, cooling_climate_low, cooling_climate_high, cooling_shade_low)
        return (results)
def _create_scene_object(token: str, object_type: TrackedObjectType) -> Agent:
    """Build a random, stationary Agent for testing.

    :param token: a unique instance token
    :param object_type: agent type.
    :return: a random Agent
    """
    random_scene = SceneObject.make_random(token, object_type)
    metadata = SceneObjectMetadata(token=token, track_token=token, track_id=None, timestamp_us=0)
    return Agent(
        tracked_object_type=object_type,
        oriented_box=random_scene.box,
        velocity=StateVector2D(0, 0),
        metadata=metadata,
    )
def conv3x3_group(in_planes, out_planes, groups=1, stride=1):
    """Grouped 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        groups=groups,
        stride=stride,
        padding=1,
        bias=False,
    )
def named(name):
    """Decorator factory that renames the decorated object to *name*."""
    def rename(target):
        target.__name__ = name
        return target
    return rename
def split_repo_and_dir(repo):
    """
    Split the input string

        org-name/repo-name/subdir-name/more/sub/dirs

    (where '/subdir-name/more/sub/dirs' is optional) into

        org-name/repo-name

    and

        subdir-name/more/sub/dirs

    The second part is the empty string when no subdir-name was given.
    """
    pieces = repo.split('/')
    org_repo = '/'.join(pieces[:2])
    subdir = '/'.join(pieces[2:])
    return [org_repo, subdir]
from pathlib import Path
import os
import shutil
def save_upload_file_tmp(upload_file: UploadFile):
    """Persist an uploaded file into a fresh temp directory under ``dat``.

    Returns the path of the saved temp file and the directory containing
    it. The upload's underlying file object is always closed.
    """
    try:
        extension = Path(upload_file.filename).suffix
        tmp_dir = mkdtemp(dir="dat")
        with NamedTemporaryFile(
            delete=False, suffix=extension, dir=os.path.abspath(tmp_dir)
        ) as out:
            shutil.copyfileobj(upload_file.file, out)
            saved_path = Path(out.name)
    finally:
        upload_file.file.close()
    return saved_path, tmp_dir
import requests
def hits_recorded(pitcher_id=None):
    """Get the number of hits recorded by each historical pitcher. If
    pitcherId is specified, only that pitcher is returned.

    `pitcher_id` is a single string UUID or list of string UUIDs.
    Returns dictionary {pitcher_id: count}"""
    query = {'pitcherId': prepare_id(pitcher_id)} if pitcher_id else {}
    resp = requests.get(construct_url('hitsRecorded'), params=query)
    resp.raise_for_status()
    counts = {}
    for record in resp.json()['results']:
        counts[record['pitcher_id']] = record['count']
    return counts
def calculateOnlineVariance(data):
    """
    Returns the sample variance of the given list, computed with
    Welford's numerically stable online algorithm.

    :param data: A list of numbers to be measured (ie. the window)
    :returns: The (n-1)-denominator sample variance of the data.
    :raises ValueError: if fewer than two data points are supplied.
        (Previously one point crashed with an unhelpful
        ZeroDivisionError and zero points silently returned -0.0.)
    """
    n, mean, M2 = 0, 0, 0
    for x in data:
        n = n + 1
        delta = x - mean
        mean = mean + delta/n
        M2 = M2 + delta*(x-mean)
    if n < 2:
        raise ValueError("variance requires at least two data points")
    return M2/(n-1)
def clamp(x, xmin, xmax):
    """Constrain a value to lie between two further values, element-wise.

    The result is computed as ``min(max(x, xmin), xmax)``. The arguments
    can be scalars or :class:`~taichi.Matrix`, as long as they can be
    broadcasted to a common shape.

    Args:
        x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`):
            the value to constrain.
        xmin (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`):
            the lower end of the range into which to constrain `x`.
        xmax (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`):
            the upper end of the range into which to constrain `x`.

    Returns:
        The value of `x` constrained to the range `xmin` to `xmax`.

    Example::

        >>> v = ti.Vector([0, 0.5, 1.0, 1.5])
        >>> ti.clamp(v, 0.5, 1.0)
        [0.5, 0.5, 1.0, 1.0]
    """
    lower_bounded = max(xmin, x)
    return min(xmax, lower_bounded)
def anim_curve_exists(attr_curve_name):
    """Return True if an animation curve named *attr_curve_name* exists.

    Fixes an inverted condition: the previous version returned True when
    the curve was *missing* and False when it was found, contradicting
    the function's name.
    """
    return bool(get_anim_curve(attr_curve_name))
from datetime import datetime
def create_string(item):
    """Create a unique, timestamped test string for *item*."""
    # The surrounding import is ``from datetime import datetime``, so the
    # original ``datetime.datetime.now()`` raised AttributeError at runtime.
    return WEBEX_TEAMS_TEST_STRING_TEMPLATE.substitute(
        prefix=WEBEX_TEAMS_TEST_STRING_PREFIX,
        item=item,
        datetime=str(datetime.now())
    )
from typing import List
def save_image_with_legends_and_labels(save_path: str, image: np.ndarray, legends: List[Rectangle],
                                       labels: List[str]) -> (plt.Figure, plt.Axes):
    """
    Saves the given image with the given legend rects and labels.

    Args:
        save_path: the path to the folder to save the image
        image: the image to put the legends on
        legends: a list of colored rects
        labels: a list of the names of the legends

    Returns:
        the figure and the axes of matplotlib
    """
    figure, axes = plt.subplots()
    # OpenCV images are BGR; convert for matplotlib's RGB display.
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    axes.imshow(rgb_image)
    axes.legend(legends, labels)
    axes.axis('off')
    axes.plot()
    plt.savefig(save_path, bbox_inches='tight', pad_inches=0)
    return figure, axes
from datetime import datetime
import time
def nod_uploadkey(private, own=False, date=None):
    """Generate the key for the next News of the Day entry.

    >>> nod_uploadkey("USK@foo,moo,goo/WebOfTrust/0", own=False, date=datetime(2010, 1, 1))
    'SSK@foo,moo,goo/nod-shared-2010-01-01'
    >>> nod_uploadkey("USK@foo,moo,goo/WebOfTrust/0", own=True, date=datetime(2010, 2, 1))
    'SSK@foo,moo,goo/nod-own-2010-02-01'
    """
    # The surrounding import is ``from datetime import datetime``, so the
    # original ``datetime.datetime(...)`` raised AttributeError; also the
    # unused ``today`` alias was dropped.
    if date:
        t = date
    else:
        t = datetime(*time.gmtime()[:6])
    datepart = "{:04}-{:02}-{:02}".format(t.year, t.month, t.day)
    if own:
        path = "nod-own-" + datepart
    else:
        path = "nod-shared-" + datepart
    return usktossk(private, path)
def is_struct(struct):
    """ Checks if the message is a data structure or an rpc
    request/response"""
    # endswith accepts a tuple of suffixes: true only when neither matches.
    return not struct.name.endswith(("Request", "Response"))
def relative_round(value, relative_digits):
    """Round *value* to the given number of significant (relative) digits.

    Tuples are rounded element-wise; zero, strings, NaN and infinities
    are returned unchanged.
    """
    if isinstance(value, tuple):
        return tuple(relative_round(element, relative_digits) for element in value)
    if value == 0 or isinstance(value, str) or np.isnan(value) or np.isinf(value):
        return value
    # Shift the rounding position by the value's own order of magnitude.
    digits = relative_digits - get_number_precision(value)
    return round(value, int(digits))
import xmlrpc
def get_pypi_proxy():
    """Returns a RPC ServerProxy object pointing to the PyPI RPC
    URL.

    :rtype: xmlrpc.client.ServerProxy
    :return: the RPC ServerProxy to PyPI repository.
    """
    # ``import xmlrpc`` alone does not load the ``client`` submodule, so
    # the attribute access below could raise AttributeError; import the
    # submodule explicitly.
    import xmlrpc.client
    return xmlrpc.client.ServerProxy(PYPI_XMLRPC)
def replace_labels(code):
    """
    Replaces free labels in `code` with integers, and accordingly any label call / branch.
    For example, code:
        ('BRANCH', 'L0')
        ('LABEL', '2+')
        ('1+',)
        ('EXIT',)
        ('LABEL', 'L0')
    returns:
        ('BRANCH', 1)
        ('LABEL', 0)
        ('1+',)
        ('EXIT',)
        ('LABEL', 1)
    and:
        {'2+': 0, 'L0': 1}
    Args:
        code: the input IM code.
    Returns:
        code with labels replaced by numbers
        a {label -> ID} dictionary
    """
    # Maps each label name to its integer ID, assigned in order of
    # first definition.
    labels = {}
    def find_labels(words):
        """ Find all the labels in the code """
        for word in words:
            if word[0] == im.label()[0]:
                label = word[1]
                labels[label] = len(labels)
    find_labels(code)
    # Opcodes whose operand is a *reference* to a label (as opposed to a
    # label definition).
    label_ops = {im.branch()[0], im.branch0()[0], im.call()[0], im.inc_do_loop()[0]}
    def replace(words):
        """ Recursively replace labels in the code """
        result = []
        for word in words:
            if word[0] in label_ops:
                result.append((word[0], labels[word[1]]))
            elif word[0] == im.label()[0]:
                result.append(im.label(labels[word[1]]))
            elif symbol_match(word, im.parallel()):
                # Parallel nodes carry two sub-programs; rewrite both.
                left, right = word[1:]
                result.append(im.parallel(replace(left), replace(right)))
            elif symbol_match(word, im.pipeline()):
                # Pipelines carry a sequence of sub-programs.
                words = word[1]
                replaced_words = (replace(w) for w in words)
                result.append(im.pipeline(replaced_words))
            elif symbol_match(word, im.slot()):
                # NOTE(review): only choose/sample decoders carry nested
                # code to rewrite; other slot decoders pass through as-is.
                dec, enc, id_, trans = word[1:]
                if symbol_match(dec, im.choose()) or symbol_match(dec, im.sample()):
                    result.append(im.slot((dec[0], replace(dec[1])), enc, id_, trans))
                else:
                    result.append(word)
            else:
                result.append(word)
        return tuple(result)
    replaced = replace(code)
    return replaced, labels
import pandas as pd
from typing import Union
from pathlib import Path
import tarfile
def read_tarfile_csv(path: Union[str, Path], inner_path: str, sep: str = "\t", **kwargs):
    """Read an inner CSV file from a tar archive.

    :param path: The path to the tar archive
    :param inner_path: The path inside the tar archive to the dataframe
    :param sep: The separator in the dataframe. Overrides the Pandas
        default to use a tab.
    :param kwargs: Additional kwargs to pass to :func:`pandas.read_csv`.
    :return: A dataframe
    :rtype: pandas.DataFrame
    """
    with tarfile.open(path) as archive:
        member = archive.extractfile(inner_path)
        with member:  # type: ignore
            return pd.read_csv(member, sep=sep, **kwargs)
def hasReturnType(matcher):
    """Match a function/method whose return type satisfies *matcher*.

    >>> from glud import *
    >>> config = '''
    ... class X {};
    ... X u();
    ... int v();
    ... '''
    >>> m = functionDecl(hasReturnType(builtinType()))
    >>> for c in walk(m, parse_string(config).cursor):
    ...     print(c.spelling)
    v
    """
    # Wrap the inner matcher in a traversal over the declaration's return type.
    traversal = ReturnTypeTraversalMatcher(matcher)
    return traversal
from pathlib import Path
def calc_path(filename):
    """Resolve *filename* to an absolute path, relative to this module's directory.

    Returns ``None`` when *filename* is ``None``; absolute inputs are resolved
    as-is.
    """
    logger.info("calc_path filename: %s", filename)
    if filename is None:
        return None
    candidate = Path(filename)
    if candidate.is_absolute():
        return candidate.resolve()
    # Anchor relative paths next to this source file.
    return Path(__file__, "..", candidate).resolve()
def get_enum_map(mri, name):
    """ Build a two-way mapping for an enum: name -> value and value -> name.
    @param mri mri instance
    @param name hierarchical name of the enum """
    mapping = {}
    # Constant names are fully qualified; strip "<name>." from the front.
    prefix_len = len(name) + 1
    enum_info = mri.engine.tdm.getByHierarchicalName(name)
    for constant in enum_info.getConstants():
        short_name = constant.getName()[prefix_len:]
        value = constant.getConstantValue()
        mapping[short_name] = value
        mapping[value] = short_name
    return mapping
import time
def CreateWebsList(AssetInfo, TimePause, TimesFetch):
    """ Crawl each asset's page TimesFetch times, storing snapshots per ticker.

    Sleeps TimePause seconds after every individual fetch.
    """
    # One linked list of fetched pages per asset ticker.
    WebsLists = {ticker: WebLinkedList.WebList() for ticker in list(AssetInfo.keys())}
    # Start crawling
    for _ in range(TimesFetch):
        for ticker in list(AssetInfo.keys()):
            page, fetched_at = Utilities.FetchWebPage(AssetInfo[ticker])
            WebsLists[ticker].Append(WebLinkedList.WebNode(page, fetched_at))
            time.sleep(TimePause)
    return WebsLists
def spatial_filter(x, y, z, dx, dy, sigma=3.0):
    """
    des: outlier filtering within spatial bins of size dx * dy.
    arg:
        x, y: coord_x and coord_y (m)
        z: values to filter
        dx, dy: bin resolution in x (m) and y (m)
        sigma: cut-off in multiples of the per-bin standard deviation
    return:
        zo: filtered copy of z; outliers replaced by nan
    """
    n_y = int((np.abs(y.max() - y.min())) / dy) + 1
    n_x = int((np.abs(x.max() - x.min())) / dx) + 1
    binned = stats.binned_statistic_2d(x, y, z, bins=(n_x, n_y))
    bin_of_point = binned.binnumber  # the bin index of each (x, y)
    zo = z.copy()
    # Process each occupied bin independently.
    for bin_id in np.unique(bin_of_point):
        members, = np.where(bin_of_point == bin_id)
        zb = z[members]
        # Skip bins that hold no valid (non-nan) data.
        if len(zb[~np.isnan(zb)]) == 0:
            continue
        dh = zb - np.nanmedian(zb)
        outlier = np.abs(dh) > sigma * np.nanstd(dh)
        zb[outlier] = np.nan
        zo[members] = zb
    return zo
def _log_add(x, y):
"""
Add x and y in log space.
"""
if x == -np.inf:
return y
if y == -np.inf:
return x
return np.max([x, y]) + np.log1p(np.exp(-abs(x - y))) | ff4ab48c9ddb446f17456049d6f5317306cdc7a2 | 3,627,470 |
import asyncio
def query_by_name(first, last, jurisdictions=None, timeout=None):
    """Query jurisdictions with an inmate name.

    Each provider's blocking ``query_by_name`` is run in the default executor
    so all jurisdictions are searched concurrently.

    :param first: Inmate first name to search.
    :type first: str
    :param last: Inmate last name to search.
    :type last: str
    :param jurisdictions: List of jurisdictions to search.
        If `None`, then all available jurisdictions are searched.
    :type jurisdictions: None or iterable
    :param timeout: Time in seconds to wait for HTTP requests to complete.
    :type timeout: float
    :returns: tuple `(providers, results)` where
        - :py:data:`providers` -- provider entries that were queried.
        - :py:data:`results` -- per-provider results, or the exception raised
          (``gather`` is called with ``return_exceptions=True``).
    """
    if jurisdictions is None:
        jurisdictions = PROVIDERS.keys()
    providers = [PROVIDERS[j] for j in jurisdictions]

    async def async_helper():
        # get_running_loop() is the supported way to obtain the loop from
        # inside a coroutine; get_event_loop() there is deprecated since 3.10.
        loop = asyncio.get_running_loop()

        def generate_futures():
            for _, module in providers:
                yield loop.run_in_executor(
                    None, module.query_by_name, first, last, timeout
                )

        futures = list(generate_futures())
        results = await asyncio.gather(*futures, return_exceptions=True)
        return results

    results = asyncio.run(async_helper())
    return providers, results
import ray
import psutil
import time
def test_basic(ray_start_with_dashboard):
    """Dashboard test that starts a Ray cluster with a dashboard server running,
    then hits the dashboard API and asserts that it receives sensible data."""
    # Wait for the dashboard HTTP server to come up before probing anything.
    assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
            is True)
    address_info = ray_start_with_dashboard
    node_id = address_info["node_id"]
    # Redis address arrives as "host:port"; split it for the client below.
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2
    client = redis.StrictRedis(
        host=address[0],
        port=int(address[1]),
        password=ray_constants.REDIS_DEFAULT_PASSWORD)
    # The dashboard process must be registered on the node, and the legacy
    # reporter process must not be running alongside the new dashboard.
    all_processes = ray.worker._global_node.all_processes
    assert ray_constants.PROCESS_TYPE_DASHBOARD in all_processes
    assert ray_constants.PROCESS_TYPE_REPORTER not in all_processes
    dashboard_proc_info = all_processes[ray_constants.PROCESS_TYPE_DASHBOARD][
        0]
    dashboard_proc = psutil.Process(dashboard_proc_info.process.pid)
    assert dashboard_proc.status() in [
        psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING
    ]
    raylet_proc_info = all_processes[ray_constants.PROCESS_TYPE_RAYLET][0]
    raylet_proc = psutil.Process(raylet_proc_info.process.pid)
    def _search_agent(processes):
        # Identify the dashboard agent among the raylet's children by its
        # command line; returns None when no agent process is found.
        for p in processes:
            try:
                for c in p.cmdline():
                    if "new_dashboard/agent.py" in c:
                        return p
            except Exception:
                # Processes may exit (or deny access) between listing and
                # cmdline() -- ignore and keep scanning.
                pass
    # Test for bad imports, the agent should be restarted.
    logger.info("Test for bad imports.")
    agent_proc = _search_agent(raylet_proc.children())
    prepare_test_files()
    agent_pids = set()
    try:
        assert agent_proc is not None
        agent_proc.kill()
        agent_proc.wait()
        # The agent will be restarted for imports failure.
        for x in range(50):
            agent_proc = _search_agent(raylet_proc.children())
            if agent_proc:
                agent_pids.add(agent_proc.pid)
            # The agent should be restarted,
            # so we can break if the len(agent_pid) > 1
            if len(agent_pids) > 1:
                break
            time.sleep(0.1)
    finally:
        cleanup_test_files()
    # Seeing more than one distinct pid proves the agent was restarted.
    assert len(agent_pids) > 1, agent_pids
    # Kill whatever agent is currently up so the register test below
    # observes a fresh agent coming back.
    agent_proc = _search_agent(raylet_proc.children())
    if agent_proc:
        agent_proc.kill()
        agent_proc.wait()
    logger.info("Test agent register is OK.")
    wait_for_condition(lambda: _search_agent(raylet_proc.children()))
    assert dashboard_proc.status() in [
        psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING
    ]
    agent_proc = _search_agent(raylet_proc.children())
    agent_pid = agent_proc.pid
    # Check if agent register is OK.
    for x in range(5):
        logger.info("Check agent is alive.")
        agent_proc = _search_agent(raylet_proc.children())
        assert agent_proc.pid == agent_pid
        time.sleep(1)
    # Check redis keys are set.
    logger.info("Check redis keys are set.")
    dashboard_address = client.get(dashboard_consts.REDIS_KEY_DASHBOARD)
    assert dashboard_address is not None
    dashboard_rpc_address = client.get(
        dashboard_consts.REDIS_KEY_DASHBOARD_RPC)
    assert dashboard_rpc_address is not None
    key = f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{node_id}"
    agent_ports = client.get(key)
    assert agent_ports is not None
def arc_length_3point(A, B, C):
    """ Return the length of the circular arc through A, B and C; B lies between the endpoints """
    ### Meticulously transcribed from
    # https://develop.openfoam.com/Development/openfoam/-/blob/master/src/mesh/blockMesh/blockEdges/arcEdge/arcEdge.C
    start = np.asarray(A)
    mid = np.asarray(B)
    end = np.asarray(C)
    a = mid - start
    b = end - start
    # Locate the circle centre from the two chord vectors.
    asqr = a.dot(a)
    bsqr = b.dot(b)
    adotb = a.dot(b)
    denom = asqr * bsqr - adotb * adotb
    fact = 0.5 * (bsqr - adotb) / denom
    centre = start + 0.5 * a + fact * np.cross(np.cross(a, b), a)
    # Radial vectors from the centre to each of the three points.
    r1 = start - centre
    r2 = mid - centre
    r3 = end - centre
    # Angle subtended between the two end radii.
    angle = np.arccos(r1.dot(r3) / (norm(r1) * norm(r3)))
    # When B sits on the far side, the arc is the exterior (reflex) one.
    if np.dot(np.cross(r1, r2), np.cross(r1, r3)) < 0:
        angle = 2 * np.pi - angle
    return angle * norm(r3)
def connect_bucket(cfg):
    """ TODO: do we really need this? """
    # Pair the configured bucket name with a fresh S3 client.
    client = boto3.client('s3')
    return cfg.bucket, client
def dsymv(alpha, A, X, beta, Y, Uplo=CblasLower):
    """
    returns y'
    Computes the symmetric matrix-vector product and sum
    \M{y' = S{alpha} A x + S{beta} y}. Because A is symmetric only one
    triangle needs to be stored: the upper triangle and diagonal are read
    when Uplo is CblasUpper, the lower triangle and diagonal when Uplo is
    CblasLower.
    """
    # gsl_blas_dsymv writes in place, so work on a copy of Y.
    result = array_typed_copy(Y, Float)
    _gslwrap.gsl_blas_dsymv(Uplo, alpha, A, X, beta, result)
    return result
def view_categories(request):
    """View category posts view"""
    context = {
        'categories': Category.objects.all(),
        'view_rss': 'rss/categories.xml',
        'current_nav': 'categories',
    }
    # Merge in the variables shared by all blog views.
    context = __append_common_vars(request, context)
    return render(request, 'blog/categories.html', context)
def zfill_to_collection_size(index: int, collection_size: int) -> str:
    """
    Left-pad *index* with zeroes so that indexes sort lexicographically for a
    collection of the given size.
    Examples:
        for 10 items pads to width 2: 1 -> "01", 10 -> "10"
        for 100 items pads to width 3: 7 -> "007", 13 -> "013"
    """
    width = len(str(collection_size))
    return f"{index:0{width}d}"
import os.path
def parse_fname_meta(file):
    """
    Take a file name and separate out its metadata of interest
    (Serial ID, Meta tags, NN/classification tags).

    Rules imposed (see config discussion in the project notes):
      * UNTIL FIRST "_" is the Serial ID (Expect but don't force Date-Serial#)
      * END to LAST "_" (up to the extension ".") are CLASSIFY tags
        (separated by '-' if multiple)
      * ALL OTHERS (between first and last "_") are MetaData tags.

    INPUTS:
        file : file path/name to be parsed (only the basename is used).
    RETURN:
        serial_id : file ID (text before the first underscore).
        meta_tags : tags between the first and last underscore.
        class_tags: CLASSIFICATION tags; empty list when the name carries
                    no extension dot (the previous implementation raised
                    NameError for such names).
    RAISES:
        AssertionError when the name contains no "_" separators.
    """
    fname = os.path.basename(file)
    serial_id = ""
    meta_tags = []
    # None (not "") so "no extension dot seen" is distinguishable from an
    # empty classification section.
    class_tags_all = None
    # Walk the name once, cutting chunks on "_" markers.
    a = 0  # start index of the current chunk
    for b in range(len(fname)):
        if a == 0 and fname[b] == "_":      # first "_": chunk is the serial ID
            serial_id = fname[a:b]
            a = b + 1
        elif fname[b] == "_":               # later "_": chunk is a meta tag
            meta_tags.append(fname[a:b])
            a = b + 1
        elif fname[b] == '.':               # extension dot: chunk holds class tags
            class_tags_all = fname[a:b]
    # str.split('-') reproduces the previous manual scan exactly, including
    # empty pieces for doubled or trailing dashes.
    class_tags = [] if class_tags_all is None else class_tags_all.split('-')
    if not len(serial_id):
        raise AssertionError("File <" + fname + "> Has no Separators.\n" +
                             "Expected <SerialID>_<tags>_<classify>.csv")
    return serial_id, meta_tags, class_tags
def _make_grid_spec(storage) -> GridSpec:
    """Construct a :class:`GridSpec` from a storage specification mapping."""
    assert 'tile_size' in storage
    crs = CRS(storage['crs'])
    # Order tile size and resolution by the CRS's own dimension order.
    tile_size = [storage['tile_size'][dim] for dim in crs.dimensions]
    resolution = [storage['resolution'][dim] for dim in crs.dimensions]
    return GridSpec(crs=crs, tile_size=tile_size, resolution=resolution)
def gaussian_2d_cylind(shape, center, resolution, scale, sigma=1, min_range_rho=0):
    """Generate a gaussian map over a cylindrical (rho, phi) grid.
    Args:
        shape (list[int]): Shape of the map. [y, x]
        center: grid offset of the gaussian centre (only center[0] is used).
        resolution: (rho, theta) cell resolutions.
        scale: scale factor applied to both resolutions.
        sigma (float): Sigma to generate gaussian map. Defaults to 1.
        min_range_rho: offset added to every rho value.
    Returns:
        np.ndarray: Generated gaussian map. [rho, phi]
    """
    half_y, half_x = ((extent - 1.) / 2. for extent in shape)
    y, x = np.ogrid[-half_y:half_y + 1, -half_x:half_x + 1]
    rho_res, theta_res = resolution[0], resolution[1]
    rho = (x + center[0]) * rho_res * scale + min_range_rho
    theta_offset = y * theta_res * scale
    # Radial distance of the centre column (x offset == 0).
    rho_center = rho[:, (shape[1] - 1) // 2]
    # Law of cosines: squared euclidean distance between polar points
    # (rho, theta_offset) and (rho_center, 0).
    distance_square = rho ** 2 + rho_center ** 2 - 2 * rho * rho_center * np.cos(theta_offset)
    h = np.exp(-(distance_square / ((rho_res * scale) ** 2) / (2 * sigma * sigma)))
    # Zero out numerically negligible tails.
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    return h
from datetime import datetime
async def create_specific_guid(guid: str, data: GuidIn):
    """
    Create a record w/ a guid specified in the path.
    Also cleans up expired records & caches the new record.
    Raises an exception if you try to overwrite an existing record.
    """
    # Normalise/validate the path-supplied guid before touching storage.
    try:
        guid = validate_guid(guid)
    except ValueError:
        raise HTTPException(status_code=400, detail="Invalid guid")
    validated = data.dict()
    # Per the docstring, duplicates raise here; surface the message as a 400.
    try:
        await create_guid_record(guid, validated['name'], validated['expire'])
    except Exception as detail:
        raise HTTPException(status_code=400, detail=f'{detail}')
    # Build serialized response
    out = GuidOut(
        id=guid,
        expire=validated['expire'],
        name=validated['name'],
    )
    # Cache with a TTL matching the record's expiry. total_seconds() is used
    # because timedelta.seconds only holds the sub-day component (0-86399),
    # which silently truncated any TTL longer than one day.
    # NOTE(review): assumes ``timezone`` is imported at module level; only the
    # ``datetime`` class import is visible here -- confirm.
    ttl = validated['expire'] - datetime.now(timezone.utc)
    await cache.set(guid, out, ttl=int(ttl.total_seconds()))
    return out
def partition(predicate, values):
    """
    Split *values* into two lists based on the truth of ``predicate``:
    the first list holds items where the predicate is falsy, the second
    where it is truthy. e.g.:

    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
    """
    results = ([], [])
    for item in values:
        # bool() guards against predicates returning arbitrary truthy values
        # (e.g. 2), which would raise IndexError when used as a tuple index.
        results[bool(predicate(item))].append(item)
    return results
def get_colors(color='ALL'):
    """ get color palette as a dictionary """
    palette = color_dict['palette']
    # 'ALL' returns the whole palette; anything else selects one entry.
    return palette if color == 'ALL' else palette[color]
def avoid_line_too_long(pretty_html_text):
    """
    Detect any line of 998 or more characters and break it into ~900-character
    pieces, turning the last space of each piece into a newline.
    """
    fixed_lines = []
    for line in pretty_html_text.split('\n'):
        if len(line) >= 998:
            chunk = 900
            # Slice the long line into fixed-width pieces.
            pieces = [line[i:i + chunk] for i in range(0, len(line), chunk)]
            pieces = [_replace_from_end(piece, ' ', '\n', 1) for piece in pieces]
            fixed_lines.append(''.join(pieces))
        else:
            fixed_lines.append(line)
    return '\n'.join(fixed_lines)
def stringify_tags(tags, human_readable=False):
    """
    Format a list of Beancount tags as a string.

    Human-readable output joins with ", " and substitutes a localized
    placeholder for an empty list; otherwise tags are space-separated.
    """
    if human_readable:
        return _('无') if len(tags) == 0 else ', '.join(f'#{t}' for t in tags)
    return ' '.join(f'#{t}' for t in tags)
from datetime import datetime
def is_friday():
    """Return True when today's local date is a Friday (weekday 4, Mon=0)."""
    # ``datetime`` here is the class from ``from datetime import datetime``,
    # so the previous ``datetime.date.today()`` raised AttributeError
    # (``date`` is an instance method on the class, not the date type).
    return datetime.now().weekday() == 4
def _parse_exclude_images_commands(commands, experiments, reflections):
    """Parse exclude-images command line options into id/range tuples.

    e.g. commands = [['1:101:200'], ['0:201:300']],
    or commands = [['101:200']], allowable for a single experiment.
    Builds and returns a list of tuples (experiment_identifier, (start, stop)).
    """
    parsed = []
    for command in commands:
        parts = command[0].split(":")
        if len(parts) == 2:
            # start:stop form is only unambiguous with a single experiment.
            if len(experiments) > 1:
                raise ValueError(
                    "Exclude images must be in the form experimentnumber:start:stop for multiple experiments"
                )
            parsed.append(
                (experiments[0].identifier, (int(parts[0]), int(parts[1])))
            )
            continue
        if len(parts) != 3:
            raise ValueError(
                "Exclude images must be input in the form experimentnumber:start:stop, or start:stop for a single experiment"
            )
        dataset_id = int(parts[0])
        # Map the dataset number to its experiment identifier via the tables.
        for table in reflections:
            if dataset_id in table.experiment_identifiers():
                expid = table.experiment_identifiers()[dataset_id]
                parsed.append((expid, (int(parts[1]), int(parts[2]))))
                break
    return parsed
def thomson_spec_series(eleckineng, photeng, T, as_pairs=False):
    """ Thomson ICS spectrum of secondary photons by series method.
    Parameters
    ----------
    eleckineng : ndarray
        Incoming electron kinetic energy.
    photeng : ndarray
        Outgoing photon energy.
    T : float
        CMB temperature.
    as_pairs : bool
        If true, treats eleckineng and photeng as a paired list: produces eleckineng.size == photeng.size values. Otherwise, gets the spectrum at each photeng for each eleckineng, returning an array of length eleckineng.size*photeng.size.
    Returns
    -------
    ndarray
        dN/(dt dE) of the outgoing photons (dt = 1 s), with abscissa photeng.
    Notes
    -----
    Each contribution is evaluated from analytic series expansions
    (F1, F0, F_inv, F_log, F2, F_x_log), split into the regime where the
    CMB photon energy is below the outgoing photon energy ("low" terms)
    and the regime where it is above ("high" terms). The final addition
    order is chosen to limit catastrophic cancellation.
    """
    print('***** Computing Spectra by Analytic Series... *****')
    gamma = 1 + eleckineng/phys.me
    # Most accurate way of finding beta when beta is small, I think.
    beta = np.sqrt(eleckineng/phys.me*(gamma+1)/gamma**2)
    # Doppler-shifted integration limits in units of photeng/T.
    if as_pairs:
        lowlim = (1-beta)/(1+beta)*photeng/T
        upplim = (1+beta)/(1-beta)*photeng/T
    else:
        lowlim = np.outer((1-beta)/(1+beta), photeng/T)
        upplim = np.outer((1+beta)/(1-beta), photeng/T)
    eta = photeng/T
    # np.float128 keeps extra precision in the overall prefactor.
    prefac = np.float128(
        phys.c*(3/8)*phys.thomson_xsec/(4*gamma**2*beta**6)
        * (8*np.pi*T**2/(phys.ele_compton*phys.me)**3)
    )
    # Evaluate every series once over both integration ranges.
    print('Series 1/12...')
    F1_low = F1(lowlim, eta)
    print('Series 2/12...')
    F0_low = F0(lowlim, eta)
    print('Series 3/12...')
    F_inv_low = F_inv(lowlim, eta)[0]
    print('Series 4/12...')
    F_log_low = F_log(lowlim, eta)[0]
    print('Series 5/12...')
    F1_upp = F1(eta, upplim)
    print('Series 6/12...')
    F0_upp = F0(eta, upplim)
    print('Series 7/12...')
    F_inv_upp = F_inv(eta, upplim)[0]
    print('Series 8/12...')
    F_log_upp = F_log(eta, upplim)[0]
    print('Series 9/12...')
    F2_low = F2(lowlim, eta)[0]
    print('Series 10/12...')
    F2_upp = F2(eta, upplim)[0]
    print('Series 11/12...')
    F_x_log_low = F_x_log(lowlim, eta)[0]
    print('Series 12/12...')
    F_x_log_upp = F_x_log(eta, upplim)[0]
    # CMB photon energy less than outgoing photon energy.
    # F1_low an array of size [eleckineng, photeng].
    # We can take photeng*F1_vec_low for element-wise products.
    # In the other dimension, we must take transpose(eleckineng*transpose(x)).
    # CMB photon energy lower than outgoing photon energy
    term_low_1 = np.transpose(
        -(1/gamma**4)*np.transpose(
            photeng**2/T**2*F_inv_low
        )
    )
    term_low_2 = np.transpose(
        (
            (1-beta)*(
                beta*(beta**2 + 3) - (1/gamma**2)*(9 - 4*beta**2)
            )
            - 2/gamma**2*(3-beta**2)
            *(np.log1p(beta)-np.log1p(-beta))
        )*np.transpose(photeng/T*F0_low)
    ) - np.transpose(
        2/gamma**2*(3 - beta**2)*np.transpose(
            photeng/T*(-np.log(photeng/T))*F0_low
        )
    )
    term_low_3 = np.transpose(
        -2/gamma**2*(3 - beta**2)*np.transpose(
            photeng/T*F_log_low
        )
    )
    term_low_4 = np.transpose(
        -(2/gamma**2)*(3 - beta**2)
        *(np.log1p(beta)-np.log1p(-beta))*np.transpose(F1_low)
        +(2/gamma**2)*(3 - beta**2)*np.transpose(
            np.log(photeng/T)*F1_low
        )
        +(1+beta)*(
            beta*(beta**2 + 3) + (1/gamma**2)*(9 - 4*beta**2)
        )*np.transpose(F1_low)
    )
    term_low_5 = np.transpose(
        1/gamma**4*np.transpose(F2_low/(photeng/T))
    )
    term_low_6 = np.transpose(
        -2/gamma**2*(3 - beta**2)*np.transpose(F_x_log_low)
    )
    # CMB photon energy higher than outgoing photon energy
    term_high_1 = np.transpose(
        (1/gamma**4)*np.transpose(
            photeng**2/T**2*F_inv_upp
        )
    )
    term_high_2 = np.transpose(
        (
            (1+beta)*(
                beta*(beta**2 + 3) + (1/gamma**2)*(9 - 4*beta**2)
            )
            + (2/gamma**2)*(3-beta**2)
            *(np.log1p(-beta)-np.log1p(beta))
        )*np.transpose(photeng/T*F0_upp)
    ) + np.transpose(
        2/gamma**2*(3 - beta**2)*np.transpose(
            photeng/T*(-np.log(photeng/T))*F0_upp
        )
    )
    term_high_3 = np.transpose(
        2/gamma**2*(3 - beta**2)*np.transpose(
            photeng/T*F_log_upp
        )
    )
    term_high_4 = np.transpose(
        (2/gamma**2)*(3 - beta**2)
        *(np.log1p(-beta)-np.log1p(beta))*np.transpose(F1_upp)
        +(2/gamma**2)*(3 - beta**2)*np.transpose(
            -np.log(photeng/T)*F1_upp
        )
        +(1-beta)*(
            beta*(beta**2 + 3) - (1/gamma**2)*(9 - 4*beta**2)
        )*np.transpose(F1_upp)
    )
    term_high_5 = -np.transpose(
        1/gamma**4*np.transpose(F2_upp/(photeng/T))
    )
    term_high_6 = np.transpose(
        2/gamma**2*(3 - beta**2)*np.transpose(F_x_log_upp)
    )
    # Diagnostic dump of every intermediate; flip to True when debugging.
    testing = False
    if testing:
        print('***** Diagnostics *****')
        print('lowlim: ', lowlim)
        print('upplim: ', upplim)
        print('photeng/T: ', eta)
        print('beta: ', beta)
        print('***** epsilon < epsilon_1 *****')
        print('term_low_1: ', term_low_1)
        print('term_low_2: ', term_low_2)
        print('term_low_3: ', term_low_3)
        print('term_low_4: ', term_low_4)
        print('term_low_5: ', term_low_5)
        print('term_low_6: ', term_low_6)
        print('***** epsilon > epsilon_1 *****')
        print('term_high_1: ', term_high_1)
        print('term_high_2: ', term_high_2)
        print('term_high_3: ', term_high_3)
        print('term_high_4: ', term_high_4)
        print('term_high_5: ', term_high_5)
        print('term_high_6: ', term_high_6)
        print('***** Term Sums *****')
        print('term_low_1 + term_high_1: ', term_low_1 + term_high_1)
        print('term_low_2 + term_high_2: ', term_low_2 + term_high_2)
        print('term_low_3 + term_high_3: ', term_low_3 + term_high_3)
        print('term_low_4 + term_high_4: ', term_low_4 + term_high_4)
        print('term_low_5 + term_high_5: ', term_low_5 + term_high_5)
        print(
            'term_low_6 + term_high_6: ',
            term_low_6 + term_high_6
        )
        print('***** Prefactor *****')
        print(prefac)
        print('***** Total Sum *****')
        print(
            np.transpose(
                prefac*np.transpose(
                    (term_low_1 + term_high_1)
                    + (term_low_2 + term_high_2)
                    + (term_low_3 + term_high_3)
                    + (term_low_4 + term_high_4)
                    + (term_low_5 + term_high_5)
                    + (term_low_6 + term_high_6)
                )
            )
        )
        print('***** End Diagnostics *****')
    # Addition ordered to minimize catastrophic cancellation, but if this is important, you shouldn't be using this method.
    print('***** Analytic Series Computation Complete! *****')
    return np.transpose(
        prefac*np.transpose(
            (term_low_1 + term_high_1)
            + (term_low_2 + term_high_2)
            + (term_low_3 + term_high_3)
            + (term_low_4 + term_high_4)
            + (term_low_5 + term_high_5)
            + (term_low_6 + term_high_6)
        )
    )
def voxel_grid_sampling(coords: np.ndarray, voxel_size: float) -> np.ndarray:
    """Voxel grid sampling: downsample points by averaging per voxel.
    Args:
        coords: point coordinates (N, C)
        voxel_size: voxel grid edge length
    Returns:
        samples: one averaged coordinate per occupied voxel (M, C)
    """
    num_axes = coords.shape[1]
    # Integer voxel index of every point (truncation toward zero).
    voxel_idx = (coords / voxel_size).astype(np.int32)
    # Label each point with the ordinal of its voxel.
    _, labels = np.unique(voxel_idx, axis=0, return_inverse=True)
    table = pd.DataFrame(
        data=np.concatenate([labels[:, np.newaxis], coords], axis=1),
        columns=np.arange(num_axes + 1))
    # Mean coordinate per voxel label; groupby orders rows by label.
    return table.groupby(0).mean().to_numpy()
def compute_skewness(data):
    """ Skewness of the data, taken along the last axis (per channel).
    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
    Returns
    -------
    output : ndarray, shape (n_channels,)
    """
    return stats.skew(data, axis=data.ndim - 1)
def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
    """One-dimensional FFT of real-valued input.

    Only the non-negative frequency half of the corresponding :func:`fft`
    (up to and including the Nyquist term) is returned.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis in the output;
            defaults to the input length along ``axis``.
        axis (int): Axis over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): A cuFFT plan for
            transforming ``x`` over ``axis``; when ``None``, CuPy generates
            one automatically. A plan can be built with::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, n, axis,
                                                        value_type='R2C')

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fft.rfft`
    """
    out_shape = (n,)
    fft_axes = (axis,)
    return _fft(x, out_shape, fft_axes, norm, cufft.CUFFT_FORWARD, 'R2C',
                overwrite_x=overwrite_x, plan=plan)
from pathlib import Path
def copy_from_table_row(row, clobber=False):
    """Perform a single copy operation described by one row of a copy_table.

    Args:
        row (Mapping): row with at least ``kind`` and ``desti_path`` entries,
            plus ``source_path`` for non-directory kinds.
        clobber (bool): forwarded to ``copy_file``; overwrite existing files.

    Returns:
        Path or None: destination path from ``copy_file`` for file-like
        kinds; ``None`` for ``'dir'`` (``Path.mkdir`` returns ``None``).

    Raises:
        NotImplementedError: for an unrecognised ``kind``.
    """
    kind = row['kind']
    if kind == 'dir':
        return row["desti_path"].mkdir(parents=True, exist_ok=True)
    # TODO: 'symlink' and 'other' currently share the plain-file treatment
    # (a dedicated ``copy_symlink`` is still pending); the branches were
    # previously triplicated with identical bodies.
    if kind in ('file', 'symlink', 'other'):
        return Path(copy_file(src=row["source_path"], dst=row["desti_path"], clobber=clobber))
    raise NotImplementedError("Gus needs to write a copy function for kind: '{kind}'".format(kind=kind))
def mustache(template, partials=None, **kwargs):
    """Usage:
    {{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}
    or, with partials
    {{ mustache('path/to/whatever.mustache', partials={'partial_name': 'path/to/partial.mustache'}, \
    key1=value1.. keyn=valuen) }}
    This uses the regular Jinja2 loader to find the templates, so your
    *.mustache files will need to be available in that path.
    """
    # TODO: cache loaded templates
    template = get_template(template)
    _partials = None
    if partials:
        # dict.iteritems() does not exist on Python 3; items() works on both.
        _partials = {name: get_template(path) for name, path in partials.items()}
    renderer = pystache.Renderer(partials=_partials)
    return renderer.render(template, kwargs, encoding='utf-8')
from typing import List
from typing import Dict
from typing import Any
def from_protos(
    proto_list: List[american_option_pb2.AmericanEquityOption],
    american_option_config: "AmericanOptionConfig" = None
) -> Dict[str, Any]:
  """Creates a dictionary of preprocessed swap data, keyed by instrument hash."""
  grouped = {}
  for proto in proto_list:
    h, currency = _get_hash(proto)
    expiry = proto.expiry_date
    expiry = [expiry.year, expiry.month, expiry.day]
    amount = instrument_utils.decimal_to_double(proto.contract_amount)
    strike = instrument_utils.decimal_to_double(proto.strike)
    # Converted for every proto (as before), though only the first instrument
    # of each hash stores the result.
    convention = business_days.convention_from_proto_value(
        proto.business_day_convention)
    calendar = business_days.holiday_from_proto_value(proto.bank_holidays)
    name = proto.metadata.id
    instrument_type = proto.metadata.instrument_type
    if h not in grouped:
      # First instrument for this hash: start fresh per-field batches.
      grouped[h] = {"short_position": [proto.short_position],
                    "currency": currency,
                    "expiry_date": [expiry],
                    "equity": [proto.equity],
                    "contract_amount": [amount],
                    "business_day_convention": convention,
                    "calendar": calendar,
                    "strike": [strike],
                    "is_call_option": [proto.is_call_option],
                    "settlement_days": [proto.settlement_days],
                    "american_option_config": american_option_config,
                    "batch_names": [[name, instrument_type]]}
    else:
      batch = grouped[h]
      batch["short_position"].append(proto.short_position)
      batch["expiry_date"].append(expiry)
      batch["equity"].append(proto.equity)
      batch["contract_amount"].append(amount)
      batch["strike"].append(strike)
      batch["is_call_option"].append(proto.is_call_option)
      batch["settlement_days"].append(proto.settlement_days)
      batch["batch_names"].append([name, instrument_type])
  return grouped
def proxy_capture(self, link_guid, user_agent=''):
"""
start warcprox process. Warcprox is a MITM proxy server and needs to be running
before, during and after phantomjs gets a screenshot.
Create an image from the supplied URL, write it to disk and update our asset model with the path.
The heavy lifting is done by PhantomJS, our headless browser.
This whole function runs with the local dir set to a temp dir by run_in_tempdir().
So we can use local paths for temp files, and they'll just disappear when the function exits.
"""
# basic setup
link = Link.objects.get(guid=link_guid)
target_url = link.submitted_url
# Override user_agent for now, since PhantomJS doesn't like some user agents.
# This user agent is the Chrome on Linux that's most like PhantomJS 1.9.8.
user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.36 (KHTML, like Gecko) Chrome/13.0.766.0 Safari/534.36"
print "%s: Fetching %s" % (link_guid, target_url)
# suppress verbose warcprox logs
logging.disable(logging.INFO)
# Set up an exception we can trigger to halt capture and release all the resources involved.
class HaltCaptureException(Exception):
pass
meta_thread = browser = robots_txt_thread = warcprox_controller = warcprox_thread = favicon_capture_url = None
have_warc = False
try:
# create a request handler class that counts unique requests and responses
unique_requests = set()
unique_responses = set()
count_lock = threading.Lock()
class CountingRequestHandler(WarcProxyHandler):
def _proxy_request(self):
with count_lock:
unique_requests.add(self.url)
WarcProxyHandler._proxy_request(self)
with count_lock:
unique_responses.add(self.url)
# connect warcprox to an open port
warcprox_port = 27500
recorded_url_queue = queue.Queue()
for i in xrange(500):
try:
proxy = WarcProxy(
server_address=("127.0.0.1", warcprox_port),
recorded_url_q=recorded_url_queue,
req_handler_class=CountingRequestHandler
)
break
except socket_error as e:
if e.errno != errno.EADDRINUSE:
raise
warcprox_port += 1
else:
raise self.retry(exc=Exception("WarcProx couldn't find an open port."))
proxy_address = "127.0.0.1:%s" % warcprox_port
# set up requests getter for one-off requests outside of selenium
def proxied_get_request(url):
return requests.get(url,
headers={'User-Agent': user_agent},
proxies={'http': 'http://' + proxy_address, 'https': 'http://' + proxy_address},
verify=False)
# start warcprox in the background
warc_writer = WarcWriter(gzip=True, port=warcprox_port)
warc_writer_thread = WarcWriterThread(recorded_url_q=recorded_url_queue, warc_writer=warc_writer)
warcprox_controller = WarcproxController(proxy, warc_writer_thread)
warcprox_thread = threading.Thread(target=warcprox_controller.run_until_shutdown, name="warcprox", args=())
warcprox_thread.start()
print "WarcProx opened."
# fetch page in the background
print "Fetching url."
browser = get_browser(user_agent, proxy_address, proxy.ca.ca_file)
browser.set_window_size(1024, 800)
start_time = time.time()
page_load_thread = threading.Thread(target=browser.get, args=(target_url,)) # returns after onload
page_load_thread.start()
page_load_thread.join(ONLOAD_EVENT_TIMEOUT)
# wait for the HAR log to report that at least one resource has successfully loaded
while True:
har_log_entries = json.loads(browser.get_log('har')[0]['message'])['log']['entries']
if har_log_entries:
break
if time.time() - start_time > RESOURCE_LOAD_TIMEOUT:
raise HaltCaptureException
time.sleep(1)
# use the HAR log to retrieve the URL we ended up, after any forwards,
# and the content type.
content_type = ''
for header in har_log_entries[0]['response']['headers']:
if header['name'].lower() == 'content-type':
content_type = header['value'].lower()
break
content_url = har_log_entries[0]['request']['url']
have_html = content_type and content_type.startswith('text/html')
print "Finished fetching url."
# get favicon urls
favicon_urls = []
if have_html:
favicons = browser.find_elements_by_xpath('//link[@rel="icon" or @rel="shortcut icon"]')
for candidate_favicon in favicons:
if candidate_favicon.get_attribute('href'):
candidate_favicon_url = urlparse.urljoin(content_url, candidate_favicon.get_attribute('href'))
favicon_urls.append(candidate_favicon_url)
# favicon_extension = candidate_favicon_url.rsplit('.',1)[-1]
# if favicon_extension in ['ico', 'gif', 'jpg', 'jpeg', 'png']:
favicon_urls.append(urlparse.urljoin(content_url, '/favicon.ico'))
# Here we fetch everything in the page that's marked as a favicon, for archival purposes.
# But we only record a favicon as our favicon_capture_url if it passes a mimetype whitelist.
for favicon_url in favicon_urls:
print "Fetching favicon from %s ..." % favicon_url
try:
favicon_response = proxied_get_request(favicon_url)
assert favicon_response.ok
except (requests.ConnectionError, requests.Timeout, AssertionError):
continue
# apply mime type whitelist
if not favicon_response.headers.get('content-type', '').split(';')[0] in VALID_FAVICON_MIME_TYPES:
continue
# record the first valid favicon as our favicon_capture_url
if not favicon_capture_url:
favicon_capture_url = favicon_url
if not favicon_capture_url:
print "Couldn't get favicon"
# fetch robots.txt in the background
def robots_txt_thread():
print "Fetching robots.txt ..."
robots_txt_location = urlparse.urljoin(content_url, '/robots.txt')
try:
robots_txt_response = proxied_get_request(robots_txt_location)
assert robots_txt_response.ok
except (requests.ConnectionError, requests.Timeout, AssertionError):
print "Couldn't reach robots.txt"
return
# We only want to respect robots.txt if Perma is specifically asked not to archive (we're not a crawler)
if 'Perma' in robots_txt_response.content:
# We found Perma specifically mentioned
rp = robotparser.RobotFileParser()
rp.parse([line.strip() for line in robots_txt_response.content.split('\n')])
if not rp.can_fetch('Perma', target_url):
save_fields(link, dark_archived_robots_txt_blocked=True)
print "Robots.txt fetched."
robots_txt_thread = threading.Thread(target=robots_txt_thread, name="robots")
robots_txt_thread.start()
if have_html:
# get page title
print "Getting title."
if browser.title:
save_fields(link, submitted_title=browser.title)
# check meta tags
# (run this in a thread and give it long enough to find the tags, but then let other stuff proceed)
print "Checking meta tags."
def meta_thread():
# get all meta tags
meta_tags = browser.find_elements_by_tag_name('meta')
# first look for <meta name='perma'>
meta_tag = next((tag for tag in meta_tags if tag.get_attribute('name').lower()=='perma'), None)
# else look for <meta name='robots'>
if not meta_tag:
meta_tag = next((tag for tag in meta_tags if tag.get_attribute('name').lower() == 'robots'), None)
# if we found a relevant meta tag, check for noarchive
if meta_tag and 'noarchive' in meta_tag.get_attribute("content").lower():
save_fields(link, dark_archived_robots_txt_blocked=True)
print "Meta found, darchiving"
meta_thread = threading.Thread(target=meta_thread)
meta_thread.start()
meta_thread.join(ELEMENT_DISCOVERY_TIMEOUT*2)
# scroll to bottom of page and back up, in case that prompts anything else to load
try:
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
browser.execute_script("window.scrollTo(0, 0);")
except WebDriverException:
pass
# make sure all requests are finished
print "Waiting for post-load requests."
start_time = time.time()
time.sleep(min(AFTER_LOAD_TIMEOUT, 5))
while len(unique_responses) < len(unique_requests):
print "%s/%s finished" % (len(unique_responses), len(unique_requests))
if time.time() - start_time > AFTER_LOAD_TIMEOUT:
print "Waited %s seconds to finish post-load requests -- giving up." % AFTER_LOAD_TIMEOUT
break
time.sleep(.5)
# get page size to decide whether to take a screenshot
capture_screenshot = False
try:
root_element = browser.find_element_by_tag_name('body')
except NoSuchElementException:
try:
root_element = browser.find_element_by_tag_name('frameset')
except NoSuchElementException:
root_element = None
if root_element:
page_size = root_element.size
pixel_count = page_size['width'] * page_size['height']
capture_screenshot = pixel_count < settings.MAX_IMAGE_SIZE
# take screenshot after all requests done
if capture_screenshot:
print "Taking screenshot."
screenshot_data = browser.get_screenshot_as_png()
link.screenshot_capture.write_warc_resource_record(screenshot_data)
save_fields(link.screenshot_capture, status='success')
else:
print "Not saving screenshots! Page size is %s pixels." % pixel_count
save_fields(link.screenshot_capture, status='failed')
else:
# no screenshot if not HTML
save_fields(link.screenshot_capture, status='failed')
have_warc = True
except HaltCaptureException:
pass
except Exception as e:
print e
raise
finally:
# teardown (have to do this before save to make sure WARC is done writing):
print "Shutting down browser and proxies."
if browser:
browser.quit() # shut down phantomjs
# This can be removed when this bugfix ships in selenium:
# https://code.google.com/p/selenium/issues/detail?id=8498
browser.service.process.stdin.close()
if meta_thread:
meta_thread.join() # wait until meta thread is done
if robots_txt_thread:
robots_txt_thread.join() # wait until robots thread is done
if warcprox_controller:
warcprox_controller.stop.set() # send signal to shut down warc thread
if warcprox_thread:
warcprox_thread.join() # wait until warcprox thread is done writing out warc
# un-suppress logging
logging.disable(logging.NOTSET)
# save generated warc file
if have_warc:
print "Saving WARC."
try:
temp_warc_path = os.path.join(warc_writer.directory,
warc_writer._f_finalname)
with open(temp_warc_path, 'rb') as warc_file:
link.write_warc_raw_data(warc_file)
save_fields(link.primary_capture, status='success', content_type=content_type)
# We only save the Capture for the favicon once the warc is stored,
# since the data for the favicon lives in the warc.
if favicon_capture_url:
Capture(
link=link,
role='favicon',
status='success',
record_type='response',
url=favicon_capture_url
).save()
print "Saved favicon at %s" % favicon_capture_url
print "Writing CDX lines to the DB"
CDXLine.objects.create_all_from_link(link)
except Exception as e:
print "Web Archive File creation failed for %s: %s" % (target_url, e)
save_fields(link.primary_capture, warc_capture='failed')
print "%s capture done." % link_guid | 6174b416ee001b35075d9d89f374dddadd839475 | 3,627,495 |
def create_operator(key, value, context):
    """
    Create an operator instance together with its arguments.

    Args:
        key:
            operator name (a "map" prefix is normalized to "map_iterable")
        value:
            operator arguments: a list (positional values and/or
            (name, value) tuples), a dict of keyword arguments, or a
            single scalar argument
        context (Dict):
            Python dictionary holding all imported modules

    Returns:
        Tasrif pipeline object
    """
    spec = key.replace("map", "map_iterable")[1:]
    operator_cls = _get_operator(spec, context)
    if isinstance(value, list):
        # sequence/compose operators consume the raw list as-is
        if spec in ("sequence", "compose"):
            return operator_cls(value)
        # otherwise tuples become keyword arguments, everything else positional
        positional = [item for item in value if not isinstance(item, tuple)]
        keyword = {item[0]: item[1] for item in value if isinstance(item, tuple)}
        return operator_cls(*positional, **keyword)
    if isinstance(value, dict):
        return operator_cls(**value)
    if value:
        return operator_cls(value)
    return operator_cls()
import logging
def get_combined_segmentation_points(ts, loc_df, time_query, filters_in_df, filter_methods):
    """
    Segment trips separately for every location filter present in the
    dataframe and merge the results.

    A time range can contain points with mixed filters for two reasons:
    a) the user switches phones from one platform to another (supported),
    b) the user is signed in on devices of both platforms at once
    (unsupported — trips won't really match up even on one platform).

    We handle case (a) by building one time query per filter type, running
    the matching segmentation method over that sub-range, and concatenating
    the resulting segmentation points ordered by each sub-range's start
    timestamp.
    """
    points_by_start = {}
    for location_filter in filters_in_df:
        matching_points = loc_df[loc_df["filter"] == location_filter]
        # restrict the query to the span covered by this filter's points
        time_query.startTs = matching_points.head(1).iloc[0].ts
        time_query.endTs = matching_points.tail(1).iloc[0].ts
        logging.debug("for filter %s, startTs = %d and endTs = %d" %
            (location_filter, time_query.startTs, time_query.endTs))
        points_by_start[time_query.startTs] = \
            filter_methods[location_filter].segment_into_trips(ts, time_query)
    logging.debug("After filtering, segmentation_map has keys %s" % list(points_by_start.keys()))
    # stitch the per-filter results back together in chronological order
    combined_points = []
    for start_ts in sorted(points_by_start):
        combined_points.extend(points_by_start[start_ts])
    return combined_points
import os
import re
def is_experiment(names):
    """Check whether the supplied name(s) follow the experiment naming rules.

    Args:
        names (str or list of str): name(s) found in the path.

    Returns:
        bool: True if any supplied name matches at least one regular
        expression loaded from ``experiment_rules.json``; False otherwise
        (including for unsupported input types, where no check is run).
    """
    regexps = get_regular_expressions(os.path.join(dir_rules, 'experiment_rules.json'))
    conditions = []
    # isinstance (rather than type() ==) also accepts str/list subclasses,
    # and tuples of names are now handled like lists
    if isinstance(names, str):
        conditions.append([re.compile(x).search(names) is not None
                           for x in regexps])
    elif isinstance(names, (list, tuple)):
        for word in names:
            conditions.append([re.compile(x).search(word) is not None
                               for x in regexps])
    return any(flatten(conditions))
def est_positif_non_nul(valeur: any):
    """Return True if the supplied value is a strictly positive integer.

    Integer-valued floats (e.g. ``2.0``) are accepted as well; ``None``
    and every other type yield False.
    """
    if valeur is None:
        return False
    if isinstance(valeur, int):
        return valeur > 0
    if isinstance(valeur, float) and valeur.is_integer():
        return valeur > 0
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.