| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
async def test_t3_gc_will_not_intervene_for_regular_users_and_their_resources(
simcore_services_ready,
client,
socketio_client_factory: Callable,
aiopg_engine,
fake_project: Dict,
tests_data_dir: Path,
):
"""after a USER disconnects the GC will remove none of its projects or templates nor the user itself"""
number_of_projects = 5
number_of_templates = 5
logged_user = await login_user(client)
user_projects = [
await new_project(client, logged_user, tests_data_dir)
for _ in range(number_of_projects)
]
user_template_projects = [
await get_template_project(client, logged_user, fake_project)
for _ in range(number_of_templates)
]
async def assert_projects_and_users_are_present():
# check user and projects and templates are still in the DB
assert await assert_user_in_database(aiopg_engine, logged_user) is True
for project in user_projects:
assert await assert_project_in_database(aiopg_engine, project) is True
for template in user_template_projects:
assert await assert_project_in_database(aiopg_engine, template) is True
assert await assert_users_count(aiopg_engine, 1) is True
expected_count = number_of_projects + number_of_templates
assert await assert_projects_count(aiopg_engine, expected_count) is True
# connect the user and wait for gc
sio_connection_data = await connect_to_socketio(
client, logged_user, socketio_client_factory
)
await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)
await assert_projects_and_users_are_present()
await disconnect_user_from_socketio(client, sio_connection_data)
await asyncio.sleep(WAIT_FOR_COMPLETE_GC_CYCLE)
await assert_projects_and_users_are_present()
| 17,500
|
def extraction_closure(video_root, frame_root):
"""Closure that returns function to extract frames for video list."""
def func(video_list):
for video in video_list:
# str.rstrip('.mp4') strips a character set, not the suffix; remove the extension explicitly
frame_dir = video[:-len('.mp4')] if video.endswith('.mp4') else video
frame_path = os.path.join(frame_root, frame_dir)
os.makedirs(frame_path, exist_ok=True)
extract_frames(video, video_root, frame_root)
return func
| 17,501
|
def superimpose_tetrahedrons(listofvecs):
"""superimpose a list of different tetrahedrons (in the form of 4 vectors), and plot 3D figure of the superimposition."""
fig = plt.figure()
vecs0 = np.mean(np.array(listofvecs), axis=0)#np.mean(listofvecs)
for vecs in listofvecs:
rotated_vecs = match_tetrahedron(vecs0, vecs)
draw_tetrahedron(fig, rotated_vecs)
plt.show()
| 17,502
|
def make_input_data_kmls(rundata):
"""
Produce kml files for the computational domain, all gauges and regions
specified, and all topo and dtopo files specified in rundata.
This can be used, e.g. by adding the lines
from clawpack.geoclaw import kmltools
kmltools.make_input_data_kmls(rundata)
to the end of a `setrun.py` file so that `make data` will generate all
kml files in addition to the `*.data` files.
"""
import os
from . import topotools, dtopotools
regions2kml(rundata, combined=False)
gauges2kml(rundata)
topofiles = rundata.topo_data.topofiles
for f in topofiles:
topo_file_name = f[-1]
topo_type = f[0]
topo2kml(topo_file_name, topo_type)
dtopofiles = rundata.dtopo_data.dtopofiles
for f in dtopofiles:
dtopo_file_name = f[-1]
dtopo_type = f[0]
dtopo2kml(dtopo_file_name, dtopo_type)
| 17,503
|
def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
:alias_main: paddle.std
:alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std
Computes the standard-deviation of the input Variable's elements along the specified
axis.
Args:
input (Variable): The input Variable whose standard-deviation is computed, with
data types float32 and float64 supported.
axis (list|int, optional): The axis along which the standard-deviation is computed.
If `None`, compute the standard-deviation over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute standard-deviation via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the resulting
standard-deviation. Default None.
name (str, optional): The name for this layer. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result standard-deviation with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
standard-deviation, otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.std(x) # [0.28252685]
paddle.std(x, axis=[0]) # [0.0707107, 0.07071075, 0.07071064, 0.1414217]
paddle.std(x, axis=[-1]) # [0.30956957, 0.29439208]
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'std')
tmp = var(input, axis=axis, keepdim=keepdim, unbiased=unbiased, name=name)
tmp = layers.sqrt(tmp)
if out is not None:
layers.assign(input=tmp, output=out)
return out
else:
return tmp
| 17,504
|
def predict_model(model_name, data_file_name):
"""This function predicts house prices based on input data"""
model_path = os.path.join(config.TRAINED_MODEL_DIR, model_name)
data_file_path = os.path.join(config.DATA_DIR, data_file_name)
pipe = joblib.load(model_path)
data = pd.read_csv(data_file_path)
prediction = pipe.predict(data)
return prediction
| 17,505
|
def test_tagging():
"""Test the tagging functionality of this extension."""
try:
# TODO: serial.save should be able to take an open file-like object so
# we can direct its output to a StringIO or something and not need to
# screw around like this in tests that don't actually need to touch
# the filesystem. /dev/null would work but the test would fail on
# Windows.
fd, fn = tempfile.mkstemp(suffix='.pkl')
os.close(fd)
# Test that the default key gets created.
def_model = MockModel()
def_model.monitor = MockMonitor()
def_ext = MonitorBasedSaveBest(channel_name='foobar', save_path=fn)
def_ext.setup(def_model, None, None)
assert 'MonitorBasedSaveBest' in def_model.tag
# Test with a custom key.
model = MockModel()
model.monitor = MockMonitor()
model.monitor.channels['foobar'] = MockChannel()
ext = MonitorBasedSaveBest(channel_name='foobar', tag_key='test123',
save_path=fn)
# Best cost is initially infinity.
ext.setup(model, None, None)
assert model.tag['test123']['best_cost'] == float("inf")
# Best cost after one iteration.
model.monitor.channels['foobar'].val_record.append(5.0)
ext.on_monitor(model, None, None)
assert model.tag['test123']['best_cost'] == 5.0
# Best cost after a second, worse iteration.
model.monitor.channels['foobar'].val_record.append(7.0)
ext.on_monitor(model, None, None)
assert model.tag['test123']['best_cost'] == 5.0
# Best cost after a third iteration better than 2 but worse than 1.
model.monitor.channels['foobar'].val_record.append(6.0)
ext.on_monitor(model, None, None)
assert model.tag['test123']['best_cost'] == 5.0
# Best cost after a fourth, better iteration.
model.monitor.channels['foobar'].val_record.append(3.0)
ext.on_monitor(model, None, None)
assert model.tag['test123']['best_cost'] == 3.0
finally:
os.remove(fn)
| 17,506
|
def write_yaml(data, path):
"""Helper function to write data to a YAML file."""
path = Path(path)
log.info('Writing {}'.format(path))
with path.open('w') as fh:
ruamel.yaml.round_trip_dump(data, fh)
| 17,507
|
def check_file_behaviour(file_hash):
"""
Returns the file execution report.
"""
params = {
'hash': file_hash
}
api_endpoint = 'file/behaviour'
return http_request('GET', api_endpoint, params, DEFAULT_HEADERS)
| 17,508
|
def usage(error=''):
"""
Application usage
Args:
error (string): error message to display
"""
if len(error) > 0:
print(u'Error: %s' % error)
print(u'')
print(u'Usage: remotedev -E|--execenv -D|--devenv <-c|--conf "config filepath"> <-p|--prof "profile name"> <-d|--debug> <-h|--help>')
print(u' -E|--execenv: launch remotedev with execution env behavior, send updated files from mapped directories to development env and send log messages.')
print(u' -D|--devenv: launch remotedev with development env behavior, send files from your cloned repo to remote.')
print(u' -c|--conf: configuration filepath. If not specified, the configuration file in the user home dir is used')
print(u' -p|--prof: profile name to launch (doesn\'t launch wizard)')
print(u' -d|--debug: enable debug.')
print(u' -v|--version: display version.')
print(u' -h|--help: display this help.')
| 17,509
|
def is_access_group(outer_key, inner_key) -> dict:
"""Return the access-group values (index or name) as a dict."""
values = {}
if outer_key == "access-group":
if inner_key.get('index') is not None:
values['index'] = ', '.join(is_instance(inner_key.get('index', {})))
elif inner_key.get('name') is not None:
values['name'] = ', '.join(is_instance(inner_key.get('name', {})))
return values
| 17,510
|
def test_brain_models():
"""
Tests the introspection and creation of CIFTI-2 BrainModelAxis axes
"""
bml = list(get_brain_models())
assert len(bml[0]) == 3
assert (bml[0].vertex == -1).all()
assert (bml[0].voxel == [[0, 1, 2], [0, 4, 0], [0, 4, 2]]).all()
assert bml[0][1][0] == 'CIFTI_MODEL_TYPE_VOXELS'
assert (bml[0][1][1] == [0, 4, 0]).all()
assert bml[0][1][2] == axes.BrainModelAxis.to_cifti_brain_structure_name('thalamus_right')
assert len(bml[1]) == 4
assert (bml[1].vertex == -1).all()
assert (bml[1].voxel == [[0, 0, 0], [0, 1, 2], [0, 4, 0], [0, 4, 2]]).all()
assert len(bml[2]) == 3
assert (bml[2].voxel == -1).all()
assert (bml[2].vertex == [0, 5, 10]).all()
assert bml[2][1] == ('CIFTI_MODEL_TYPE_SURFACE', 5, 'CIFTI_STRUCTURE_CORTEX_LEFT')
assert len(bml[3]) == 4
assert (bml[3].voxel == -1).all()
assert (bml[3].vertex == [0, 5, 10, 13]).all()
assert bml[4][1] == ('CIFTI_MODEL_TYPE_SURFACE', 9, 'CIFTI_STRUCTURE_CORTEX_RIGHT')
assert len(bml[4]) == 3
assert (bml[4].voxel == -1).all()
assert (bml[4].vertex == [2, 9, 14]).all()
for bm, label, is_surface in zip(bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'],
(False, False, True, True)):
assert np.all(bm.surface_mask == ~bm.volume_mask)
structures = list(bm.iter_structures())
assert len(structures) == 1
name = structures[0][0]
assert name == axes.BrainModelAxis.to_cifti_brain_structure_name(label)
if is_surface:
assert bm.nvertices[name] == 15
else:
assert name not in bm.nvertices
assert (bm.affine == rand_affine).all()
assert bm.volume_shape == vol_shape
bmt = bml[0] + bml[1] + bml[2]
assert len(bmt) == 10
structures = list(bmt.iter_structures())
assert len(structures) == 3
for bm, (name, _, bm_split) in zip(bml[:3], structures):
assert bm == bm_split
assert (bm_split.name == name).all()
assert bm == bmt[bmt.name == bm.name[0]]
assert bm == bmt[np.where(bmt.name == bm.name[0])]
bmt = bmt + bml[2]
assert len(bmt) == 13
structures = list(bmt.iter_structures())
assert len(structures) == 3
assert len(structures[-1][2]) == 6
# break brain model
bmt.affine = np.eye(4)
with pytest.raises(ValueError):
bmt.affine = np.eye(3)
with pytest.raises(ValueError):
bmt.affine = np.eye(4).flatten()
bmt.volume_shape = (5, 3, 1)
with pytest.raises(ValueError):
bmt.volume_shape = (5., 3, 1)
with pytest.raises(ValueError):
bmt.volume_shape = (5, 3, 1, 4)
with pytest.raises(IndexError):
bmt['thalamus_left']
# Test the constructor
bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4))
assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5)
assert np.array_equal(bm_vox.vertex, np.full(5, -1))
assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1))
with pytest.raises(ValueError):
# no volume shape
axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4))
with pytest.raises(ValueError):
# no affine
axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4))
with pytest.raises(ValueError):
# incorrect name
axes.BrainModelAxis('random_name', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4))
with pytest.raises(ValueError):
# negative voxel indices
axes.BrainModelAxis('thalamus_left', voxel=-np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4))
with pytest.raises(ValueError):
# no voxels or vertices
axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4))
with pytest.raises(ValueError):
# incorrect voxel shape
axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 2), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4))
bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20})
assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5)
assert np.array_equal(bm_vertex.vertex, np.full(5, 1))
assert np.array_equal(bm_vertex.voxel, np.full((5, 3), -1))
with pytest.raises(ValueError):
axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int))
with pytest.raises(ValueError):
axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20})
with pytest.raises(ValueError):
axes.BrainModelAxis('cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20})
# test from_mask errors
with pytest.raises(ValueError):
# affine should be 4x4 matrix
axes.BrainModelAxis.from_mask(np.arange(5) > 2, affine=np.ones(5))
with pytest.raises(ValueError):
# only 1D or 3D masks accepted
axes.BrainModelAxis.from_mask(np.ones((5, 3)))
# tests error in adding together or combining as ParcelsAxis
bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int),
affine=np.eye(4), volume_shape=(2, 3, 4))
bm_vox + bm_vox
assert (bm_vertex + bm_vox)[:bm_vertex.size] == bm_vertex
assert (bm_vox + bm_vertex)[:bm_vox.size] == bm_vox
for bm_added in (bm_vox + bm_vertex, bm_vertex + bm_vox):
assert bm_added.nvertices == bm_vertex.nvertices
assert np.all(bm_added.affine == bm_vox.affine)
assert bm_added.volume_shape == bm_vox.volume_shape
axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_vox)])
with pytest.raises(Exception):
bm_vox + get_label()
bm_other_shape = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int),
affine=np.eye(4), volume_shape=(4, 3, 4))
with pytest.raises(ValueError):
bm_vox + bm_other_shape
with pytest.raises(ValueError):
axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_shape)])
bm_other_affine = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int),
affine=np.eye(4) * 2, volume_shape=(2, 3, 4))
with pytest.raises(ValueError):
bm_vox + bm_other_affine
with pytest.raises(ValueError):
axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_affine)])
bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20})
bm_other_number = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30})
with pytest.raises(ValueError):
bm_vertex + bm_other_number
with pytest.raises(ValueError):
axes.ParcelsAxis.from_brain_models([('a', bm_vertex), ('b', bm_other_number)])
# test equalities
bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int),
affine=np.eye(4), volume_shape=(2, 3, 4))
bm_other = deepcopy(bm_vox)
assert bm_vox == bm_other
bm_other.voxel[1, 0] = 0
assert bm_vox != bm_other
bm_other = deepcopy(bm_vox)
bm_other.vertex[1] = 10
assert bm_vox == bm_other, 'vertices are ignored in volumetric BrainModelAxis'
bm_other = deepcopy(bm_vox)
bm_other.name[1] = 'BRAIN_STRUCTURE_OTHER'
assert bm_vox != bm_other
bm_other = deepcopy(bm_vox)
bm_other.affine[0, 0] = 10
assert bm_vox != bm_other
bm_other = deepcopy(bm_vox)
bm_other.affine = None
assert bm_vox != bm_other
assert bm_other != bm_vox
bm_other = deepcopy(bm_vox)
bm_other.volume_shape = (10, 3, 4)
assert bm_vox != bm_other
bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20})
bm_other = deepcopy(bm_vertex)
assert bm_vertex == bm_other
bm_other.voxel[1, 0] = 0
assert bm_vertex == bm_other, 'voxels are ignored in surface BrainModelAxis'
bm_other = deepcopy(bm_vertex)
bm_other.vertex[1] = 10
assert bm_vertex != bm_other
bm_other = deepcopy(bm_vertex)
bm_other.name[1] = 'BRAIN_STRUCTURE_CORTEX_RIGHT'
assert bm_vertex != bm_other
bm_other = deepcopy(bm_vertex)
bm_other.nvertices['BRAIN_STRUCTURE_CORTEX_LEFT'] = 50
assert bm_vertex != bm_other
bm_other = deepcopy(bm_vertex)
bm_other.nvertices['BRAIN_STRUCTURE_CORTEX_RIGHT'] = 20
assert bm_vertex != bm_other
assert bm_vox != get_parcels()
assert bm_vertex != get_parcels()
| 17,511
|
def update_documentation():
"""
Update the documentation on the current host.
This method is host agnostic
"""
root_path = '/opt/webshop-demo'
with cd(root_path):
with prefix("source %s/bin/activate" % root_path):
run('pip install sphinx_rtd_theme')
with cd('nereid-webshop'):
run('python setup.py build_sphinx')
| 17,512
|
def SLC_deramp(SLC_1, SLC_par1, SLC_2, SLC_par2, mode, dop_ph='-', logpath=None):
"""
| Calculate and subtract Doppler phase from an SLC image
| Copyright 2016, Gamma Remote Sensing, v1.5 4-Feb-2016 clw
Parameters
----------
SLC_1:
(input) SLC data file (fcomplex or scomplex format)
SLC_par1:
(input) SLC parameter file with Doppler information
SLC_2:
(output) SLC with Doppler phase removed (or added)
SLC_par2:
(output) SLC parameter file for the output SLC
mode:
mode of operation:
* 0: subtract Doppler phase ramp (deramp)
* 1: add Doppler phase ramp (reramp)
dop_ph:
(output) Doppler phase (FLOAT)
Note: SLC_par1 contains the Doppler polynomial that is used to calculate the Doppler phase ramp
logpath: str or None
a directory to write command logfiles to
"""
process(['/cluster/GAMMA_SOFTWARE-20161207/ISP/bin/SLC_deramp', SLC_1, SLC_par1, SLC_2, SLC_par2, mode, dop_ph], logpath=logpath)
| 17,513
|
def lidar_to_cam_frame(xyz_lidar, frame_calib):
"""Transforms points in lidar frame to the reference camera (cam 0) frame
Args:
xyz_lidar: points in lidar frame
frame_calib: FrameCalib frame calibration
Returns:
ret_xyz: (N, 3) points in reference camera (cam 0) frame
"""
# Pad the r0_rect matrix to a 4x4
r0_rect_mat = frame_calib.r0_rect
r0_rect_mat = np.pad(r0_rect_mat, ((0, 1), (0, 1)),
'constant', constant_values=0)
r0_rect_mat[3, 3] = 1
# Pad the vel_to_cam matrix to a 4x4
tf_mat = frame_calib.velo_to_cam
tf_mat = np.pad(tf_mat, ((0, 1), (0, 0)),
'constant', constant_values=0)
tf_mat[3, 3] = 1
# Pad the point cloud with 1's for the transformation matrix multiplication
one_pad = np.ones(xyz_lidar.shape[0]).reshape(-1, 1)
xyz_lidar = np.append(xyz_lidar, one_pad, axis=1)
# p_cam = P2 * (R0_rect * Tr_velo_to_cam * p_velo)
rectified = np.dot(r0_rect_mat, tf_mat)
ret_xyz = np.dot(rectified, xyz_lidar.T)
# Return (N, 3) points
return ret_xyz[0:3].T
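# Hedged usage sketch (not from the original source): with an identity rectification
# matrix and an identity velodyne-to-camera transform, points should pass through
# unchanged. FrameCalib is faked here with a SimpleNamespace carrying only the two
# attributes the function reads.
from types import SimpleNamespace
import numpy as np

_fake_calib = SimpleNamespace(
    r0_rect=np.eye(3),             # 3x3, padded to 4x4 inside the function
    velo_to_cam=np.eye(4)[:3, :],  # 3x4, padded to 4x4 inside the function
)
_pts = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(np.allclose(lidar_to_cam_frame(_pts, _fake_calib), _pts))  # True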
| 17,514
|
def create_logger(model_name: str, saved_path: str):
"""Create logger for both console info and saved info
"""
logger = logging.getLogger(model_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{saved_path}/{model_name}.log")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = RuntimeFormatter('[Time: %(asctime)s] - [PID: %(process)d] - [Model: %(name)s] \n%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
| 17,515
|
def hann_sinc_low_pass(x: Tensor, N: int, fs: int, fc: float) -> Tensor:
"""Hann windowed ideal low pass filter.
Args:
x: [n_batch, 1, n_sample]
N: the window spans [-N, N], i.e. 2N+1 taps in total.
fs: sampling rate in Hz.
fc: cutoff frequency in Hz.
Returns:
y: [n_batch, 1, n_sample]
"""
w = continuous_hann_sinc_filter(fs, fc, 2*N+1, x.dtype, x.device)
w = (w / w.sum()).view(1, 1, -1)
return torch.nn.functional.conv1d(x, w, padding=N)
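# Minimal self-contained sketch of the same idea (only PyTorch assumed). The helper
# continuous_hann_sinc_filter is not shown above, so the kernel below is a plain
# Hann-windowed sinc built directly; the 2N+1 tap count mirrors the function above.
import torch

def _hann_sinc_kernel(fs: int, fc: float, num_taps: int) -> torch.Tensor:
    # ideal low-pass impulse response sinc(2*fc/fs * n), tapered by a Hann window
    n = torch.arange(num_taps, dtype=torch.float32) - (num_taps - 1) / 2
    h = torch.sinc(2 * fc / fs * n) * torch.hann_window(num_taps, periodic=False)
    return (h / h.sum()).view(1, 1, -1)

_N = 32
_x = torch.randn(2, 1, 1000)                         # [n_batch, 1, n_sample]
_w = _hann_sinc_kernel(fs=16000, fc=2000.0, num_taps=2 * _N + 1)
_y = torch.nn.functional.conv1d(_x, _w, padding=_N)  # same length as the input
print(_y.shape)                                      # torch.Size([2, 1, 1000])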
| 17,516
|
def trusted_zone(engine_db, table: str):
"""
# TRUSTED ZONE
Transform the JSON data into a tabular format for the `TRUSTED` schema/dataset.
Parameters
----------
engine_db : connection
The connection to send data into the data warehouse
table : str
Name of the table used to save into storage and the data warehouse.
"""
schema = 'trusted'
drop_old_table = f"DROP TABLE IF EXISTS {schema}.{table};"
new_table = f"""
CREATE TABLE {schema}.{table} AS
SELECT *
FROM raw.{table}
WHERE refdate = (SELECT MAX(refdate) FROM raw.{table})
AND reftime = (SELECT MAX(reftime) FROM raw.{table})
"""
engine_db.execute(drop_old_table)
engine_db.execute(new_table)
print(f"===> Success to save {schema}.{table}.")
| 17,517
|
def test_vi_green(spectral_index_test_data):
"""Test for PlantCV."""
index_array = spectral_index.vi_green(spectral_index_test_data.load_hsi(), distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
| 17,518
|
def _patch_doctest_namespace(doctest_namespace):
"""Patch the namespace for doctests.
This function adds some packages to namespace of every doctest.
"""
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
| 17,519
|
def viz_use(df, ticks, title='default', outpath='./ctt_shift/overlap.pdf'):
"""
Heatmap visualization
:param df: an instance of pandas DataFrame
:return:
"""
corr = df.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool is removed in recent NumPy releases
mask[np.triu_indices_from(mask, 1)] = True
for idx in range(len(mask)):
for jdx in range(len(mask[0])):
if idx == jdx:
mask[idx][jdx] = False
# print(mask)
center = np.median([item for item in df.to_numpy().ravel() if item != 1])
a4_dims = (16.7, 12.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.set(font_scale=1.2)
viz_plot = sns.heatmap(
df, mask=mask, annot=True, cbar=False,
ax=ax, annot_kws={"size": 36}, cmap='RdBu_r',
vmin=df.values.min(), fmt='.3f', center=center
) # center=0,
plt.xlabel('Temporal Domain', fontsize=25)
plt.ylabel('Temporal Domain', fontsize=25)
plt.xticks([item+0.5 for item in range(len(ticks))], ticks, rotation=0, fontsize=25)
plt.yticks([item+0.5 for item in range(len(ticks))], ticks, rotation=0, fontsize=25)
plt.title(title, fontsize=36)
ax.set_facecolor("white")
viz_plot.get_figure().savefig(outpath, format='pdf')
plt.close()
| 17,520
|
def ask_for_missing_options(arguments: CommandLineArguments, root: tk.Tk) -> ProgramOptions:
"""
Complete the missing information by asking the user interactively.
"""
values = copy.deepcopy(arguments)
if values.source_directory is None:
values.source_directory = insist_for_directory(
"Ordner mit allen Bildern auswaehlen.",
"Quellverzeichnis muss ausgewaehlt sein.",
root,
)
if len(values.target_directories) == 0:
values.target_directories.append(
insist_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
"Mindestens ein Zielverzeichnis muss ausgewaehlt sein.",
root,
)
)
is_more_to_add = tkmb.askyesno(message="Ein weiteres Zielverzeichnis angeben?")
while is_more_to_add:
possible_directory = ask_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
root,
)
if possible_directory is None:
tkmb.showwarning(message="Kein Verzeichnis gewaehlt!")
else:
values.target_directories.append(possible_directory)
is_more_to_add = tkmb.askyesno(message="Noch ein weiteres Zielverzeichnis angeben?")
program_options = ProgramOptions(
values.source_directory,
values.target_directories,
)
return program_options
| 17,521
|
def matrix2quaternion(m):
"""Returns quaternion of given rotation matrix.
Parameters
----------
m : list or numpy.ndarray
3x3 rotation matrix
Returns
-------
quaternion : numpy.ndarray
quaternion [w, x, y, z] order
Examples
--------
>>> import numpy as np
>>> from skrobot.coordinates.math import matrix2quaternion
>>> matrix2quaternion(np.eye(3))
array([1., 0., 0., 0.])
"""
m = np.array(m, dtype=np.float64)
tr = m[0, 0] + m[1, 1] + m[2, 2]
if tr > 0:
S = math.sqrt(tr + 1.0) * 2
qw = 0.25 * S
qx = (m[2, 1] - m[1, 2]) / S
qy = (m[0, 2] - m[2, 0]) / S
qz = (m[1, 0] - m[0, 1]) / S
elif (m[0, 0] > m[1, 1]) and (m[0, 0] > m[2, 2]):
S = math.sqrt(1. + m[0, 0] - m[1, 1] - m[2, 2]) * 2
qw = (m[2, 1] - m[1, 2]) / S
qx = 0.25 * S
qy = (m[0, 1] + m[1, 0]) / S
qz = (m[0, 2] + m[2, 0]) / S
elif m[1, 1] > m[2, 2]:
S = math.sqrt(1. + m[1, 1] - m[0, 0] - m[2, 2]) * 2
qw = (m[0, 2] - m[2, 0]) / S
qx = (m[0, 1] + m[1, 0]) / S
qy = 0.25 * S
qz = (m[1, 2] + m[2, 1]) / S
else:
S = math.sqrt(1. + m[2, 2] - m[0, 0] - m[1, 1]) * 2
qw = (m[1, 0] - m[0, 1]) / S
qx = (m[0, 2] + m[2, 0]) / S
qy = (m[1, 2] + m[2, 1]) / S
qz = 0.25 * S
return np.array([qw, qx, qy, qz])
| 17,522
|
def paging_forward(data_func, *args):
"""
Creates a "forward" button for switching to the next page of a list
:param data_func: func from UI.buttons; the action that the button will return
:return: InlineKeyboardButton
"""
g_data = loads(data_func(*args).callback_data)
g_data['page'] += 1
text = '>>'
return InlineKeyboardButton(text, callback_data=dumps(g_data))
| 17,523
|
def test_hydrogen_like_sgd_vmc(caplog):
"""Test the wavefn exp(-a * r) converges (in 3-D) to a = nuclear charge with SGD."""
(
params,
nuclear_charge,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
key,
data,
local_energy_fn,
) = _setup_hla_hyperparams_and_model()
_, params, _, _ = sgd_vmc_loop_with_logging(
caplog,
data,
params,
key,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
local_energy_fn,
)
# Make sure the decay rate converged to the nuclear charge, since we're in 3-d
np.testing.assert_allclose(jax.tree_leaves(params)[0], nuclear_charge, rtol=1e-5)
| 17,524
|
def synchronized(func):
"""Synchronizes method invocation on an object using the method name as the mutex"""
def wrapper(self,*__args,**__kw):
try:
rlock = getattr(self, '_sync_lock_%s' % func.__name__)
except AttributeError:
from threading import RLock
rlock = self.__dict__.setdefault('_sync_lock_%s' % func.__name__, RLock())
rlock.acquire()
try:
return func(self,*__args,**__kw)
finally:
rlock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
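# Hypothetical usage sketch (not part of the original snippet): each decorated method
# gets its own re-entrant lock stored on the instance, so concurrent calls to the same
# method on the same object are serialized.
import threading

class _Counter:
    def __init__(self):
        self.value = 0

    @synchronized
    def increment(self):
        self.value += 1

_c = _Counter()
_threads = [threading.Thread(target=lambda: [_c.increment() for _ in range(1000)])
            for _ in range(4)]
for _t in _threads:
    _t.start()
for _t in _threads:
    _t.join()
print(_c.value)  # 4000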
| 17,525
|
def vector(location_1, location_2):
"""
Returns the unit vector from location_1 to location_2
location_1, location_2: carla.Location objects
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return [x / norm, y / norm, z / norm]
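# Hedged usage check: any object with x/y/z attributes works as a stand-in for
# carla.Location, so SimpleNamespace is used here for illustration.
from types import SimpleNamespace
import numpy as np

_a = SimpleNamespace(x=0.0, y=0.0, z=0.0)
_b = SimpleNamespace(x=3.0, y=4.0, z=0.0)
print(vector(_a, _b))  # approximately [0.6, 0.8, 0.0]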
| 17,526
|
def test_pandas_code_snippets(
app, client, tmpdir, monkeypatch,
template_name, script_name, expected_columns
):
"""Bit of a complicated test, but TLDR: test that the API example Python
scripts work.
This test is a bit complicated and is pretty low impact, so if you're
struggling to maintain this, I recommend adding a `@pytest.mark.skip` on
top of the test function.
"""
# We need to mock a few things to test that the Pandas code works:
monkeypatch.setitem(
app.jinja_env.globals,
'url_for',
lambda loc, **kwargs: loc
)
class MockResponse:
def __init__(self, data):
self.json = lambda: json.loads(data)
def _get(loc: str, **kwargs):
reversed_url_map = {
i.endpoint: i.rule
for i
in app.url_map.iter_rules()
}
res = client.get(reversed_url_map[loc])
return MockResponse(data=res.data)
monkeypatch.setattr(requests, 'get', _get)
# Now let's render the code:
py_code = app.jinja_env \
.get_template(template_name) \
.render()
f = tmpdir.mkdir('code').join(f'{script_name}.py')
f.write(py_code)
# Import the script as a module
sys.path.append(f.dirname)
__import__(script_name)
mod = sys.modules[script_name]
assert hasattr(mod, 'df')
assert isinstance(mod.df, pd.DataFrame) # noqa
assert all([c in mod.df.columns for c in expected_columns])
| 17,527
|
async def test_step_user(opp):
"""Test that the user step works."""
conf = {
CONF_USERNAME: "user@host.com",
CONF_PASSWORD: "123abc",
}
with patch(
"openpeerpower.components.tile.async_setup_entry", return_value=True
), patch("openpeerpower.components.tile.config_flow.async_login"):
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "user@host.com"
assert result["data"] == {
CONF_USERNAME: "user@host.com",
CONF_PASSWORD: "123abc",
}
| 17,528
|
def test_parse_date():
"""
Test that the parse_date helper correctly parses a date
"""
assert parse_date("2020-10-01") == datetime.date(2020, 10, 1)
assert parse_date(None) is None
assert parse_date("Foo") == "Foo"
assert parse_date("hello_a_long_string") == "hello_a_long_string"
| 17,529
|
def getOpenOCO(recvWindow=""):
"""# Query Open OCO (USER_DATA)
#### `GET /api/v3/openOrderList (HMAC SHA256)`
### Weight: 3
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
recvWindow |LONG |NO |The value cannot be greater than 60000
timestamp |LONG |YES |
Data Source: """
endpoint = '/api/v3/openOrderList'
params = {
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params)
| 17,530
|
def pixel_unshuffle(scale):
""" Pixel unshuffle.
Args:
x (Tensor): Input feature with shape (b, c, hh, hw).
scale (int): Downsample ratio.
Returns:
Tensor: the pixel unshuffled feature.
"""
if scale == 1:
return lambda x: x
def f(x):
b, c, hh, hw = x.size()
out_channel = c * (scale**2)
assert hh % scale == 0 and hw % scale == 0
h = hh // scale
w = hw // scale
x_view = x.view(b, c, h, scale, w, scale)
return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
return f
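# Quick sanity check (PyTorch assumed): the returned function is the inverse of
# torch.nn.PixelShuffle for the same scale.
import torch

_x = torch.randn(1, 3, 8, 8)
_unshuffled = pixel_unshuffle(2)(_x)               # (1, 3, 8, 8) -> (1, 12, 4, 4)
_restored = torch.nn.PixelShuffle(2)(_unshuffled)  # (1, 12, 4, 4) -> (1, 3, 8, 8)
print(torch.equal(_x, _restored))                  # True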
| 17,531
|
def flag(name, thing=None):
"""Generate an Attribute with that name which is valued True or False."""
if thing is None:
thing = Keyword(name)
return attr(name, thing, "Flag")
| 17,532
|
def select_id_from_scores_dic(id1, id2, sc_dic,
get_worse=False,
rev_filter=False):
"""
Based on ID to score mapping, return better (or worse) scoring ID.
>>> id1 = "id1"
>>> id2 = "id2"
>>> id3 = "id3"
>>> sc_dic = {'id1' : 5, 'id2': 3, 'id3': 3}
>>> select_id_from_scores_dic(id1, id2, sc_dic)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, get_worse=True)
'id2'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True, get_worse=True)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True)
'id2'
>>> select_id_from_scores_dic(id2, id3, sc_dic)
False
"""
sc_id1 = sc_dic[id1]
sc_id2 = sc_dic[id2]
if sc_id1 > sc_id2:
if rev_filter:
if get_worse:
return id1
else:
return id2
else:
if get_worse:
return id2
else:
return id1
elif sc_id1 < sc_id2:
if rev_filter:
if get_worse:
return id2
else:
return id1
else:
if get_worse:
return id1
else:
return id2
else:
return False
| 17,533
|
def add_args(parser):
"""Add arguments to the argparse.ArgumentParser
Args:
parser: argparse.ArgumentParser
Returns:
parser: a parser added with args
"""
# Training settings
parser.add_argument(
"--task",
type=str,
default="train",
metavar="T",
help="the type of task: train or denoise",
)
parser.add_argument(
"--datadir",
type=str,
metavar="DD",
help="data directory for training",
)
parser.add_argument(
"--noisy_wav",
type=str,
metavar="NW",
help="path to noisy wav",
)
parser.add_argument(
"--denoised_wav",
type=str,
default="denoised_sample.wav",
metavar="DW",
help="path to denoised wav",
)
parser.add_argument(
"--pretrained",
type=str,
default=None,
metavar="PT",
help="path to pre-trainedmodel",
)
parser.add_argument(
"--saved_model_path",
type=str,
default="model.pth",
metavar="SMP",
help="path to trained model",
)
parser.add_argument(
"--partition_ratio",
type=float,
default=1 / 3,
metavar="PR",
help="partition ratio for trainig (default: 1/3)",
)
parser.add_argument(
"--batch_size",
type=int,
default=5,
metavar="BS",
help="input batch size for training (default: 5)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.3)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="momentum (default: 0.9)",
)
parser.add_argument(
"--noise_amp",
type=float,
default=0.01,
metavar="NA",
help="amplitude of added noise for trainign (default: 0.01)",
)
parser.add_argument(
"--split_sec",
type=float,
default=1.0,
metavar="SS",
help="interval for splitting [sec]",
)
parser.add_argument(
"--epochs",
type=int,
default=5,
metavar="EP",
help="how many epochs will be trained",
)
parser.add_argument(
"--sampling_rate",
type=int,
default=16000,
metavar="SR",
help="sampling rate",
)
parser.add_argument(
"--log_interval",
type=int,
default=2,
metavar="LI",
help="log interval",
)
parser.add_argument(
"--path_to_loss",
type=str,
default=None,
metavar="PL",
help="path to png filw which shows the transtion of loss",
)
return parser
| 17,534
|
def test_build_all_isolated():
"""Test building all packages in an isolated workspace"""
pass
| 17,535
|
def run_interactive(package: str, action: str, *args: Any, **_kwargs: Any) -> Any:
"""Call the given action's run"""
action_cls = get(package, action)
app, interaction = args
return action_cls(app.args).run(app=app, interaction=interaction)
| 17,536
|
def test_input_files(files, test_type, tmpdir):
""" Test for access to Sample input files. """
file_text = " ".join(files)
sample_data = {SAMPLE_NAME_COLNAME: "test-sample", DATA_SOURCE_COLNAME: file_text}
s = Sample(sample_data)
assert file_text == s.data_source
assert files == s.input_file_paths
if test_type == "to_disk":
path_sample_file = tmpdir.join("test-sample.yaml").strpath
s.to_yaml(path_sample_file)
print("Sample items: {}".format(s.items()))
with open(path_sample_file) as sf:
reloaded_sample_data = yaml.load(sf, SafeLoader)
print("reloaded keys: {}".format(list(reloaded_sample_data.keys())))
try:
s_reloaded = Sample(reloaded_sample_data)
except Exception:
with open(path_sample_file) as sf:
print("LINES (below):\n{}".format("".join(sf.readlines())))
raise
assert files == s_reloaded.input_file_paths
| 17,537
|
def test_droprows_2(generator):
""" Test DropRows
test for factor = 2, 3, 4, 8 that the output is equivalent to applying a
rolling window and taking the mean over the samples.
size of chunk is 5 rows.
"""
for factor in [2, 3, 4, 8]:
generator.reset()
node = DropRows(factor=factor, method="mean")
looper = Looper(node=node, generator=generator)
out_data, _ = looper.run(chunk_size=10)
expected = (
generator._data.rolling(window=factor, min_periods=factor, center=False)
.mean()
.iloc[np.arange(factor - 1, len(generator._data), factor)]
)
pd.testing.assert_frame_equal(
out_data.iloc[: len(generator._data) // factor], expected
)
| 17,538
|
def pythagorean_heuristic(start_point: Tuple[int, int], end_point: Tuple[int, int]) -> float:
"""Return the distance between start_point and end_point using the pythagorean distance
"""
x1, y1 = start_point
x2, y2 = end_point
distance = (((x2 - x1) ** 2) + ((y2 - y1) ** 2)) ** 0.5
return distance
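# Tiny usage check: for the classic 3-4-5 triangle this matches math.dist (Python 3.8+).
import math

print(pythagorean_heuristic((0, 0), (3, 4)))  # 5.0
print(math.dist((0, 0), (3, 4)))              # 5.0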
| 17,539
|
def run_simulation(x, simulation_time, dt, rates, sampling_time):
"""
Runs a simulation and stores the sampled sequences in the matrix `sequences` (nb_nucleotide * nb_sequences).
x is modified during the simulation. The original sequence is included in the sequences matrix, as the first column.
"""
ii = 0
time = np.arange(0, simulation_time + 1, dt)
nb_samples = simulation_time // sampling_time
sequences = np.zeros(shape=(len(x), nb_samples + 1), dtype=bool)
for t in time:
if (t % sampling_time == 0):
sequences[:, ii] = x
ii += 1
x = simulation_step(x, dt, rates)
return sequences
| 17,540
|
def test_what_decoder_dtype():
"""Verify what decoder output dtype."""
z_whats = torch.rand(3, 4, 5)
decoder = WhatDecoder(z_what_size=5)
outputs = decoder(z_whats)
assert outputs.dtype == torch.float
assert (outputs >= 0).all()
assert (outputs <= 1).all()
| 17,541
|
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password")
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
| 17,542
|
def process_item(item_soup):
"""Parse information about a single podcast episode.
@param item_soup: Soup containing information about a single podcast
episode.
@type item_soup: bs4.BeautifulSoup
@return: Dictionary describing the episode. Contains keys name (str value),
date (datetime.date), loc (url - str value), duration (seconds - int),
and orig_tags (tags applied to episode - list of str)
@rtype: dict
"""
title = item_soup.find('title').contents[0].strip()
loc = item_soup.find('guid').contents[0]
pub_date_raw = item_soup.find('pubdate').contents[0]
pub_date = common.interpret_2822_date(pub_date_raw)
# materialize as a list: tags is used twice below (membership test and sorted(set(...)))
tags = [category.contents[0] for category in item_soup.findAll('category')]
duration_soup = item_soup.find('itunes:duration')
if duration_soup is None:
duration = 1800 if 'shorts' in tags else 3600
else:
duration_str = duration_soup.contents[0]
duration = common.interpret_duration(duration_str)
return {
'name': title,
'date': pub_date,
'tags': sorted(set(tags)),
'loc': loc,
'duration': duration
}
| 17,543
|
@contextmanager  # from contextlib; needed so the generator below works as a context manager
def working_directory(path):
"""
Temporarily change working directory.
A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
Usage:
```python
# Do something in original directory
with working_directory('/my/new/path'):
# Do something in new directory
# Back to old directory
```
"""
prev_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
| 17,544
|
def update_webhook(request, log, tenantId, groupId, policyId, webhookId, data):
"""
Update a particular webhook.
A webhook may (but need not) include some arbitrary metadata, and must
include a name.
If successful, no response body will be returned.
Example request::
{
"name": "alice",
"metadata": {
"notes": "this is for Alice"
}
}
"""
rec = get_store().get_scaling_group(log, tenantId, groupId)
deferred = rec.update_webhook(policyId, webhookId, data)
return deferred
| 17,545
|
def configure_master_account_parameters(event):
"""
Update the Master account parameter store in us-east-1 with the deployment_account_id
then updates the main deployment region with that same value
"""
parameter_store_master_account_region = ParameterStore(os.environ["AWS_REGION"], boto3)
parameter_store_master_account_region.put_parameter('deployment_account_id', event['account_id'])
parameter_store_deployment_account_region = ParameterStore(event['deployment_account_region'], boto3)
parameter_store_deployment_account_region.put_parameter('deployment_account_id', event['account_id'])
| 17,546
|
def create_solutions_visualization(results_filename, html_filename):
""" Create a multi-tab visualization of remixt solutions
"""
try:
os.remove(html_filename)
except OSError:
pass
bokeh.plotting.output_file(html_filename)
logging.info('generating solutions tables')
with pd.HDFStore(results_filename, 'r') as store:
solutions = list(retrieve_solutions(store))
chromosome_plot_info = retrieve_chromosome_plot_info(store, solutions[0])
cnv_selected_data, brk_selected_data = create_cnv_brk_data(store, solutions[0], chromosome_plot_info)
cnv_solution_sources = {}
brk_solution_sources = {}
for solution in solutions:
cnv_source, brk_source = create_cnv_brk_data(store, solution, chromosome_plot_info)
cnv_solution_sources[solution] = cnv_source
brk_solution_sources[solution] = brk_source
solutions_data = retrieve_solution_data(store)
read_depth_data = prepare_read_depth_data(store, solution)
assert solutions_data.notnull().all().all()
assert read_depth_data.notnull().all().all()
logging.info('creating bokeh data sources')
cnv_selected_source = bokeh.models.ColumnDataSource(cnv_selected_data)
brk_selected_source = bokeh.models.ColumnDataSource(brk_selected_data)
solutions_source = bokeh.models.ColumnDataSource(solutions_data)
read_depth_source = bokeh.models.ColumnDataSource(read_depth_data)
# TODO: selecting the data source doesnt work
solution_select = create_source_select(
[
(cnv_selected_source, cnv_solution_sources),
(brk_selected_source, brk_solution_sources),
],
"Solution:",
'solutions',
)
logging.info('building bokeh interface')
# Create main interface
tabs = bokeh.models.Tabs()
tabs.tabs.append(build_solutions_panel(solutions_source, read_depth_source))
tabs.tabs.append(build_genome_panel(cnv_selected_source, brk_selected_source, chromosome_plot_info))
input_box = bokeh.models.Column(solution_select)
main_box = bokeh.layouts.row(input_box, tabs)
bokeh.plotting.save(main_box)
| 17,547
|
def covariance_from_internal(internal_values, constr):
"""Undo a cholesky reparametrization."""
chol = chol_params_to_lower_triangular_matrix(internal_values)
cov = chol @ chol.T
return cov[np.tril_indices(len(chol))]
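# Self-contained numpy sketch of the same round trip. The helper
# chol_params_to_lower_triangular_matrix is not shown above; it is assumed here to
# fill the lower triangle of the Cholesky factor row-wise from the flat internal values.
import numpy as np

_internal = np.array([1.0, 0.5, 2.0, 0.1, 0.2, 1.5])  # 3x3 lower triangle, row-wise
_chol = np.zeros((3, 3))
_chol[np.tril_indices(3)] = _internal
_cov = _chol @ _chol.T
print(_cov[np.tril_indices(3)])  # external parameters: lower triangle of the covariance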
| 17,548
|
def find_result_node(desc, xml_tree):
"""
Returns the <result> node with a <desc> child matching the given text.
Eg: if desc = "text to match", this function will find the following
result node:
<result>
<desc>text to match</desc>
</result>
Parameters
-----
desc : the text contained in the desc node
xml_tree : the xml tree to search for the <result> node
Returns
-----
node : the <result> node containing the child with the given desc
"""
result_nodes = xml_tree.findall("result")
for result_node in result_nodes:
result_desc = result_node.find("desc").text.strip()
if result_desc == desc:
return result_node
return None
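# Hedged usage sketch: build the XML from the docstring with the standard library and
# look up a <result> node by its <desc> text.
import xml.etree.ElementTree as ET

_tree = ET.fromstring(
    "<results>"
    "<result><desc>text to match</desc></result>"
    "<result><desc>something else</desc></result>"
    "</results>"
)
_node = find_result_node("text to match", _tree)
print(_node is not None)  # True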
| 17,549
|
def y(instance):
"""Syntactic sugar to find all y-coordinates of a given class instance.
Convenience function to return all associated y-coordinates
of a given class instance.
Parameters
----------
instance : DataContainer, Mesh, R3Vector, np.array, list(RVector3)
Return the associated coordinate positions for the given class instance.
"""
return __getCoords('y', 1, instance)
| 17,550
|
def interp_xzplane(y, u, y_target=0.0):
"""Perform linear interpolation of the 3D data at given y-location.
Parameters
----------
y : numpy.ndarray of floats
The y-coordinates along a vertical gridline as a 1D array.
u : numpy.ndarray of floats
The 3D data.
y_target : float (optional)
The y-coordinate at which to interpolate the data.
Returns
-------
u_target : numpy.ndarray of floats
The 2D interpolated data.
"""
idx = numpy.where(y >= y_target)[0][0]
y0, y1 = y[idx - 1], y[idx]
u0, u1 = u[:, idx - 1, :], u[:, idx, :]
u_target = u0 + (y_target - y0) * (u1 - u0) / (y1 - y0)
return u_target
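# Small self-contained check (the module is assumed to import numpy under the name
# `numpy`, as used in the function body): for data that is linear in y the
# interpolation is exact.
import numpy

_y = numpy.linspace(0.0, 1.0, 11)                                   # vertical gridline
_u = numpy.tile(_y[numpy.newaxis, :, numpy.newaxis], (4, 1, 5))     # u == y everywhere
print(numpy.allclose(interp_xzplane(_y, _u, y_target=0.25), 0.25))  # True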
| 17,551
|
def train_transfer(x_train, y_train, vocab_processor, pretrain_emb, x_dev, y_dev,
source_ckpt, target_ckpt, pretrained_values=None):
"""
Train a transfer model on target task: must pass "pretrained_values"
Build model architecture using target task data,
then load pre-trained model's weight value to it (instead of rand init)
"""
# Output directory for models and summaries and csvs
if FLAGS.emb_mode != 'rand':
assert int(len(vocab_processor.vocabulary_)) == int(pretrain_emb.shape[0]), "vocab length not equal to pretrain embedding row!"
assert int(FLAGS.embedding_dim) == int(pretrain_emb.shape[1]), "pretrain embedding col not equal to embedding_dim!"
if FLAGS.train_sick:
datasetname = "SICK" + str(FLAGS.sent_col)+ "_" + str(FLAGS.sent_label) + "_"
else:
datasetname = "SE_"
today = str(datetime.date.today())
timestamp = datasetname + FLAGS.model_type + "_"
if FLAGS.model_type == 'rnn':
timestamp += FLAGS.cell_type + "_"
timestamp += 'emb-'+FLAGS.emb_mode + "_"
timestamp += 'finetune_' if FLAGS.finetune else 'freeze_'
timestamp += 'batchsize' + str(FLAGS.batch_size) + "_"
timestamp += "evalevery" + str(FLAGS.evaluate_every)
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", today, timestamp))
print("========Writing runs to {}\n".format(out_dir))
checkpoint_dir = target_ckpt
checkpoint_prefix = os.path.join(checkpoint_dir, "modelbest")
if not os.path.exists(checkpoint_dir):
raise ValueError("new directory has not been created yet to save the transfer model!")
# write to csv
csv_outdir = os.path.abspath(os.path.join(os.path.curdir,"runs", "results_csv", FLAGS.model_type))
csv_filename = datasetname + str(FLAGS.model_type)
if FLAGS.model_type == 'rnn':
csv_filename += '_'+str(FLAGS.cell_type)
csv_filename += '_'+str(FLAGS.emb_mode) + "_tune" + str(FLAGS.finetune)
csv_filename += '_batchsize' + str(FLAGS.batch_size)
csv_filename += "_evalevery" + str(FLAGS.evaluate_every)
csv_filename_train = os.path.abspath(os.path.join(csv_outdir, csv_filename+'_train_transfer.csv'))
csv_filename_test = os.path.abspath(os.path.join(csv_outdir, csv_filename+'_test_transfer.csv'))
print("========Writing train csv to {}\n".format(csv_filename_train))
print("========Writing test csv to {}\n".format(csv_filename_test))
tf.reset_default_graph()
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
if FLAGS.model_type == 'cnn':
print("=====Training in CNN=====")
model = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
multi_label=FLAGS.multi_label,
l2_reg_lamb=FLAGS.l2_reg_lambda)
elif FLAGS.model_type == 'rnn':
print("=====Training in RNN=====")
model = TextRNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
cell_type=FLAGS.cell_type,
hidden_size=FLAGS.hidden_size,
multi_label=FLAGS.multi_label,
l2_reg_lamb=FLAGS.l2_reg_lambda)
elif FLAGS.model_type == 'fasttext':
print("=====Training in fastText (avg-pooling)=====")
model = fastText(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
multi_label=FLAGS.multi_label)
else:
raise ValueError("mode %s not supported. Valid mode: %s, %s" % (
FLAGS.model_type, 'fasttext', 'cnn','rnn'))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar(str(FLAGS.emb_mode)+"_loss_"+str('finetune' if FLAGS.finetune else 'freeze'), model.loss)
acc_summary = tf.summary.scalar(str(FLAGS.emb_mode)+"_acc_"+str('finetune' if FLAGS.finetune else 'freeze'), model.accuracy)
# Train Summaries
# train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_op = tf.summary.merge([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables(),
max_to_keep=FLAGS.num_checkpoints)
graph = tf.get_default_graph()
load_ops = []
if pretrained_values is not None:
print("loading pretrained weight values")
for key in pretrained_values:
print(key)
load_ops.append(tf.assign(graph.get_tensor_by_name(key),
pretrained_values[key]))
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.dropout_keep_prob: FLAGS.dropout_keep_prob
}
# for metric: Update the running variables on new batch of samples
_, step, summaries, loss, accuracy, pred = sess.run(
[train_op, global_step, train_summary_op, model.loss, model.accuracy,
model.predictions], feed_dict)
# Calculate the score on this batch
precision_avg, recall_avg = 0., 0.
if not FLAGS.multi_label:
y_true = np.argmax(y_batch, 1)
precision_avg = precision_score(y_true, pred, average='macro')
recall_avg = recall_score(y_true, pred, average='macro')
else:
top_k = len(pred[0])
y_true = np.stack([arr.argsort()[-top_k:][::-1] for arr in y_batch])
for k in range(top_k):
precision_avg += precision_score(y_true[:, k], pred[:, k], average='macro')
recall_avg += recall_score(y_true[:, k], pred[:, k], average='macro')
precision_avg /= top_k
recall_avg /= top_k
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}, "
"precision {:g}, recall {:g}".format(time_str, step, loss,
accuracy, precision_avg, recall_avg))
train_summary_writer.add_summary(summaries, global_step=step)
mode = 'a' if os.path.exists(csv_filename_train) else 'w'
if mode == 'w':
with open(csv_filename_train, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['step', 'accuracy', 'precision_avg','recall_avg'])
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
else:
with open(csv_filename_train, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on the entire dev set
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy, pred = sess.run(
[global_step, dev_summary_op, model.loss, model.accuracy,
model.predictions], feed_dict)
# Calculate the score and confusion matrix on this batch
precision_avg, recall_avg = 0., 0.
if not FLAGS.multi_label:
y_true = np.argmax(y_batch, 1)
precision_avg = precision_score(y_true, pred, average='macro')
recall_avg = recall_score(y_true, pred, average='macro')
else:
top_k = len(pred[0])
y_true = np.stack([arr.argsort()[-top_k:][::-1] for arr in y_batch])
for k in range(top_k):
precision_avg += precision_score(y_true[:, k], pred[:, k], average='macro')
recall_avg += recall_score(y_true[:, k], pred[:, k], average='macro')
precision_avg /= top_k
recall_avg /= top_k
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g},"
"precision {:g}, recall {:g}".format(time_str, step, loss, accuracy,
precision_avg, recall_avg))
if writer:
writer.add_summary(summaries, global_step=step)
mode = 'a' if os.path.exists(csv_filename_test) else 'w'
if mode == 'w':
with open(csv_filename_test, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['step', 'accuracy', 'precision_avg','recall_avg'])
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
else:
with open(csv_filename_test, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
return accuracy
# Generate batches
batches = data_helpers.batch_iter(
list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
if pretrained_values is not None:
sess.run([load_ops])
# 0-step eval
print("\nEvaluation at step 0:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
moving_avg_test_acc = 0
num_eval = 0
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
cur_test_acc = dev_step(x_dev, y_dev,
writer=dev_summary_writer)
moving_avg_test_acc += cur_test_acc
num_eval += 1
print("")
if num_eval != 0 and moving_avg_test_acc / num_eval < cur_test_acc:
print("cur test acc:", cur_test_acc)
print("avg test acc: ", moving_avg_test_acc / num_eval)
path = saver.save(sess, checkpoint_prefix+'best', global_step=current_step)
print("Saved best model checkpoint to {}\n".format(path))
path = saver.save(sess, checkpoint_prefix+'final', global_step=current_step)
print("Saved final model checkpoint to {}\n".format(path))
return csv_filename_train, csv_filename_test, checkpoint_dir
| 17,552
|
def liste_vers_paires(l):
"""
Convert a structure from list(list(str)) to list([str, str]): all pairs of elements within each sublist
:param l:
:return:
"""
res = []
for i in l:
taille_i = len(i)
for j in range(taille_i-1):
for k in range(j+1, taille_i):
res.append([i[j], i[k]])
return res
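# Quick check: per sublist, the pairs match itertools.combinations in the same order.
from itertools import combinations

_data = [["a", "b", "c"], ["x", "y"]]
_expected = [list(p) for sub in _data for p in combinations(sub, 2)]
print(liste_vers_paires(_data) == _expected)  # True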
| 17,553
|
def assert_typing(
input_text_word_predictions: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
this is only to ensure correct typing, it does not actually change anything
Args:
input_text_word_predictions: e.g. [
{"char_start": 0, "char_end": 7, "token": "example", "tag": "O"},
..
]
Returns:
input_text_word_predictions_str: e.g. [
{"char_start": "0", "char_end": "7", "token": "example", "tag": "O"},
..
]
"""
return [
{k: str(v) for k, v in input_text_word_prediction.items()}
for input_text_word_prediction in input_text_word_predictions
]
| 17,554
|
def download(file):
"""Download files from live server, delete recerds of those that 404.
"""
url = 'https://www.' + settings.DOMAIN.partition('.')[2] + file.url()
try:
print(url)
return urllib.request.urlopen(url, timeout=15).read()
except urllib.error.HTTPError as e:
print(e.code, url)
file.delete()
time.sleep(.5)
except urllib.error.URLError as e:
print(e.args, url)
return ''
| 17,555
|
def plot_costs(costs):
"""
Plot the training curve of the model from the recorded costs
Args:
costs: list of cost values recorded during training, one entry every hundred iterations
Return:
"""
costs = np.squeeze(costs)
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate = 0.00002")
# plt.show()
plt.savefig('costs.png')
| 17,556
|
def get_current_ledger_state(case_ids, ensure_form_id=False):
"""
Given a list of cases returns a dict of all current ledger data of the following format:
{
"case_id": {
"section_id": {
"product_id": StockState,
"product_id": StockState,
...
},
...
},
...
}
:param ensure_form_id: Set to True to make sure the returned StockStates
have the ``last_modified_form_id`` field populated
"""
from corehq.apps.commtrack.models import StockState
if not case_ids:
return {}
states = StockState.objects.filter(
case_id__in=case_ids
)
ret = {case_id: {} for case_id in case_ids}
for state in states:
sections = ret[state.case_id].setdefault(state.section_id, {})
sections[state.product_id] = state
if ensure_form_id and not state.last_modified_form_id:
transaction = StockTransaction.latest(state.case_id, state.section_id, state.product_id)
if transaction is not None:
state.last_modified_form_id = transaction.report.form_id
state.save()
return ret
| 17,557
|
def _FlattenPadding(padding):
"""Returns padding reduced to have only the time dimension."""
if padding is None:
return padding
r = tf.rank(padding)
return tf.reduce_min(padding, axis=tf.range(1, r))
| 17,558
|
def revive_custom_object(identifier, metadata):
"""Revives object from SavedModel."""
if ops.executing_eagerly_outside_functions():
model_class = training_lib.Model
else:
model_class = training_lib_v1.Model
revived_classes = {
'_tf_keras_layer': (RevivedLayer, base_layer.Layer),
'_tf_keras_input_layer': (RevivedInputLayer, input_layer.InputLayer),
'_tf_keras_network': (RevivedNetwork, network_lib.Network),
'_tf_keras_model': (RevivedNetwork, model_class),
'_tf_keras_sequential': (RevivedNetwork, models_lib.Sequential)
}
parent_classes = revived_classes.get(identifier, None)
if parent_classes is not None:
revived_cls = type(
compat.as_str(metadata['class_name']), parent_classes, {})
return revived_cls._init_from_metadata(metadata) # pylint: disable=protected-access
| 17,559
|
async def _close_tasks(pending_tasks, timeout=5):
"""Close still pending tasks."""
if pending_tasks:
# Give tasks time to cancel.
with suppress(asyncio.CancelledError):
await asyncio.wait_for(asyncio.gather(*pending_tasks), timeout=timeout)
await asyncio.gather(*pending_tasks)
| 17,560
|
def all_users():
"""Returns all users in database sorted by name
Returns:
QuerySet[User]: List containing each User instance
"""
    # Return all users in the database, sorted by full name.
    # Returns a QuerySet (list-like); each element is a User model instance.
user_list = User.objects.order_by("full_name")
return user_list
| 17,561
|
def plugin_func_list(tree):
"""Return a list of expected reports."""
return [EXPECTED_REPORT + (type(plugin_func_list),)]
| 17,562
|
def entity_ids(value):
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can not be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
| 17,563
|
def random_unitary(dim, seed=None):
"""
Return a random dim x dim unitary Operator from the Haar measure.
Args:
dim (int): the dim of the state space.
seed (int): Optional. To set a random seed.
Returns:
Operator: (dim, dim) unitary operator.
Raises:
QiskitError: if dim is not a positive power of 2.
"""
if seed is not None:
np.random.seed(seed)
if dim == 0 or not math.log2(dim).is_integer():
raise QiskitError("Desired unitary dimension not a positive power of 2.")
return Operator(unitary_group.rvs(dim))
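# Hedged usage sketch for random_unitary above; assumes numpy as np, scipy's
# unitary_group and qiskit's Operator/QiskitError are importable, as the function requires.
U = random_unitary(4, seed=42)
mat = U.data
# A Haar-random unitary must satisfy U @ U^dagger = I.
assert np.allclose(mat @ mat.conj().T, np.eye(4))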
| 17,564
|
def allc(IL, IR):
"""
Compute the all-chain set (ALLC).
Parameters
----------
IL : ndarray
Left matrix profile indices
IR : ndarray
Right matrix profile indices
Returns
-------
S : list(ndarray)
All-chain set
C : ndarray
Anchored time series chain for the longest chain (also known as the
unanchored chain)
Notes
-----
`DOI: 10.1109/ICDM.2017.79 <https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf>`__
See Table II
Unlike the original paper, we've replaced the while-loop with a more stable
for-loop.
This is the implementation for the all-chain set (ALLC) and the unanchored
chain is simply the longest one among the all-chain set. Both the
all-chain set and unanchored chain are returned.
The all-chain set, S, is returned as a list of unique numpy arrays.
"""
L = np.ones(IL.size, dtype=np.int64)
S = set() # type: ignore
for i in range(IL.size):
if L[i] == 1:
j = i
C = deque([j])
for k in range(IL.size):
if IR[j] == -1 or IL[IR[j]] != j:
break
else:
j = IR[j]
L[j] = -1
L[i] = L[i] + 1
C.append(j)
S.update([tuple(C)])
C = atsc(IL, IR, L.argmax())
S = [np.array(s, dtype=np.int64) for s in S] # type: ignore
return S, C
| 17,565
|
def build_resilient_url(host, port):
"""
Build basic url to resilient instance
:param host: host name
:type host: str
:param port: port
:type port: str|int
:return: base url
:rtype: str
"""
if host.lower().startswith("http"):
return "{0}:{1}".format(host, port)
return "https://{0}:{1}".format(host, port)
| 17,566
|
def relative_angle(pos1, pos2):
""" Angle between agents. An element (k,i,j) from the output is the angle at kth sample between ith (reference head) and jth (target base).
    Args:
        pos1: positions of the thoraces for all flies. [time, flies, y/x]
        pos2: positions of the heads for all flies. [time, flies, y/x]
    Returns:
        rel_angles: relative angle (in degrees) between each pair of flies. [time, flies, flies]
"""
d0 = pos2 - pos1
d1 = pos1[:, np.newaxis, :, :] - pos2[:, :, np.newaxis, :] # all pairwise "distances"
dot = d0[:, :, np.newaxis, 1]*d1[:, :, :, 1] + d0[:, :, np.newaxis, 0]*d1[:, :, :, 0]
det = d0[:, :, np.newaxis, 1]*d1[:, :, :, 0] - d0[:, :, np.newaxis, 0]*d1[:, :, :, 1]
rel_angles = np.arctan2(det, dot)
return rel_angles * 180.0 / np.pi
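# Hedged numeric sketch for relative_angle above (toy positions, not from the source):
# two flies on the x-axis, both facing +x; fly 1's thorax lies straight ahead of
# fly 0 (0 deg) while fly 0's thorax lies directly behind fly 1 (180 deg).
thorax = np.array([[[0.0, 0.0], [0.0, 2.0]]])  # [time, flies, y/x]
head = np.array([[[0.0, 1.0], [0.0, 3.0]]])
angles = relative_angle(thorax, head)
assert angles.shape == (1, 2, 2)
assert np.isclose(angles[0, 0, 1], 0.0) and np.isclose(abs(angles[0, 1, 0]), 180.0)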
| 17,567
|
def run_value_iteration(env):
"""Run a random policy for the given environment.
Logs the total reward and the number of steps until the terminal
state was reached.
Parameters
----------
env: gym.envs.Environment
Instance of an OpenAI gym.
Returns
-------
(float, int)
First number is the total undiscounted reward received. The
second number is the total number of actions taken before the
episode finished.
"""
initial_state = env.reset()
env.render()
# time.sleep(1) # just pauses so you can see the output
total_reward = 0
num_steps = 0
gamma = 0.9
tol = 1e-3
max_iterations = 1000
state = initial_state
optimal_value_function, iterations = value_iteration(env, gamma, max_iterations, tol)
policy = value_function_to_policy(env, gamma, optimal_value_function)
while True:
action_cur = policy[state]
print(" ")
print("step %d" % num_steps)
print("action is %s" % action_names[action_cur])
nextstate, reward, is_terminal, debug_info = env.step(action_cur)
print(debug_info)
state = nextstate
env.render()
print("move to state %d" % nextstate)
total_reward += reward
num_steps += 1
if is_terminal:
break
# time.sleep(1)
return total_reward, num_steps
| 17,568
|
def get_shape(dset):
"""
Extract the shape of a (possibly constant) dataset
Parameters:
-----------
dset: an h5py.Dataset or h5py.Group (when constant)
The object whose shape is extracted
Returns:
--------
A tuple corresponding to the shape
"""
# Case of a constant dataset
if isinstance(dset, h5py.Group):
shape = dset.attrs['shape']
# Case of a non-constant dataset
elif isinstance(dset, h5py.Dataset):
shape = dset.shape
return(shape)
| 17,569
|
def predict(reaction_mech, T_list, pressure_0, CCl4_X_0, mass_flow_rate,
n_steps, n_pfr, length, area, save_fig=False, name='predict',fold_no=None,iter_CCl4=False):
"""
Load the saved parameters of StandardScaler() and rebuild the ML model to
do predictions.
=============== =============================================================
Attribute Description
=============== =============================================================
    `reaction_mech` Dictionary of Cantera reaction mechanism(s) (.cti file)
    `T_list`        Temperature profile (°C)
    `pressure_0`    Initial pressure (atm)
    `CCl4_X_0`      Initial CCl4 concentration (mass fraction)
    `mass_flow_rate` Mass flow rate of input gas (T/H)
`n_steps` Number of iterations/number of CSTRs
`n_pfr` Number of PFRs
`length` Length of each PFR (m)
`area` Cross-sectional area (m**2)
`save_fig` Save figure to `plots` folder
`name` The file name of the saving figure
=============== =============================================================
"""
# Load scaler parameter
with open(os.path.join(RESULTPATH, 'clf.pickle'), 'rb') as f:
scaler = pickle.load(f)
# Load model
model = build_model()
model.load_weights(os.path.join(RESULTPATH,'model.h5'))
if CCl4_X_0 > 1 : #ppm
CCl4_X_0 = float(CCl4_X_0) / 1000000
if type(reaction_mech) != dict:
raise TypeError('The datatype of `reaction_mech` is {}.It should be a dict.'.format(type(reaction_mech)))
results = {}
for label in reaction_mech.keys():
compositions, t, cracking_rates = EDC_cracking(
reaction_mech[label],
T_list,
pressure_0,
CCl4_X_0,
mass_flow_rate,
n_steps,
n_pfr,
length,
area,
label=label
)
results[label] = {
'compositions': compositions,
't': t,
'cracking_rates': cracking_rates,
}
# Use ML model to predict
KM_label = 'Schirmeister'
y_predicted = [0]
for i, T in enumerate(T_list[1:]):
Ti = T_list[i]
Te = T
compositions = results[KM_label]['compositions'][i]
t = sum(results[KM_label]['t'][:i+1])
t_r = results[KM_label]['t'][i]
x_predict = [Ti, Te, compositions, pressure_0, CCl4_X_0, t, t_r]
x_predict = np.hstack(x_predict).reshape(1, -1)
x_predict = x_predict[:,:-4]
rescaled_x_predict = scaler.transform(x_predict)
x_predict = [rescaled_x_predict[:,2:], rescaled_x_predict[:,:2]]
y = float(model.predict(x_predict))
y_predicted.append(y)
results['ML'] = {'cracking_rates': y_predicted}
from sklearn.metrics import mean_absolute_error
loss = mean_absolute_error(results['Choi']['cracking_rates'],
results['ML']['cracking_rates'])
print(f"loss in {CCl4_X_0}: {loss} ")
'''
Ti = T_list[:-1]
Te = T_list[1:]
df = read_csv("Data/RawDataInput.csv")
y_ground_df = df.query(
"CCl4_X_0 == @CCl4_X_0 &"\
"pressure_0 == @pressure_0 &"\
"mass_flow_rate == @mass_flow_rate &"\
"Ti == @Ti &"\
"Te == @Te"
)['X']
if not y_ground_df.empty:
y_ground = [0]
if len(y_ground_df) >= 18:
for index in y_ground_df.index:
try:
if y_ground_df.loc[[index+17]].index == index + 17:
for i in range(index,index+18):
y_ground.append(y_ground_df.loc[i]/100)
break
except KeyError:
print("index + 17 dont exist in y_ground_df, continue")
continue
print(len(y_ground))
results['FPC'] = {'cracking_rates': y_ground}
loss = mean_absolute_error(results['FPC']['cracking_rates'],
results['ML']['cracking_rates'])
'''
if CCl4_X_0 < 1 : #ppm
CCl4_X_0 = float(CCl4_X_0) * 1000000
# Plot figure
if save_fig:
ndata = len(T_list)
fig, ax1 = plt.subplots()
        ln = ax1.plot(range(ndata), T_list, color='r', marker='o', label=r'Temperature ($^\circ$C)')
        ax1.set_ylabel(r'Temperature ($^\circ$C)')
ax1.set_ylim(0, 600)
ax2 = ax1.twinx()
lns = ln
for label in results.keys():
cracking_rates = [i * 100 for i in results[label]['cracking_rates']]
lns += ax2.plot(range(ndata), cracking_rates, marker='o', label=label)
ax2.set_ylabel('Cracking rates (%)')
ax2.set_ylim(-5, 100)
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc='lower right', frameon=True)
plt.title('Temperature and cracking rates curves')
ax1.set_xlabel('PFR index')
plt.xticks(range(ndata))
if DOKFOLD:
plt.savefig(os.path.join(PLOTPATH, f'{fold_no}_{name}.png'))
elif iter_CCl4 is True:
if not os.path.exists(os.path.join(PLOTPATH,"predict")):
os.mkdir(os.path.join(PLOTPATH,"predict"))
plt.savefig(os.path.join(PLOTPATH, f'predict/CCl4_{CCl4_X_0:.6f}_mass_{mass_flow_rate}_temp_{T_list[0]}_{name}.png'))
else:
plt.savefig(os.path.join(PLOTPATH, '{}.png'.format(name)))
return loss
| 17,570
|
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
| 17,571
|
def _sanitize_bool(val: typing.Any, /) -> bool:
"""Sanitize argument values to boolean."""
if isinstance(val, str):
return val.lower() == 'true'
return bool(val)
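# Minimal usage sketch for _sanitize_bool above: strings are True only for the
# literal "true" (case-insensitive); every other type falls back to bool().
assert _sanitize_bool("True") is True
assert _sanitize_bool("no") is False
assert _sanitize_bool(0) is False and _sanitize_bool([1]) is True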
| 17,572
|
def write_all(quality_annotations, filt_primer, filtered_annotations, dedup_ap, all_reads, motif_dir, motif_modules, index_rep, index_rep2, j, quiet=False):
"""
Write all output files: quality annotations, one-primer annotations, filtered annotations, statistics, repetitions + images.
:param quality_annotations: list(Annotation) - list of blue annotations
:param filt_primer: list(Annotation) - list of grey annotations
:param filtered_annotations: list(Annotation) - list of filtered out annotations
:param dedup_ap: list(AnnotationPair) - deduplicated annotation pairs
:param all_reads: int - number of all reads
:param motif_dir: str - path to motif directory
:param motif_modules: dict - motif modules dictionary from config
:param index_rep: int - index of first repetition in modules
:param index_rep2: int - index of second repetition in modules
:param j: int - index of postfilter
:param quiet: boolean - less files on the output?
:return: None
"""
# create dir if not exists:
if (not quiet or len(quality_annotations) > 0 or len(filt_primer) > 0) and not os.path.exists(motif_dir):
os.makedirs(motif_dir)
# write output files
if not quiet:
write_annotations('%s/annotations_%d.txt' % (motif_dir, j + 1), quality_annotations)
write_annotations('%s/filtered_%d.txt' % (motif_dir, j + 1), filtered_annotations)
write_annotations('%s/filtered_primer_%d.txt' % (motif_dir, j + 1), filt_primer)
write_summary_statistics('%s/stats_%d.txt' % (motif_dir, j + 1), quality_annotations, all_reads)
if index_rep2 is not None:
# print("aps", len(dedup_ap[i]))
write_histogram_image2d('%s/repetitions_%d' % (motif_dir, j + 1), quality_annotations + filt_primer, index_rep - 1, index_rep2 - 1,
motif_modules[index_rep - 1]['seq'], motif_modules[index_rep2 - 1]['seq'])
write_alignment('%s/alignment_%d.fasta' % (motif_dir, j + 1), quality_annotations, index_rep - 1, index_rep2 - 1)
else:
write_histogram_image('%s/repetitions_%d' % (motif_dir, j + 1), quality_annotations, filt_primer, index_rep - 1)
write_alignment('%s/alignment_%d.fasta' % (motif_dir, j + 1), quality_annotations, index_rep - 1)
if not quiet or len(quality_annotations) > 0:
write_histogram('%s/repetitions_%d.txt' % (motif_dir, j + 1), quality_annotations, profile_file='%s/profile_%d.txt' % (motif_dir, j + 1), index_rep=index_rep - 1, quiet=quiet)
if not quiet or len(filt_primer) > 0:
write_histogram('%s/repetitions_grey_%d.txt' % (motif_dir, j + 1), filt_primer, quiet=quiet)
| 17,573
|
def isSameLinkedList(linked_list1, linked_list2):
"""
Check whether two linked lists are the same.
Args:
linked_list1: -
linked_list2: -
"""
    while linked_list1 and linked_list2:
        if linked_list1.val != linked_list2.val:
            return False
        linked_list1, linked_list2 = linked_list1.next, linked_list2.next
    # Both lists must be exhausted at the same point to be identical.
    return linked_list1 is None and linked_list2 is None
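# Hedged usage sketch for isSameLinkedList above; `Node` is a stand-in
# singly-linked node class assumed here for illustration (not defined in the source).
class Node:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

a = Node(1, Node(2, Node(3)))
b = Node(1, Node(2, Node(3)))
c = Node(1, Node(2, Node(4)))
assert isSameLinkedList(a, b) and not isSameLinkedList(a, c)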
| 17,574
|
def ChenFoxLyndonBreakpoints(s):
"""Find starting positions of Chen-Fox-Lyndon decomposition of s.
The decomposition is a set of Lyndon words that start at 0 and
continue until the next position. 0 itself is not output, but
the final breakpoint at the end of s is. The argument s must be
of a type that can be indexed (e.g. a list, tuple, or string).
The algorithm follows Duval, J. Algorithms 1983, but uses 0-based
indexing rather than Duval's choice of 1-based indexing.
Algorithms on strings and sequences based on Lyndon words.
David Eppstein, October 2011.
"""
k = 0
while k < len(s):
i, j = k, k+1
while j < len(s) and s[i] <= s[j]:
i = (s[i] == s[j]) and i+1 or k # Python cond?yes:no syntax
j += 1
while k < i+1:
k += j-i
yield k
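# Worked example for ChenFoxLyndonBreakpoints above (computed by hand with Duval's
# algorithm): the Chen-Fox-Lyndon factorization of "banana" is b . an . an . a,
# so the generator yields the breakpoints 1, 3, 5 and the final 6.
assert list(ChenFoxLyndonBreakpoints("banana")) == [1, 3, 5, 6]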
| 17,575
|
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: SolcastUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
return {
"info": async_redact_data(config_entry.options, TO_REDACT),
"data": async_redact_data(coordinator.data, TO_REDACT),
}
| 17,576
|
def test_discard_report(class_testsuite, platforms_list, all_testcases_dict, caplog, tmpdir):
""" Testing discard_report function of Testsuite class in sanitycheck
Test 1: Check if apply_filters function has been run before running
discard_report
Test 2: Test if the generated report is not empty
    Test 3: Test if the generated report contains the expected columns"""
class_testsuite.platforms = platforms_list
class_testsuite.testcases = all_testcases_dict
filename = tmpdir.mkdir("test_discard").join("discard_report.csv")
with pytest.raises(SystemExit):
class_testsuite.discard_report(filename)
assert "apply_filters() hasn't been run!" in caplog.text
kwargs = {"exclude_tag" : ['test_a'], "exclude_platform" : ['demo_board_1'],
"platform" : ['demo_board_2']}
class_testsuite.apply_filters(**kwargs)
class_testsuite.discard_report(filename)
assert os.stat(filename).st_size != 0
with open(filename, "r") as file:
csv_reader = csv.reader(file)
assert set(['test', 'arch', 'platform', 'reason']) == set(list(csv_reader)[0])
| 17,577
|
def calculate_value_function(transition_costs):
"""Recursively apply the bellman equation from the end to the start. """
state_dim = [tc.shape[0] for tc in transition_costs]
state_dim.append(transition_costs[-1].shape[1])
V = [np.zeros(d) for d in state_dim]
V_ind = [np.zeros(d) for d in state_dim]
for i in range(len(state_dim) - 2, -1, -1):
rhs = transition_costs[i] + V[i + 1]
V[i] = np.min(rhs, axis=1)
V_ind[i] = np.argmin(rhs, axis=1)
return V_ind, V
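# Hedged numeric sketch for calculate_value_function above (toy costs, not from the
# source): two stages with 2 -> 2 -> 1 states; backward induction gives
# V[0] = [4, 1] with greedy choices V_ind[0] = [0, 1].
costs = [np.array([[1.0, 5.0], [2.0, 0.0]]), np.array([[3.0], [1.0]])]
V_ind, V = calculate_value_function(costs)
assert np.allclose(V[0], [4.0, 1.0]) and np.allclose(V[1], [3.0, 1.0])
assert list(V_ind[0]) == [0, 1]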
| 17,578
|
def main() -> None:
"""Run the command."""
sources = sys.argv[1:]
report_dir = "/reports/coverage/api"
dest_dir = "/tmp/coverage/api" # nosec
shutil.rmtree(dest_dir, ignore_errors=True)
shutil.copytree(report_dir, dest_dir)
cov = coverage.Coverage(
data_file=os.path.join(dest_dir, "coverage"), data_suffix=True, source=sources or None, branch=True
)
cov.combine([dest_dir], strict=True)
cov.html_report(directory=dest_dir, ignore_errors=True)
cov.xml_report(outfile=os.path.join(dest_dir, "coverage.xml"), ignore_errors=True)
cov.report(ignore_errors=True)
| 17,579
|
def load_config(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
return load_yaml(fname)
| 17,580
|
def get_date_folders():
"""
Return a list of the directories used for backing up the database.
"""
directories_in_curdir = list(filter(os.path.isdir, os.listdir(os.getcwd())))
date_folders = [
d for d in directories_in_curdir if re.match(r"([0-9]+(-[0-9]+)+)", d)
]
return date_folders
| 17,581
|
def show_header():
"""Shows the project header."""
console = Console()
text = Text("AWS Account Lifecycle Manager")
text.stylize("bold magenta")
console.print(text2art('AWS ALF CLI'))
console.print(text)
| 17,582
|
def read_tree_color_map(filename):
"""Reads a tree colormap from a file"""
infile = util.open_stream(filename)
maps = []
for line in infile:
expr, red, green, blue = line.rstrip().split("\t")
        maps.append([expr, list(map(float, (red, green, blue)))])
name2color = make_expr_mapping(maps)
def leafmap(node):
return name2color(node.name)
return tree_color_map(leafmap)
| 17,583
|
def sin_cos_encoding(arr):
""" Encode an array of angle value to correspongding Sines and Cosines, avoiding value jump in 2PI measure like from PI to -PI. """
return np.concatenate((np.sin(arr), np.cos(arr)))
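# Hedged numeric sketch for sin_cos_encoding above: PI and -PI differ by 2*PI but
# map to (numerically) the same (sin, cos) pair, which is the jump the encoding avoids.
enc_pos = sin_cos_encoding(np.array([np.pi]))
enc_neg = sin_cos_encoding(np.array([-np.pi]))
assert enc_pos.shape == (2,) and np.allclose(enc_pos, enc_neg)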
| 17,584
|
def run(f, ip="", counter=1024):
"""
Run Whitefuck script.
Parameters
----------
f : str
Script to run.
ip : str, optional
Input, by default ""
counter : int, optional
Number of counters to make, by default 1024
"""
s = ""
# Format to brainfuck
for l in f.split("\n"):
g = d.get(l)
if g:
s += g
# Check syntax
bc = 0
for c in s:
if c == "[":
bc += 1
elif c == "]":
bc -= 1
assert bc >= 0
assert bc == 0
# Run script
i = 0
ii = 0
cn = [0] * counter
po = 0
br = []
while i < len(s):
c = s[i]
assert po in range(counter)
if c == ">":
po += 1
elif c == "<":
po -= 1
elif c == "+":
cn[po] += 1
elif c == "-":
cn[po] = max(cn[po] - 1, 0)
elif c == ".":
print(chr(cn[po]), end="")
elif c == ",":
            cn[po] = ord(ip[ii])  # store the input character's code point, keeping cn integer-valued
ii += 1
elif c == "[":
            if cn[po] == 0:
                # Cell is zero: jump forward to the matching "]" (nesting-aware);
                # the trailing i += 1 below then steps past it.
                depth = 1
                while depth:
                    i += 1
                    if s[i] == "[":
                        depth += 1
                    elif s[i] == "]":
                        depth -= 1
else:
br.append(i)
elif c == "]":
if cn[po] != 0:
i = br[-1]
else:
br.pop(-1)
i += 1
# print(i, cn[po], po, c, len(br))
| 17,585
|
def func_use_queue(q):
"""
    Put data into the queue so that processes can communicate.
:param q: Queue
:return:
"""
q.put([1, "a", None])
| 17,586
|
def atom_free_electrons(mgrph, idx):
""" number of unbound valence electrons for an atom in a molecular graph
"""
atms = atoms(mgrph)
vlnc = valence(atms[idx])
bcnt = atom_bond_count(mgrph, idx)
return vlnc - bcnt
| 17,587
|
def identify_guest():
"""Returns with an App Engine user or an anonymous user.
"""
app_engine_user = users.get_current_user()
if app_engine_user:
return Guest.app_engine_user(app_engine_user)
ip_address = ip_address_from_request(request)
if ip_address:
return Guest.ip_address(ip_address)
else:
return Guest()
| 17,588
|
def compute_placevalues(tokens):
"""Compute the placevalues for each token in the list tokens"""
pvs = []
for tok in tokens:
if tok == "point":
pvs.append(0)
else:
pvs.append(placevalue(get_value(tok)[0]))
return pvs
| 17,589
|
def save_fgong(filename, glob, var, fmt='%16.9E', ivers=0,
comment=['\n','\n','\n','\n']):
"""Given data for an FGONG file in the format returned by
:py:meth:`~tomso.fgong.load_fgong` (i.e. two NumPy arrays and a
possible header), writes the data to a file.
Parameters
----------
filename: str
Filename to which FGONG data is written.
glob: NumPy array
The global variables for the stellar model.
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
ivers: int, optional
The integer indicating the version number of the file.
(default=0)
comment: list of strs, optional
The first four lines of the FGONG file, which usually contain
notes about the stellar model.
"""
nn, ivar = var.shape
iconst = len(glob)
with open(filename, 'wt') as f:
f.writelines(comment)
line = '%10i'*4 % (nn, iconst, ivar, ivers)
f.writelines([line + '\n'])
for i in range(0, iconst, 5):
N = np.mod(i+4, 5)+1 # number of floats in this row
line = fmt*N % tuple(glob[i:i+5])
if len(line) != N*int(fmt[1:3]):
line = ''
for j in range(5):
part = fmt % glob[i:i+5][j]
if len(part) != int(fmt[1:3]):
part = part.replace('E', '')
line += part
f.writelines([line + '\n'])
for row in var:
for i in range(0, ivar, 5):
N = np.mod(i+4, 5)+1 # number of floats in this row
line = fmt*N % tuple(row[i:i+5])
if len(line) != N * int(fmt[1:3]):
line = ''
for j in range(5):
part = fmt % row[i:i+5][j]
if len(part) != int(fmt[1:3]):
part = part.replace('E', '')
line += part
f.writelines([line + '\n'])
| 17,590
|
def get_model_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch):
"""get_model_damping"""
damping_each_step = []
total_steps = steps_per_epoch * total_epochs
for step in range(total_steps):
epoch = (step + 1) / steps_per_epoch
damping_here = damping_init * (decay_rate ** (epoch / 10))
damping_each_step.append(damping_here)
current_step = global_step
damping_each_step = np.array(damping_each_step).astype(np.float32)
damping_now = damping_each_step[current_step:]
return damping_now
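# Hedged numeric sketch for get_model_damping above (illustrative hyper-parameters,
# not from the source): the damping decays with the fractional epoch and the
# returned schedule starts at the current global step.
schedule = get_model_damping(global_step=0, damping_init=0.03, decay_rate=0.87,
                             total_epochs=2, steps_per_epoch=5)
assert schedule.shape == (10,)
assert np.isclose(schedule[0], 0.03 * 0.87 ** ((1 / 5) / 10))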
| 17,591
|
def setup_minicluster(enable_k8s=False):
"""
setup minicluster
"""
log.info("setup cluster")
if os.getenv("CLUSTER", ""):
log.info("cluster mode")
else:
log.info("local minicluster mode")
setup(enable_peloton=True, enable_k8s=enable_k8s)
time.sleep(5)
| 17,592
|
def transformData(Z,Time,Spec):
# transformData Transforms each data series based on Spec.Transformation
#
# Input Arguments:
#
# Z : T x N numeric array, raw (untransformed) observed data
# Spec : structure , model specification
#
# Output Arguments:
#
    #     X : T x N numeric array, transformed data (stationary to enter DFM);
    #         Time and Z are returned as well, trimmed of the first quarter of observations
"""
Transformation notes:
'lin' = Levels (No Transformation)
'chg' = Change (Difference)
'ch1' = Year over Year Change (Difference)
'pch' = Percent Change
'pc1' = Year over Year Percent Change
'pca' = Percent Change (Annual Rate)
'log' = Natural Log
"""
T,N = Z.shape
X = np.empty((T, N))
X[:] = np.nan
Freq_dict = {"m":1,"q":3}
    formula_dict = {"lin":lambda x:x,  # levels: no transformation (handled directly below)
"chg":lambda x:np.append(np.nan,x[t1+step::step] - x[t1:-1-t1:step]),
"ch1":lambda x:x[12+t1::step] - x[t1:-12:step],
"pch":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-1-t1:step]) - 1)*100,
"pc1":lambda x:((x[12+t1::step]/x[t1:-12:step])-1)*100,
"pca":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-step:step])**(1/n) - 1)*100,
"log":lambda x:np.log(x)
}
for i in range(N):
formula = Spec.Transformation[i]
freq = Spec.Frequency[i]
step = Freq_dict[freq] # time step for different frequencies based on monthly time
t1 = step -1 # assume monthly observations start at beginning of quarter (subtracted 1 for indexing)
n = step/12 # number of years, needed to compute annual % changes
series = Spec.SeriesName[i]
if formula == 'lin':
X[:,i] = Z[:,i].copy()
elif formula == 'chg':
X[t1::step,i] = formula_dict['chg'](Z[:,i].copy())
elif formula == 'ch1':
X[12+t1::step, i] = formula_dict['ch1'](Z[:, i].copy())
elif formula == 'pch':
X[t1::step, i] = formula_dict['pch'](Z[:, i].copy())
elif formula == 'pc1':
X[12+t1::step, i] = formula_dict['pc1'](Z[:, i].copy())
elif formula == 'pca':
X[t1::step, i] = formula_dict['pca'](Z[:, i].copy())
elif formula == 'log':
X[:, i] = formula_dict['log'](Z[:, i].copy())
else:
            raise ValueError("{}: Transformation is unknown".format(formula))
# Drop first quarter of observations
# since transformations cause missing values
return X[3:,:],Time[3:],Z[3:,:]
| 17,593
|
def torch_fn():
"""Create a ReLU layer in torch."""
return ReLU()
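# Hedged usage sketch for torch_fn above; assumes torch and torch.nn.ReLU are
# importable in this module, as the function requires.
import torch
relu = torch_fn()
assert torch.equal(relu(torch.tensor([-1.0, 0.0, 2.0])), torch.tensor([0.0, 0.0, 2.0]))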
| 17,594
|
def all_user_tickets(uid, conference):
"""
    Cache-friendly version of user_tickets; returns a list of
    (ticket_id, fare_type, fare_code, complete)
    for every ticket associated with the user.
"""
qs = _user_ticket(User.objects.get(id=uid), conference)
output = []
for t in qs:
output.append((
t.id, t.fare.ticket_type, t.fare.code,
_ticket_complete(t)
))
return output
| 17,595
|
def start(graph=get_graph_for_controllers(ALL_SERVICE_CONTROLLERS),
strategy: Optional[str] = None, **kwargs):
"""Starts the ETL pipelines"""
bonobo.run(graph, strategy=strategy, **kwargs)
| 17,596
|
def get_hardconcrete_linear_modules(module: nn.Module) -> List[nn.Module]:
"""Get all HardConcrete*Linear modules.
Parameters
----------
module : nn.Module
The input module
Returns
-------
List[nn.Module]
A list of the HardConcrete*Linear module.
"""
modules = []
for m in module.children():
if isinstance(m, HardConcreteProjectedLinear):
modules.append(m)
elif isinstance(m, HardConcreteLinear):
modules.append(m)
else:
modules.extend(get_hardconcrete_linear_modules(m))
return modules
| 17,597
|
def approx_partial(model, ori_target, param, current_val, params, loss_list, information_loss_list, xs_list, ys_list, train=False, optimizer=None):
"""Compute the approximate partial derivative using the finite-difference method.
:param param:
:param current_val:
:param params:
:return:
"""
#step_size = STEP_SIZES[param]
step_size = 10
losses = []
for sign in [-1, 1]:
set_param(param, current_val + sign * step_size / 2, params)
loss = get_attack_loss(model, ori_target, information_loss_list, xs_list, ys_list,
loss_f=torch.nn.MSELoss(reduction='none'),
xs=params['x'], ys=params['y'],
shape=(320, 320), n_pixel_range=(10, 11), train=train, optimizer=optimizer)
# image = RENDERER.render()
# with torch.no_grad():
# out = MODEL(image)
# loss = CRITERION(out, LABELS).item()
losses.append(loss)
grad = (losses[1] - losses[0]) / step_size
loss_list += losses
return grad
| 17,598
|
def check_successful_connections(_ctx: Context) -> bool:
"""Checks if there are no successful connections more than SUCCESSFUL_CONNECTIONS_CHECK_PERIOD sec.
Returns True if there was successful connection for last NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC sec.
:parameter _ctx: Context
"""
now_ns = time.time_ns()
lower_bound = max(_ctx.get_start_time_ns(),
_ctx.Statistic.connect.last_check_time)
diff_sec = ns2s(now_ns - lower_bound)
if _ctx.Statistic.connect.success == _ctx.Statistic.connect.success_prev:
if diff_sec > SUCCESSFUL_CONNECTIONS_CHECK_PERIOD_SEC:
_ctx.add_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)))
return diff_sec <= NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC
else:
_ctx.Statistic.connect.last_check_time = now_ns
_ctx.Statistic.connect.sync_success()
_ctx.remove_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)).uuid)
return True
| 17,599
|