| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
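Each record below is shown as its metadata fields, the raw `content` of the file, and the three derived line statistics. As a rough, hedged illustration (this code is not part of the dataset card), the derived columns can be recomputed from `content` along the following lines; the exact definitions used by the dataset pipeline, and the local file name `records.parquet`, are assumptions.

```python
# Minimal sketch: recompute avg_line_length, max_line_length and
# alphanum_fraction from a record's `content` field.
# Assumptions: the split has been exported locally as "records.parquet"
# (hypothetical name) and the statistics are defined as below; the real
# pipeline may count blank lines or trailing newlines differently.
import pandas as pd


def line_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }


df = pd.read_parquet("records.parquet")
stats = df["content"].apply(line_stats).apply(pd.Series)
print(stats.head())
```

A record's stored values (for example, 18.384615 / 47 / 0.59205 for the first file below) can then be compared against the recomputed ones as a sanity check.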
hexsha: 682223aac708033196ca76e4af2af0958377ef44 | size: 478 | ext: py | lang: Python
max_stars_repo: assign5-2.py | arhue/python-learning | head 058c93315fd5aa76584e32432e7c80cb3972478e | licenses ["MIT"] | count: null | event min: null | event max: null
max_issues_repo: assign5-2.py | arhue/python-learning | head 058c93315fd5aa76584e32432e7c80cb3972478e | licenses ["MIT"] | count: null | event min: null | event max: null
max_forks_repo: assign5-2.py | arhue/python-learning | head 058c93315fd5aa76584e32432e7c80cb3972478e | licenses ["MIT"] | count: null | event min: null | event max: null
content:
smallest=None
largest=None
while True:
number=input("Enter number")
if number=="done":
break
try:
number=int(number)
except:
print("Invalid input")
continue
if (smallest is None) or (largest is None):
smallest=number
largest=number
elif number > largest:
largest=number
elif number < smallest:
smallest=number
print("Maximum is", largest)
print("Minimum is", smallest)
avg_line_length: 18.384615 | max_line_length: 47 | alphanum_fraction: 0.59205

hexsha: a3287cf8bbd3a68ea99d14968e74d7a18a30b7bd | size: 565 | ext: py | lang: Python
max_stars_repo: resources/evaluation/SoK/dump_ida_layout.py | russells-crockpot/lancelot | head 50fcc92d19757c50fe78355bd7d65c1a409c47a6 | licenses ["Apache-2.0"] | count: 46 | event min: 2019-06-27T11:22:22.000Z | event max: 2021-12-15T20:01:47.000Z
max_issues_repo: resources/evaluation/SoK/dump_ida_layout.py | russells-crockpot/lancelot | head 50fcc92d19757c50fe78355bd7d65c1a409c47a6 | licenses ["Apache-2.0"] | count: 80 | event min: 2019-06-11T20:19:09.000Z | event max: 2022-03-01T19:57:35.000Z
max_forks_repo: resources/evaluation/SoK/dump_ida_layout.py | russells-crockpot/lancelot | head 50fcc92d19757c50fe78355bd7d65c1a409c47a6 | licenses ["Apache-2.0"] | count: 8 | event min: 2019-06-29T17:12:09.000Z | event max: 2021-07-09T23:06:47.000Z
content:
import idaapi
import idautils
lines = []
for ea in idautils.Functions(0x0, 0xFFFFFFFFFFFFFFFF):
lines.append("function: %s" % hex(ea))
f = idaapi.get_func(ea)
for bb in idaapi.FlowChart(f, flags=idaapi.FC_PREDS):
lines.append("basic block: %s" % hex(bb.start_ea))
for head in idautils.Heads(bb.start_ea, bb.end_ea):
insn = idautils.DecodeInstruction(head)
if not insn:
continue
lines.append("instruction: %s" % hex(head))
print("\n".join(lines))
import ida_pro
ida_pro.qexit(0)
avg_line_length: 23.541667 | max_line_length: 59 | alphanum_fraction: 0.630088

hexsha: 75a6e1c86fc2818f914f617b2653877796b24632 | size: 2,960 | ext: py | lang: Python
max_stars_repo: api_core/tests/unit/gapic/test_config.py | bomboradata/bombora-google-cloud-python | head 255bbebe6c50490f40fcc3eed40bae1e77e03859 | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
max_issues_repo: api_core/tests/unit/gapic/test_config.py | bomboradata/bombora-google-cloud-python | head 255bbebe6c50490f40fcc3eed40bae1e77e03859 | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
max_forks_repo: api_core/tests/unit/gapic/test_config.py | bomboradata/bombora-google-cloud-python | head 255bbebe6c50490f40fcc3eed40bae1e77e03859 | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
content:
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core import exceptions
from google.api_core.gapic_v1 import config
INTERFACE_CONFIG = {
'retry_codes': {
'idempotent': ['DEADLINE_EXCEEDED', 'UNAVAILABLE'],
'other': ['FAILED_PRECONDITION'],
'non_idempotent': []
},
'retry_params': {
'default': {
'initial_retry_delay_millis': 1000,
'retry_delay_multiplier': 2.5,
'max_retry_delay_millis': 120000,
'initial_rpc_timeout_millis': 120000,
'rpc_timeout_multiplier': 1.0,
'max_rpc_timeout_millis': 120000,
'total_timeout_millis': 600000
},
'other': {
'initial_retry_delay_millis': 1000,
'retry_delay_multiplier': 1,
'max_retry_delay_millis': 1000,
'initial_rpc_timeout_millis': 1000,
'rpc_timeout_multiplier': 1,
'max_rpc_timeout_millis': 1000,
'total_timeout_millis': 1000
},
},
'methods': {
'AnnotateVideo': {
'timeout_millis': 60000,
'retry_codes_name': 'idempotent',
'retry_params_name': 'default'
},
'Other': {
'timeout_millis': 60000,
'retry_codes_name': 'other',
'retry_params_name': 'other'
},
'Plain': {
'timeout_millis': 30000
}
}
}
def test_create_method_configs():
method_configs = config.parse_method_configs(INTERFACE_CONFIG)
retry, timeout = method_configs['AnnotateVideo']
assert retry._predicate(exceptions.DeadlineExceeded(None))
assert retry._predicate(exceptions.ServiceUnavailable(None))
assert retry._initial == 1.0
assert retry._multiplier == 2.5
assert retry._maximum == 120.0
assert retry._deadline == 600.0
assert timeout._initial == 120.0
assert timeout._multiplier == 1.0
assert timeout._maximum == 120.0
retry, timeout = method_configs['Other']
assert retry._predicate(exceptions.FailedPrecondition(None))
assert retry._initial == 1.0
assert retry._multiplier == 1.0
assert retry._maximum == 1.0
assert retry._deadline == 1.0
assert timeout._initial == 1.0
assert timeout._multiplier == 1.0
assert timeout._maximum == 1.0
retry, timeout = method_configs['Plain']
assert retry is None
assert timeout._timeout == 30.0
avg_line_length: 32.888889 | max_line_length: 74 | alphanum_fraction: 0.642905

hexsha: c6b0279da339fcc60ab4e52a5f3706d89dbb9a9d | size: 1,283 | ext: py | lang: Python
max_stars_repo: sdks/python/client/setup.py | parallel-domain/argo-workflows | head c055b48b6e216dcdeb1c9840f14199a72329bdaf | licenses ["Apache-2.0"] | count: 1 | event min: 2022-02-24T01:45:03.000Z | event max: 2022-02-24T01:45:03.000Z
max_issues_repo: sdks/python/client/setup.py | parallel-domain/argo-workflows | head c055b48b6e216dcdeb1c9840f14199a72329bdaf | licenses ["Apache-2.0"] | count: 18 | event min: 2022-02-01T23:09:58.000Z | event max: 2022-03-31T23:28:41.000Z
max_forks_repo: sdks/python/client/setup.py | parallel-domain/argo-workflows | head c055b48b6e216dcdeb1c9840f14199a72329bdaf | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
content:
"""
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "argo-workflows"
VERSION = "0.0.0-pre"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
]
setup(
name=NAME,
version=VERSION,
description="Argo Workflows API",
author="OpenAPI Generator community",
author_email="team@openapitools.org",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "Argo Workflows API"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
long_description="""\
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
"""
)
avg_line_length: 29.837209 | max_line_length: 206 | alphanum_fraction: 0.71629

hexsha: 022ec08bb4f8811e5ed2815213d66e0ab529d349 | size: 1,022 | ext: py | lang: Python
max_stars_repo: data_migration/conf/default_settings.py | gangh/bk-sops | head 29f4b4915be42650c2eeee637e0cf798e4066f09 | licenses ["Apache-2.0"] | count: 1 | event min: 2019-12-23T07:23:35.000Z | event max: 2019-12-23T07:23:35.000Z
max_issues_repo: data_migration/conf/default_settings.py | bk-sops/bk-sops | head 9f5950b13473bf7b5032528b20016b7a571bb3cd | licenses ["Apache-2.0"] | count: 9 | event min: 2020-02-12T03:15:49.000Z | event max: 2021-06-10T22:04:51.000Z
max_forks_repo: data_migration/conf/default_settings.py | bk-sops/bk-sops | head 9f5950b13473bf7b5032528b20016b7a571bb3cd | licenses ["Apache-2.0"] | count: 1 | event min: 2022-01-17T11:32:05.000Z | event max: 2022-01-17T11:32:05.000Z
content:
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import importlib
from django.conf import settings
ver_settings = importlib.import_module('data_migration.conf.sites.%s.ver_settings' % settings.RUN_VER)
for _setting in dir(ver_settings):
if _setting.upper() == _setting:
locals()[_setting] = getattr(ver_settings, _setting)
avg_line_length: 44.434783 | max_line_length: 115 | alphanum_fraction: 0.779843

hexsha: 65e203f967f624e25312edce83f04dd3f9f50f12 | size: 7,033 | ext: py | lang: Python
max_stars_repo: toyClassification/Ensemble-MAP-Adam/eval.py | dataflowr/evaluating_bdl | head b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2 | licenses ["MIT"] | count: 110 | event min: 2019-06-04T13:30:23.000Z | event max: 2022-03-05T07:37:52.000Z
max_issues_repo: toyClassification/Ensemble-MAP-Adam/eval.py | dataflowr/evaluating_bdl | head b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2 | licenses ["MIT"] | count: 3 | event min: 2020-08-31T17:12:39.000Z | event max: 2021-09-12T01:21:24.000Z
max_forks_repo: toyClassification/Ensemble-MAP-Adam/eval.py | dataflowr/evaluating_bdl | head b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2 | licenses ["MIT"] | count: 23 | event min: 2019-06-05T08:53:28.000Z | event max: 2022-03-05T09:01:25.000Z
content:
# code-checked
# server-checked
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
batch_size = 32
M = 1
x_min = -6.0
x_max = 6.0
num_points = 60
networks = []
for i in range(M):
network = ToyNet("eval_Ensemble-MAP-Adam_1_M1024", project_dir="/root/evaluating_bdl/toyClassification").cuda()
network.load_state_dict(torch.load("/root/evaluating_bdl/toyClassification/training_logs/model_Ensemble-MAP-Adam_1_M1024_%d/checkpoints/model_Ensemble-MAP-Adam_1_M1024_epoch_150.pth" % i))
networks.append(network)
M = float(len(networks))
print (M)
for network in networks:
network.eval()
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
for x_2_i, x_2_value in enumerate(x_values):
x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))
mean_prob_vector = np.zeros((2, ))
for network in networks:
logits = network(x) # (shape: (1, num_classes)) (num_classes==2)
prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))
prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))
mean_prob_vector += prob_vector/M
false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
plt.figure(1)
x_1, x_2 = np.meshgrid(x_values, x_values)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density_gray.png" % network.model_dir)
plt.close(1)
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_GT.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_gray_GT.png" % network.model_dir)
plt.close(1)
with open("/root/evaluating_bdl/toyClassification/HMC/false_prob_values.pkl", "rb") as file: # (needed for python3)
false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
M = int(M)
fc1_weight_samples = np.zeros((M, 1, 10, 2))
fc1_bias_samples = np.zeros((M, 1, 10))
fc2_weight_samples = np.zeros((M, 1, 10, 10))
fc2_bias_samples = np.zeros((M, 1, 10))
fc3_weight_samples = np.zeros((M, 1, 2, 10))
fc3_bias_samples = np.zeros((M, 1, 2))
for index, network in enumerate(networks):
for name, param in network.named_parameters():
if name == "fc1.weight":
fc1_weight_samples[index, 0, :] = param.data.cpu().numpy()
elif name == "fc1.bias":
fc1_bias_samples[index, 0, :] = param.data.cpu().numpy()
elif name == "fc2.weight":
fc2_weight_samples[index, 0, :] = param.data.cpu().numpy()
elif name == "fc2.bias":
fc2_bias_samples[index, 0, :] = param.data.cpu().numpy()
elif name == "fc3.weight":
fc3_weight_samples[index, 0, :] = param.data.cpu().numpy()
elif name == "fc3.bias":
fc3_bias_samples[index, 0, :] = param.data.cpu().numpy()
else:
raise Exception("Unknown network parameter!")
import os
if not os.path.exists("%s/param_distributions" % (network.model_dir)):
os.makedirs("%s/param_distributions" % (network.model_dir))
# (fc1_weight_samples has shape: (M, 1, 10, 2))
for param_index_i in range(10):
for param_index_j in range(2):
values = fc1_weight_samples[:, 0, param_index_i, param_index_j] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc1_weight_%d_%d.png" % (network.model_dir, param_index_i, param_index_j))
plt.close(1)
# (fc1_bias_samples has shape: (M, 1, 10))
for param_index in range(10):
values = fc1_bias_samples[:, 0, param_index] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc1_bias_%d.png" % (network.model_dir, param_index))
plt.close(1)
# (fc2_weight_samples has shape: (M, 1, 10, 10))
for param_index_i in range(10):
for param_index_j in range(10):
values = fc2_weight_samples[:, 0, param_index_i, param_index_j] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc2_weight_%d_%d.png" % (network.model_dir, param_index_i, param_index_j))
plt.close(1)
# (fc2_bias_samples has shape: (M, 1, 10))
for param_index in range(10):
values = fc2_bias_samples[:, 0, param_index] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc2_bias_%d.png" % (network.model_dir, param_index))
plt.close(1)
# (fc3_weight_samples has shape: (M, 1, 2, 10))
for param_index_i in range(2):
for param_index_j in range(10):
values = fc3_weight_samples[:, 0, param_index_i, param_index_j] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc3_weight_%d_%d.png" % (network.model_dir, param_index_i, param_index_j))
plt.close(1)
# (fc3_bias_samples has shape: (M, 1, 2))
for param_index in range(2):
values = fc3_bias_samples[:, 0, param_index] # (shape: (M, ))
plt.figure(1)
plt.hist(np.array(values), bins=100)
plt.savefig("%s/param_distributions/fc3_bias_%d.png" % (network.model_dir, param_index))
plt.close(1)
avg_line_length: 37.015789 | max_line_length: 192 | alphanum_fraction: 0.690886

hexsha: 08e79bfcd21c031ba1ce8d2212e5dd87cd9bf5a3 | size: 1,428 | ext: py | lang: Python
max_stars_repo: test/unit/app/jobs/test_expression_run.py | quacksawbones/galaxy-1 | head 65f7259b29d3886e526d9be670c60d9da9fbe038 | licenses ["CC-BY-3.0"] | count: 1,085 | event min: 2015-02-18T16:14:38.000Z | event max: 2022-03-30T23:52:07.000Z
max_issues_repo: test/unit/app/jobs/test_expression_run.py | quacksawbones/galaxy-1 | head 65f7259b29d3886e526d9be670c60d9da9fbe038 | licenses ["CC-BY-3.0"] | count: 11,253 | event min: 2015-02-18T17:47:32.000Z | event max: 2022-03-31T21:47:03.000Z
max_forks_repo: test/unit/app/jobs/test_expression_run.py | quacksawbones/galaxy-1 | head 65f7259b29d3886e526d9be670c60d9da9fbe038 | licenses ["CC-BY-3.0"] | count: 1,000 | event min: 2015-02-18T16:18:10.000Z | event max: 2022-03-29T08:22:56.000Z
content:
import json
import os
import shutil
import subprocess
import tempfile
from galaxy.tools import expressions
from galaxy.util import galaxy_directory
LIB_DIRECTORY = os.path.join(galaxy_directory(), "lib")
def test_run_simple():
test_directory = tempfile.mkdtemp()
try:
environment_path = os.path.join(test_directory, "env.json")
environment = {
'job': {'input1': '7'},
'outputs': [
{'name': 'out1', 'from_expression': "output1", 'path': 'moo'}
],
'script': "{return {'output1': parseInt($job.input1)};}",
}
with open(environment_path, "w") as f:
json.dump(environment, f)
expressions.write_evalute_script(
test_directory,
)
new_env = os.environ.copy()
if "PYTHONPATH" in new_env:
new_env['PYTHONPATH'] = "{}:{}".format(LIB_DIRECTORY, new_env["PYTHONPATH"])
else:
new_env['PYTHONPATH'] = LIB_DIRECTORY
new_env['GALAXY_EXPRESSION_INPUTS'] = environment_path
subprocess.check_call(
args=expressions.EXPRESSION_SCRIPT_CALL,
shell=True,
cwd=test_directory,
env=new_env,
)
with open(os.path.join(test_directory, 'moo')) as f:
out_content = f.read()
assert out_content == '7', out_content
finally:
shutil.rmtree(test_directory)
avg_line_length: 31.043478 | max_line_length: 88 | alphanum_fraction: 0.591737

hexsha: 3c1c06b8328510e7a9ca9b7222db610dcc3ec94c | size: 7,482 | ext: py | lang: Python
max_stars_repo: yolo/train.py | antcc/proyecto-vc | head a98f82e47a5121ada6a72349f5c03874ded840c0 | licenses ["MIT"] | count: null | event min: null | event max: null
max_issues_repo: yolo/train.py | antcc/proyecto-vc | head a98f82e47a5121ada6a72349f5c03874ded840c0 | licenses ["MIT"] | count: null | event min: null | event max: null
max_forks_repo: yolo/train.py | antcc/proyecto-vc | head a98f82e47a5121ada6a72349f5c03874ded840c0 | licenses ["MIT"] | count: null | event min: null | event max: null
content:
import os
import numpy as np
import json
from .voc import parse_voc_annotation
from .yolo import create_yolov3_model, dummy_loss
from .utils.utils import makedirs
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam
from .callbacks import CustomModelCheckpoint, CustomTensorBoard
from .utils.multi_gpu_model import multi_gpu_model
import tensorflow.compat.v1 as tf
import keras
from keras.models import load_model
def create_training_instances(
train_annot_folder,
train_image_folder,
train_cache,
valid_annot_folder,
valid_image_folder,
valid_cache,
labels,
):
# parse annotations of the training set
train_ints, train_labels = parse_voc_annotation(train_annot_folder, train_image_folder, train_cache, labels)
# parse annotations of the validation set, if any, otherwise split the training set
if os.path.exists(valid_annot_folder):
valid_ints, valid_labels = parse_voc_annotation(valid_annot_folder, valid_image_folder, valid_cache, labels)
else:
print("valid_annot_folder not exists. Spliting the trainining set.")
train_valid_split = int(0.8*len(train_ints))
np.random.seed(0)
np.random.shuffle(train_ints)
np.random.seed()
valid_ints = train_ints[train_valid_split:]
train_ints = train_ints[:train_valid_split]
# compare the seen labels with the given labels in config.json
if len(labels) > 0:
overlap_labels = set(labels).intersection(set(train_labels.keys()))
print('Seen labels: \t' + str(train_labels) + '\n')
print('Given labels: \t' + str(labels))
# return None, None, None if some given label is not in the dataset
if len(overlap_labels) < len(labels):
print('Some labels have no annotations! Please revise the list of labels in the config.json.')
return None, None, None
else:
print('No labels are provided. Train on all seen labels.')
print(train_labels)
labels = train_labels.keys()
max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])
return train_ints, valid_ints, sorted(labels), max_box_per_image
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
makedirs(tensorboard_logs)
early_stop = EarlyStopping(
monitor = 'loss',
min_delta = 0.01,
patience = 7,
mode = 'min',
verbose = 1
)
checkpoint = CustomModelCheckpoint(
model_to_save = model_to_save,
filepath = saved_weights_name,# + '{epoch:02d}.h5',
monitor = 'loss',
verbose = 1,
save_best_only = True,
mode = 'min',
period = 1
)
reduce_on_plateau = ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'min',
min_delta = 0.01,
cooldown = 0,
min_lr = 0
)
tensorboard = CustomTensorBoard(
log_dir = tensorboard_logs,
write_graph = True,
write_images = True,
)
return [early_stop, checkpoint, reduce_on_plateau, tensorboard]
def create_model(
nb_class,
anchors,
max_box_per_image,
max_grid, batch_size,
warmup_batches,
ignore_thresh,
multi_gpu,
saved_weights_name,
lr,
grid_scales,
obj_scale,
noobj_scale,
xywh_scale,
class_scale,
backend_path,
fine_tune = 0
):
if multi_gpu > 1:
with tf.device('/cpu:0'):
template_model, infer_model = create_yolov3_model(
nb_class = nb_class,
anchors = anchors,
max_box_per_image = max_box_per_image,
max_grid = max_grid,
batch_size = batch_size//multi_gpu,
warmup_batches = warmup_batches,
ignore_thresh = ignore_thresh,
grid_scales = grid_scales,
obj_scale = obj_scale,
noobj_scale = noobj_scale,
xywh_scale = xywh_scale,
class_scale = class_scale,
finetune = fine_tune == 4
)
else:
template_model, infer_model = create_yolov3_model(
nb_class = nb_class,
anchors = anchors,
max_box_per_image = max_box_per_image,
max_grid = max_grid,
batch_size = batch_size,
warmup_batches = warmup_batches,
ignore_thresh = ignore_thresh,
grid_scales = grid_scales,
obj_scale = obj_scale,
noobj_scale = noobj_scale,
xywh_scale = xywh_scale,
class_scale = class_scale,
finetune = fine_tune == 4
)
# load the pretrained weight if exists, otherwise load the backend weight only
if os.path.exists(saved_weights_name):
print("\nLoading pretrained weights.\n")
template_model.load_weights(saved_weights_name)
else:
template_model.load_weights(backend_path, by_name=True)
if multi_gpu > 1:
train_model = multi_gpu_model(template_model, gpus=multi_gpu)
else:
train_model = template_model
# Fine-tuning
if fine_tune == 1:
for layer in train_model.layers:
layer.trainable = False
# Unfreeze large detection block (small objects)
train_model.layers[254].trainable = True
train_model.layers[242].trainable = True
train_model.layers[237].trainable = True
train_model.layers[234].trainable = True
# Unfreeze medium detection block
train_model.layers[252].trainable = True
train_model.layers[241].trainable = True
train_model.layers[217].trainable = True
train_model.layers[214].trainable = True
# Unfreeze small detection block
train_model.layers[249].trainable = True
train_model.layers[240].trainable = True
train_model.layers[197].trainable = True
train_model.layers[194].trainable = True
elif fine_tune == 2:
for layer in train_model.layers:
layer.trainable = False
# Unfreeze large detection block (small objects)
train_model.layers[254].trainable = True
train_model.layers[242].trainable = True
train_model.layers[237].trainable = True
train_model.layers[234].trainable = True
# Unfreeze medium detection block
train_model.layers[252].trainable = True
train_model.layers[241].trainable = True
train_model.layers[217].trainable = True
train_model.layers[214].trainable = True
elif fine_tune == 3:
for layer in train_model.layers:
layer.trainable = False
# Unfreeze large detection block (small objects)
train_model.layers[254].trainable = True
train_model.layers[242].trainable = True
train_model.layers[237].trainable = True
train_model.layers[234].trainable = True
optimizer = Adam(lr=lr, clipnorm=0.001)
train_model.compile(loss=dummy_loss, optimizer=optimizer)
return train_model, infer_model
avg_line_length: 34.962617 | max_line_length: 116 | alphanum_fraction: 0.619487

hexsha: e3c6a88a0a757fd3e0d2c2f03ea37fea4172c341 | size: 27,747 | ext: py | lang: Python
max_stars_repo: src/python/zensols/deeplearn/model/facade.py | plandes/deeplearn | head 925f02200c62a7dc798e474ed94a86e009fd1ebf | licenses ["MIT"] | count: 2 | event min: 2021-04-30T17:19:14.000Z | event max: 2021-05-04T03:48:59.000Z
max_issues_repo: src/python/zensols/deeplearn/model/facade.py | plandes/deeplearn | head 925f02200c62a7dc798e474ed94a86e009fd1ebf | licenses ["MIT"] | count: null | event min: null | event max: null
max_forks_repo: src/python/zensols/deeplearn/model/facade.py | plandes/deeplearn | head 925f02200c62a7dc798e474ed94a86e009fd1ebf | licenses ["MIT"] | count: null | event min: null | event max: null
content:
from __future__ import annotations
"""Client entry point to the model.
"""
__author__ = 'Paul Landes'
from typing import Any, Callable, List, Union, Iterable
from dataclasses import dataclass, field, InitVar
import sys
import logging
import pandas as pd
from io import TextIOBase
from pathlib import Path
from zensols.util import time
from zensols.config import (
Configurable,
ConfigFactory,
Writable,
ImportConfigFactory,
)
from zensols.persist import (
persisted, PersistableContainer, PersistedWork,
Deallocatable, Stash,
)
from zensols.dataset import DatasetSplitStash
from zensols.deeplearn import ModelError, NetworkSettings, ModelSettings
from zensols.deeplearn.vectorize import (
SparseTensorFeatureContext, FeatureVectorizerManagerSet,
)
from zensols.deeplearn.batch import (
Batch, DataPoint, BatchStash, BatchMetadata,
BatchMetadataFactory, BatchFeatureMapping
)
from zensols.deeplearn.result import (
EpochResult, ModelResult, ModelResultManager, PredictionsDataFrameFactory
)
from . import (
ModelManager, ModelExecutor, PredictionMapper,
FacadeClassExplorer, MetadataNetworkSettings,
ResultAnalyzer,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelFacade(PersistableContainer, Writable):
"""This class provides easy to use client entry points to the model executor,
which trains, validates, tests, saves and loads the model.
More common attributes, such as the learning rate and number of epochs, are
properties that dispatch to :py:obj:`executor`. For the others, go
directly to the property.
:see: :class:`zensols.deeplearn.domain.ModelSettings`
"""
SINGLETONS = {}
config: Configurable = field()
"""The configuraiton used to create the facade, and used to create a new
configuration factory to load models.
"""
config_factory: InitVar[ConfigFactory] = field(default=None)
"""The configuration factory used to create this facade, or ``None`` if no
factory was used.
"""
progress_bar: bool = field(default=True)
"""Create text/ASCII based progress bar if ``True``."""
progress_bar_cols: int = field(default=None)
"""The number of console columns to use for the text/ASCII based progress
bar.
"""
executor_name: str = field(default='executor')
"""The configuration entry name for the executor, which defaults to
``executor``.
"""
writer: TextIOBase = field(default=sys.stdout)
"""The writer to this in methods like :meth:`train`, and :meth:`test` for
writing performance metrics results and predictions or ``None`` to not
output them.
"""
def __post_init__(self, config_factory: ConfigFactory):
super().__init__()
self._init_config_factory(config_factory)
self._config_factory = PersistedWork('_config_factory', self)
self._executor = PersistedWork('_executor', self)
self.debuged = False
@classmethod
def get_singleton(cls, *args, **kwargs) -> Any:
key = str(cls)
inst = cls.SINGLETONS.get(key)
if inst is None:
inst = cls(*args, **kwargs)
cls.SINGLETONS[key] = inst
return inst
def _init_config_factory(self, config_factory: ConfigFactory):
if isinstance(config_factory, ImportConfigFactory):
params = config_factory.__dict__
keeps = set('reload shared reload_pattern'.split())
params = {k: params[k] for k in set(params.keys()) & keeps}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'import config factory params: {params}')
self._config_factory_params = params
else:
self._config_factory_params = {}
def _create_executor(self) -> ModelExecutor:
"""Create a new instance of an executor. Used by :obj:`executor`.
"""
logger.info('creating new executor')
executor = self.config_factory(
self.executor_name,
progress_bar=self.progress_bar,
progress_bar_cols=self.progress_bar_cols)
return executor
@property
@persisted('_config_factory')
def config_factory(self):
"""The configuration factory used to create facades.
"""
return ImportConfigFactory(self.config, **self._config_factory_params)
@property
@persisted('_executor')
def executor(self) -> ModelExecutor:
"""A cached instance of the executor tied to the instance of this class.
"""
return self._create_executor()
@property
def net_settings(self) -> NetworkSettings:
"""Return the executor's network settings.
"""
return self.executor.net_settings
@property
def model_settings(self) -> ModelSettings:
"""Return the executor's model settings.
"""
return self.executor.model_settings
@property
def result_manager(self) -> ModelResultManager:
"""Return the executor's result manager.
"""
rm: ModelResultManager = self.executor.result_manager
if rm is None:
rm = ModelError('No result manager available')
return rm
@property
def feature_stash(self) -> Stash:
"""The stash used to generate the feature, which is not to be confused
with the batch source stash ``batch_stash``.
"""
return self.executor.feature_stash
@property
def batch_stash(self) -> BatchStash:
"""The stash used to encode and decode batches by the executor.
"""
return self.executor.batch_stash
@property
def dataset_stash(self) -> DatasetSplitStash:
"""The stash used to encode and decode batches split by dataset.
"""
return self.executor.dataset_stash
@property
def vectorizer_manager_set(self) -> FeatureVectorizerManagerSet:
"""Return the vectorizer manager set used for the facade. This is taken from
the executor's batch stash.
"""
return self.batch_stash.vectorizer_manager_set
@property
def batch_metadata(self) -> BatchMetadata:
"""Return the batch metadata used on the executor.
:see: :class:`zensols.deepnlp.model.module.EmbeddingNetworkSettings`
"""
ns = self.net_settings
meta: BatchMetadata
if isinstance(ns, MetadataNetworkSettings):
meta = ns.batch_metadata_factory()
else:
fac = BatchMetadataFactory(self.batch_stash)
meta = fac()
return meta
@property
def label_attribute_name(self):
"""Get the label attribute name.
"""
bmeta = self.batch_metadata
if bmeta is not None:
return bmeta.mapping.label_attribute_name
def remove_metadata_mapping_field(self, attr: str) -> bool:
"""Remove a field by attribute if it exists across all metadata mappings.
This is useful when a very expensive vectorizer slows down tasks, such
as prediction, on a single run of a program. For this use case,
override :meth:`predict` to call this method before calling the super
``predict`` method.
:param attr: the name of the field's attribute to remove
:return: ``True`` if the field was removed, ``False`` otherwise
"""
removed = False
meta: BatchMetadata = self.batch_metadata
mapping: BatchFeatureMapping
for mapping in meta.mapping.manager_mappings:
removed = removed or mapping.remove_field(attr)
return removed
@property
def dropout(self) -> float:
"""The dropout for the entire network.
"""
return self.net_settings.dropout
@dropout.setter
def dropout(self, dropout: float):
"""The dropout for the entire network.
"""
self.net_settings.dropout = dropout
@property
def epochs(self) -> int:
"""The number of epochs for training and validation.
"""
return self.model_settings.epochs
@epochs.setter
def epochs(self, n_epochs: int):
"""The number of epochs for training and validation.
"""
self.model_settings.epochs = n_epochs
@property
def learning_rate(self) -> float:
"""The learning rate to set on the optimizer.
"""
return self.model_settings.learning_rate
@learning_rate.setter
def learning_rate(self, learning_rate: float):
"""The learning rate to set on the optimizer.
"""
self.executor.model_settings.learning_rate = learning_rate
@property
def cache_batches(self) -> bool:
"""The cache_batches for the entire network.
"""
return self.model_settings.cache_batches
@cache_batches.setter
def cache_batches(self, cache_batches: bool):
"""The cache_batches for the entire network.
"""
# if the caching strategy changed, be safe and deallocate and purge to
# lazy recreate everything
if self.model_settings.cache_batches != cache_batches:
self.clear()
self.model_settings.cache_batches = cache_batches
def clear(self):
"""Clear out any cached executor.
"""
if logger.isEnabledFor(logging.INFO):
logger.info('clearing')
executor = self.executor
config_factory = self.config_factory
executor.deallocate()
config_factory.deallocate()
self._executor.clear()
self._config_factory.clear()
def reload(self):
"""Clears all state and reloads the configuration.
"""
self.clear()
self.config.reload()
def deallocate(self):
super().deallocate()
self.SINGLETONS.pop(str(self.__class__), None)
@classmethod
def load_from_path(cls, path: Path, *args, **kwargs) -> ModelFacade:
"""Construct a new facade from the data saved in a persisted model file. This
uses the :py:meth:`.ModelManager.load_from_path` to reconstruct the
returned facade, which means some attributes are taken from default if
not taken from ``*args`` or ``**kwargs``.
Arguments:
Passed through to the initializer of invoking class ``cls``.
:return: a new instance of a :class:`.ModelFacade`
:see: :meth:`.ModelManager.load_from_path`
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading from facade from {path}')
mm = ModelManager.load_from_path(path)
if 'executor_name' not in kwargs:
kwargs['executor_name'] = mm.model_executor_name
executor = mm.load_executor()
mm.config_factory.deallocate()
facade: ModelFacade = cls(executor.config, *args, **kwargs)
facade._config_factory.set(executor.config_factory)
facade._executor.set(executor)
return facade
def debug(self, debug_value: Union[bool, int] = True):
"""Debug the model by setting the configuration to debug mode and invoking a
single forward pass. Logging must be configured properly to get the
output, which is typically just invoking
:py:meth:`logging.basicConfig`.
:param debug_value: ``True`` turns on executor debugging; if an
``int``, the higher the value, the more the logging
"""
executor = self.executor
self._configure_debug_logging()
executor.debug = debug_value
executor.progress_bar = False
executor.model_settings.batch_limit = 1
self.debuged = True
executor.train()
def persist_result(self):
"""Save the last recorded result during an :py:meth:`.Executor.train` or
:py:meth:`.Executor.test` invocation to disk. Optionally also save a
plotted graphics file to disk as well when :obj:`persist_plot_result`
is set to ``True``.
Note that in Jupyter notebooks, this method has the side effect of
plotting the results in the cell when ``persist_plot_result`` is
``True``.
:param persist_plot_result: if ``True``, plot and save the graph as a
PNG file to the results directory
"""
executor = self.executor
rmng: ModelResultManager = self.result_manager
if executor.result_manager is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dumping model result: {executor.model_result}')
rmng.dump(executor.model_result)
def train(self, description: str = None) -> ModelResult:
"""Train and test or just debug the model depending on the configuration.
:param description: a description used in the results, which is useful
when making incremental hyperparameter changes to
the model
"""
executor = self.executor
executor.reset()
# if self.writer is not None:
# executor.write(writer=self.writer)
logger.info('training...')
with time('trained'):
res = executor.train(description)
return res
def test(self, description: str = None) -> ModelResult:
"""Load the model from disk and test it.
"""
if self.debuged:
raise ModelError('Testing is not allowed in debug mode')
executor = self.executor
executor.load()
logger.info('testing...')
with time('tested'):
res = executor.test(description)
if self.writer is not None:
res.write(writer=self.writer)
return res
def train_production(self, description: str = None) -> ModelResult:
"""Train on the training and test data sets, then test
:param description: a description used in the results, which is useful
when making incremental hyperparameter changes to
the model
"""
executor = self.executor
executor.reset()
if self.writer is not None:
executor.write(writer=self.writer)
logger.info('training...')
with time('trained'):
res = executor.train_production(description)
return res
def predict(self, datas: Iterable[Any]) -> Any:
"""Make ad-hoc predictions on batches without labels, and return the results.
:param datas: the data predict on, each as a separate element as a data
point in a batch
"""
executor: ModelExecutor = self.executor
ms: ModelSettings = self.model_settings
if ms.prediction_mapper_name is None:
raise ModelError(
'The model settings ({ms})is not configured to create ' +
"prediction batches: no set 'prediction_mapper'")
pm: PredictionMapper = self.config_factory.new_instance(
ms.prediction_mapper_name, datas, self.batch_stash)
try:
batches: List[Batch] = pm.batches
if not executor.model_exists:
executor.load()
logger.info('predicting...')
with time('predicted'):
res: ModelResult = executor.predict(batches)
eres: EpochResult = res.results[0]
ret: Any = pm.map_results(eres)
finally:
pm.deallocate()
return ret
def stop_training(self):
"""Early stop training if the model is currently training. This invokes the
:meth:`.TrainManager.stop`, communicates to the training process to
stop on the next check.
:return: ``True`` if the application is configured to early stop and
the signal has not already been given
"""
return self.executor.train_manager.stop()
@property
def last_result(self) -> ModelResult:
"""The last recorded result during an :meth:`.ModelExecutor.train` or
:meth:`.ModelExecutor.test` invocation is used.
"""
res = self.executor.model_result
if res is None:
rm: ModelResultManager = self.result_manager
res = rm.load()
if res is None:
raise ModelError('No results found')
return res
def write_result(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_settings: bool = False,
include_converged: bool = False,
include_config: bool = False):
"""Load the last set of results from the file system and print them out. The
result to print is taken from :obj:`last_result`
:param depth: the number of indentation levels
:param writer: the data sink
:param include_settings: whether or not to include model and network
settings in the output
:param include_config: whether or not to include the configuration in
the output
"""
if logger.isEnabledFor(logging.INFO):
logger.info('load previous results')
res = self.last_result
res.write(depth, writer, include_settings=include_settings,
include_converged=include_converged,
include_config=include_config)
def plot_result(self, result: ModelResult = None, save: bool = False,
show: bool = False) -> ModelResult:
"""Plot results and optionally save and show them. If this is called in a
Jupyter notebook, the plot will be rendered in a cell.
:param result: the result to plot, or if ``None``, use
:py:meth:`last_result`
:param save: if ``True``, save the plot to the results directory with
the same naming as the last data results
:param show: if ``True``, invoke ``matplotlib``'s ``show`` function to
visualize in a non-Jupyter environment
:return: the result used to graph, which comes from the executor when
none is given to the invocation
"""
result = self.last_result if result is None else result
grapher = self.executor.result_manager.get_grapher()
grapher.plot([result])
if save:
grapher.save()
if show:
grapher.show()
return result
def get_predictions_factory(self, column_names: List[str] = None,
transform: Callable[[DataPoint], tuple] = None,
batch_limit: int = sys.maxsize,
name: str = None) \
-> PredictionsDataFrameFactory:
"""Generate a predictions factoty from the test data set.
:param column_names: the list of string column names for each data item
the list returned from ``data_point_transform`` to
be added to the results for each label/prediction
:param transform:
a function that returns a tuple, each with an element respective of
``column_names`` to be added to the results for each
label/prediction; if ``None`` (the default), ``str`` used (see the
`Iris Jupyter Notebook
<https://github.com/plandes/deeplearn/blob/master/notebook/iris.ipynb>`_
example)
:param batch_limit: the max number of batche of results to output
:param name: the key of the previously saved results to fetch the
results, or ``None`` (the default) to get the last result
set saved
"""
rm: ModelResultManager = self.result_manager
res: ModelResult
if name is None:
res = self.last_result
key: str = rm.get_last_key(False)
else:
res = rm.load(name)
key: str = name
if res is None:
raise ModelError(f'No test results found: {name}')
if not res.test.contains_results:
raise ModelError('No test results found')
path: Path = rm.key_to_path(key)
return PredictionsDataFrameFactory(
path, res, self.batch_stash,
column_names, transform, batch_limit)
def get_predictions(self, *args, **kwargs) -> pd.DataFrame:
"""Generate a Pandas dataframe containing all predictions from the test data
set.
:see: :meth:`get_predictions_factory`
"""
df_fac = self.get_predictions_factory(*args, **kwargs)
return df_fac.dataframe
def write_predictions(self, lines: int = 10):
"""Print the predictions made during the test phase of the model execution.
:param lines: the number of lines of the predictions data frame to be
printed
:param writer: the data sink
"""
preds = self.get_predictions()
print(preds.head(lines), file=self.writer)
def get_result_analyzer(self, key: str = None,
cache_previous_results: bool = False) \
-> ResultAnalyzer:
"""Return a results analyzer for comparing in flight training progress.
"""
rm: ModelResultManager = self.result_manager
if key is None:
key = rm.get_last_key()
return ResultAnalyzer(self.executor, key, cache_previous_results)
@property
def class_explorer(self) -> FacadeClassExplorer:
return self._create_facade_explorer()
def _create_facade_explorer(self) -> FacadeClassExplorer:
"""Return a facade explorer used to print the facade's object graph.
"""
return FacadeClassExplorer()
def write(self, depth: int = 0, writer: TextIOBase = None,
include_executor: bool = True, include_metadata: bool = True,
include_settings: bool = True, include_model: bool = True,
include_config: bool = False, include_object_graph: bool = False):
writer = self.writer if writer is None else writer
writer = sys.stdout if writer is None else writer
bmeta = None
try:
bmeta = self.batch_metadata
except AttributeError:
pass
if include_executor:
self._write_line(f'{self.executor.name}:', depth, writer)
self.executor.write(depth + 1, writer,
include_settings=include_settings,
include_model=include_model)
if include_metadata and bmeta is not None:
self._write_line('metadata:', depth, writer)
bmeta.write(depth + 1, writer)
if include_object_graph:
self._write_line('graph:', depth, writer)
ce = self._create_facade_explorer()
ce.write(self, depth=depth + 1, writer=writer)
if include_config:
self._write_line('config:', depth, writer)
self.config.write(depth + 1, writer)
def _deallocate_config_instance(self, inst: Any):
if isinstance(self.config_factory, ImportConfigFactory):
inst = self.config_factory.clear_instance(inst)
dealloc = isinstance(inst, Deallocatable)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocate {inst}: {type(inst)}: {dealloc}')
if dealloc:
inst.deallocate()
def _configure_debug_logging(self):
"""When debuging the model, configure the logging system for output. The
correct loggers need to be set to debug mode to print the model
debugging information such as matrix shapes.
"""
for name in ['zensols.deeplearn.model',
__name__]:
logging.getLogger(name).setLevel(logging.DEBUG)
def _configure_cli_logging(self, info_loggers: List[str],
debug_loggers: List[str]):
info_loggers.extend([
# multi-process (i.e. batch creation)
'zensols.multi.stash',
'zensols.deeplearn.batch.multi',
# validation/training loss messages
'zensols.deeplearn.model.executor.status',
__name__])
if not self.progress_bar:
info_loggers.extend([
# load messages
'zensols.deeplearn.batch.stash',
# save results messages
'zensols.deeplearn.result',
# validation/training loss messages
'zensols.deeplearn.model.executor.progress',
# model save/load
'zensols.deeplearn.model.manager',
# early stop messages
'zensols.deeplearn.model.trainmng',
# model save messages
'zensols.deeplearn.result.manager',
# CLI interface
'zensols.deeplearn.cli.app'])
@staticmethod
def configure_default_cli_logging(log_level: int = logging.WARNING):
"""Configure the logging system with the defaults.
"""
fmt = '%(asctime)s[%(levelname)s]%(name)s: %(message)s'
logging.basicConfig(format=fmt, level=log_level)
def configure_cli_logging(self, log_level: int = None):
""""Configure command line (or Python REPL) debugging. Each facade can turn on
name spaces that make sense as useful information output for long
running training/testing iterations.
This calls "meth:`_configure_cli_logging` to collect the names of
loggers at various levels.
"""
info = []
debug = []
if log_level is not None:
self.configure_default_cli_logging(log_level)
self._configure_cli_logging(info, debug)
for name in info:
logging.getLogger(name).setLevel(logging.INFO)
for name in debug:
logging.getLogger(name).setLevel(logging.DEBUG)
def configure_jupyter(self, log_level: int = logging.WARNING,
progress_bar_cols: int = 120):
"""Configures logging and other configuration related to a Jupyter notebook.
This is just like :py:meth:`configure_cli_logging`, but adjusts logging
for what is conducive for reporting in Jupyter cells.
;param log_level: the default logging level for the logging system
:param progress_bar_cols: the number of columns to use for the progress
bar
"""
self.configure_cli_logging(log_level)
for name in [
# turn off loading messages
'zensols.deeplearn.batch.stash',
# turn off model save messages
'zensols.deeplearn.result.manager']:
logging.getLogger(name).setLevel(logging.WARNING)
# number of columns for the progress bar
self.executor.progress_bar_cols = progress_bar_cols
# turn off console output (non-logging)
self.writer = None
@staticmethod
def get_encode_sparse_matrices() -> bool:
"""Return whether or not sparse matricies are encoded.
:see: :meth:`set_sparse`
"""
return SparseTensorFeatureContext.USE_SPARSE
@staticmethod
def set_encode_sparse_matrices(use_sparse: bool = False):
"""If called before batches are created, encode all tensors the would be
encoded as dense rather than sparse when ``use_sparse`` is ``False``.
Oherwise, tensors will be encoded as sparse where it makes sense on a
per vectorizer basis.
"""
SparseTensorFeatureContext.USE_SPARSE = use_sparse
avg_line_length: 35.618742 | max_line_length: 87 | alphanum_fraction: 0.622698

hexsha: 423be0178255ff7a723bba0b5acd380df905f96e | size: 3,986 | ext: py | lang: Python
max_stars_repo: openbci/utils/utilities.py | vishwas1234567/OpenBCI_Python | head 25232ac7602e0b651796dcaa0bc710c1d9cc700c | licenses ["MIT"] | count: 414 | event min: 2015-01-18T21:53:07.000Z | event max: 2020-04-05T11:50:35.000Z
max_issues_repo: openbci/utils/utilities.py | vishwas1234567/OpenBCI_Python | head 25232ac7602e0b651796dcaa0bc710c1d9cc700c | licenses ["MIT"] | count: 106 | event min: 2015-02-19T19:18:17.000Z | event max: 2020-03-14T17:19:43.000Z
max_forks_repo: openbci/utils/utilities.py | ikmckenz/OpenBCI_Python | head 25232ac7602e0b651796dcaa0bc710c1d9cc700c | licenses ["MIT"] | count: 206 | event min: 2015-01-04T06:27:55.000Z | event max: 2020-03-03T12:18:54.000Z
content:
from openbci.utils.constants import Constants
def make_tail_byte_from_packet_type(packet_type):
"""
Converts a packet type {Number} into a OpenBCI stop byte
:param packet_type: {int} The number to smash on to the stop byte. Must be 0-15,
out of bounds input will result in a 0
:return: A properly formatted OpenBCI stop byte
"""
if packet_type < 0 or packet_type > 15:
packet_type = 0
return Constants.RAW_BYTE_STOP | packet_type
def sample_number_normalize(sample_number=None):
if sample_number is not None:
if sample_number > Constants.SAMPLE_NUMBER_MAX_CYTON:
sample_number = Constants.SAMPLE_NUMBER_MAX_CYTON
else:
sample_number = 0x45
return sample_number
def sample_packet(sample_number=0x45):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0, 0, 0, 1, 0, 2,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_STANDARD_ACCEL)])
def sample_packet_zero(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_STANDARD_ACCEL)])
def sample_packet_real(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0x8F, 0xF2, 0x40, 0x8F, 0xDF, 0xF4, 0x90,
0x2B, 0xB6, 0x8F, 0xBF,
0xBF, 0x7F, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0x94, 0x25, 0x34, 0x20, 0xB6, 0x7D, 0, 0xE0, 0,
0xE0, 0x0F, 0x70,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_STANDARD_ACCEL)])
def sample_packet_standard_raw_aux(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0, 1, 2, 3, 4, 5,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_STANDARD_RAW_AUX)])
def sample_packet_accel_time_sync_set(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0, 1, 0, 0, 0, 1,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_ACCEL_TIME_SYNC_SET)])
def sample_packet_accel_time_synced(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0, 1, 0, 0, 0, 1,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_ACCEL_TIME_SYNCED)])
def sample_packet_raw_aux_time_sync_set(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0x00, 0x01, 0, 0, 0, 1,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_RAW_AUX_TIME_SYNC_SET)])
def sample_packet_raw_aux_time_synced(sample_number):
return bytearray(
[0xA0, sample_number_normalize(sample_number), 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5,
0, 0, 6, 0, 0, 7, 0,
0, 8, 0x00, 0x01, 0, 0, 0, 1,
make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_RAW_AUX_TIME_SYNCED)])
def sample_packet_impedance(channel_number):
return bytearray(
[0xA0, channel_number, 54, 52, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, make_tail_byte_from_packet_type(Constants.RAW_PACKET_TYPE_IMPEDANCE)])
def sample_packet_user_defined():
return bytearray(
[0xA0, 0x00, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1,
make_tail_byte_from_packet_type(Constants.OBCIStreamPacketUserDefinedType)])
avg_line_length: 38.326923 | max_line_length: 99 | alphanum_fraction: 0.64275

hexsha: e51296583a7589b8dd1b84a687b54794360ac033 | size: 8,885 | ext: py | lang: Python
max_stars_repo: experimental/single_model_uncertainty/flags.py | athon-millane/uncertainty-baselines | head aa504fc51aac6d4cac47dbd34aa672c670dfbd28 | licenses ["Apache-2.0"] | count: 1 | event min: 2021-01-22T15:06:24.000Z | event max: 2021-01-22T15:06:24.000Z
max_issues_repo: experimental/single_model_uncertainty/flags.py | gpleiss/uncertainty-baselines | head 60b08e50e8d64cbd4d09689a35d5be81ed15e624 | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
max_forks_repo: experimental/single_model_uncertainty/flags.py | gpleiss/uncertainty-baselines | head 60b08e50e8d64cbd4d09689a35d5be81ed15e624 | licenses ["Apache-2.0"] | count: null | event min: null | event max: null
content:
# coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Common flags."""
from typing import Any, Dict, List
from absl import flags
from uncertainty_baselines.datasets import datasets
from uncertainty_baselines.models import models
FLAGS = flags.FLAGS
def serialize_flags(flag_list: Dict[str, Any]) -> str:
string = ''
for flag_name, flag_value in flag_list.items():
string += '--{}={}\n'.format(flag_name, flag_value)
# Remove the final trailing newline.
return string[:-1]
def define_flags() -> List[str]:
"""Define common flags."""
predefined_flags = set(FLAGS)
flags.DEFINE_string('experiment_name', None, 'Name of this experiment.')
# TPU Job flags.
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_enum(
'mode',
'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Whether to execute train and/or eval.')
flags.DEFINE_integer(
'num_cores', None, 'How many TPU cores or GPUs the job is running on.')
flags.DEFINE_bool('run_ood', False, 'Whether to run OOD jobs with eval job.')
flags.DEFINE_bool('use_cpu', False, 'Whether to run on CPU.')
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or TPU.')
# Train/eval loop flags.
flags.DEFINE_integer(
'checkpoint_step', -1, 'Step of the checkpoint to restore from.')
flags.DEFINE_enum(
'dataset_name',
None,
datasets.get_dataset_names(),
'Name of the dataset to use.')
flags.DEFINE_enum(
'ood_dataset_name',
None,
datasets.get_dataset_names(),
'Name of the OOD dataset to use for evaluation.')
flags.DEFINE_integer(
'eval_frequency',
None,
'How many steps between evaluating on the (validation and) test set.')
flags.DEFINE_string('output_dir', None, 'Base output directory.')
flags.DEFINE_enum(
'model_name',
None,
models.get_model_names(),
'Name of the model to use.')
flags.DEFINE_integer(
'log_frequency',
100,
'How many steps between logging the metrics.')
flags.DEFINE_integer('train_steps', None, 'How many steps to train for.')
# Hyperparamater flags.
flags.DEFINE_integer('batch_size', None, 'Training batch size.')
flags.DEFINE_integer('eval_batch_size', None, 'Validation/test batch size.')
flags.DEFINE_float('learning_rate', None, 'Learning rate.')
flags.DEFINE_string(
'learning_rate_schedule',
'constant',
'Learning rate schedule to use.')
flags.DEFINE_integer('schedule_hparams_warmup_epochs', 1,
'Number of epochs for a linear warmup to the initial '
'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('schedule_hparams_decay_ratio', 0.2,
'Amount to decay learning rate.')
flags.DEFINE_list('schedule_hparams_decay_epochs', ['60', '120', '160'],
'Epochs to decay learning rate by.')
flags.DEFINE_string('optimizer', 'adam', 'Optimizer to use.')
flags.DEFINE_float('optimizer_hparams_momentum', 0.9, 'SGD momentum.')
flags.DEFINE_float('optimizer_hparams_beta_1', 0.9, 'Adam beta_1.')
flags.DEFINE_float('optimizer_hparams_beta_2', 0.999, 'Adam beta_2.')
flags.DEFINE_float('optimizer_hparams_epsilon', 1e-7, 'Adam epsilon.')
flags.DEFINE_float('weight_decay', 0.0, 'Weight decay for optimizer.')
flags.DEFINE_float('l2_regularization', 1e-4, 'L2 regularization for models.')
flags.DEFINE_float(
'focal_loss_gamma', 0.0, 'The gamma parameter in the focal loss. '
'If gamma=0.0, the focal loss is equivalent to cross entropy loss.')
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_float(
'validation_percent',
0.0,
'Percent of training data to hold out and use as a validation set.')
flags.DEFINE_integer(
'shuffle_buffer_size', 16384, 'Dataset shuffle buffer size.')
# Model flags, Wide Resnet
flags.DEFINE_integer('wide_resnet_depth', 28,
'Depth of wide resnet model.')
flags.DEFINE_integer('wide_resnet_width_multiplier', 10,
'Width multiplier for wide resnet model.')
flags.DEFINE_integer('num_classes', 10, 'Number of label classes.')
# Flags relating to genomics_cnn model
flags.DEFINE_integer('len_seqs', 250,
'Sequence length, only used for genomics dataset.')
flags.DEFINE_integer('num_motifs', 1024,
'Number of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('len_motifs', 20,
'Length of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('num_denses', 128,
'Number of denses, only used for the genomics dataset.')
# Flags relating to SNGP model
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for dropout layers.')
flags.DEFINE_bool(
'before_conv_dropout', False,
      'Whether to use filter-wise dropout before convolutional layers.')
flags.DEFINE_bool(
'use_mc_dropout', False,
'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_bool('use_spec_norm', False,
'Whether to apply spectral normalization.')
flags.DEFINE_bool('use_gp_layer', False,
'Whether to use Gaussian process as the output layer.')
# Model flags, Spectral Normalization.
flags.DEFINE_integer(
'spec_norm_iteration', 1,
'Number of power iterations to perform for estimating '
'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
'Upper bound to spectral norm of weight matrices.')
# Model flags, Gaussian Process layer.
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
'gp_scale', 2.,
'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
'gp_input_dim', 128,
'The dimension to reduce the neural network input to for the GP layer '
'(via random Gaussian projection which preserves distance by the '
      'Johnson-Lindenstrauss lemma). If -1, no dimension reduction is applied.')
flags.DEFINE_integer(
'gp_hidden_dim', 1024,
'The hidden dimension of the GP layer, which corresponds to the number '
      'of random features used for the approximation.')
flags.DEFINE_bool(
'gp_input_normalization', True,
      'Whether to normalize the input using LayerNorm for the GP layer. '
'This is similar to automatic relevance determination (ARD) in the '
'classic GP learning.')
flags.DEFINE_float(
'gp_cov_ridge_penalty', 1e-3,
'The Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
'gp_cov_discount_factor', 0.999,
'The discount factor to compute the moving average of '
      'the precision matrix.')
flags.DEFINE_float(
'gp_mean_field_factor', 0.001,
'The tunable multiplicative factor used in the mean-field approximation '
      'for the posterior mean of the softmax Gaussian process. If -1 then use '
'posterior mode instead of posterior mean. See [2] for detail.')
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('experiment_name')
flags.mark_flag_as_required('model_name')
# Flags relating to OOD metrics
flags.DEFINE_list(
'sensitivity_thresholds', ['0.05', '0.95', '10'],
'List of sensitivities at which to calculate specificity.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'specificity_thresholds', ['0.05', '0.95', '10'],
'List of specificities at which to calculate sensitivity.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'precision_thresholds', ['0.05', '0.95', '10'],
'List of precisions at which to calculate recall.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
'recall_thresholds', ['0.05', '0.95', '10'],
'List of recalls at which to calculate precision.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
all_flags = set(FLAGS)
program_flag_names = sorted(list(all_flags - predefined_flags))
return program_flag_names
| 41.134259 | 80 | 0.685988 |
e03c0997de5b42e67d95ebdd0d51ca007999ea17 | 4,285 | py | Python | src/util.py | xujiajiadexiaokeai/asahi-installer | 95356ecd2b2492258b73b8f16a136ff59ed255bb | ["MIT"] | null | null | null | src/util.py | xujiajiadexiaokeai/asahi-installer | 95356ecd2b2492258b73b8f16a136ff59ed255bb | ["MIT"] | null | null | null | src/util.py | xujiajiadexiaokeai/asahi-installer | 95356ecd2b2492258b73b8f16a136ff59ed255bb | ["MIT"] | null | null | null |
# SPDX-License-Identifier: MIT
import re, logging, sys, os, stat, shutil
def ssize(v):
suffixes = ["B", "KB", "MB", "GB", "TB"]
for i in suffixes:
if v < 1000 or i == suffixes[-1]:
if isinstance(v, int):
return f"{v} {i}"
else:
return f"{v:.2f} {i}"
v /= 1000
def psize(v, align=None):
v = v.upper().replace(" ", "")
base = 1000
if v[-2] == "I":
base = 1024
v = v[:-2] + v[-1]
suffixes = {"TB": 4, "GB": 3, "MB": 2, "KB": 1, "B": 0, "": 0}
for suffix, power in suffixes.items():
if v.endswith(suffix):
val = int(float(v[:-len(suffix)]) * (base ** power))
break
else:
return None
if align is not None:
if isinstance(align, str):
align = psize(align)
assert align is not None
val = align_up(val, align)
return val
def split_ver(s):
parts = re.split(r"[-. ]", s)
parts2 = []
for i in parts:
try:
parts2.append(int(i))
except ValueError:
parts2.append(i)
if len(parts2) > 3 and parts2[-2] == "beta":
parts2[-3] -= 1
parts2[-2] = 99
return tuple(parts2)
def align_up(v, a=16384):
return (v + a - 1) & ~(a - 1)
align = align_up
def align_down(v, a=16384):
return v & ~(a - 1)
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
def col(*color):
color = ";".join(map(str, color))
return f"\033[{color}m"
def p_style(*args, color=[], **kwargs):
if isinstance(color, int):
color = [color]
text = " ".join(map(str, args))
print(col(*color) + text + col(), **kwargs)
if "\033" in text:
text += col()
logging.info(f"MSG: {text}")
def p_plain(*args):
p_style(*args)
def p_info(*args):
p_style(*args, color=(BRIGHT, BLUE))
def p_progress(*args):
p_style(*args, color=(BRIGHT, MAGENTA))
def p_message(*args):
p_style(*args, color=BRIGHT)
def p_error(*args):
p_style(*args, color=(BRIGHT, RED))
def p_warning(*args):
p_style(*args, color=(BRIGHT, YELLOW))
def p_question(*args):
p_style(*args, color=(BRIGHT, CYAN))
def p_success(*args):
p_style(*args, color=(BRIGHT, GREEN))
def p_prompt(*args):
p_style(*args, color=(BRIGHT, CYAN))
def p_choice(*args):
p_style(*args)
def input_prompt(*args):
p_style(f"{col(BRIGHT, WHITE)}»{col(BRIGHT, CYAN)}", *args, end="")
val = input()
logging.info(f"INPUT: {val!r}")
return val
class PackageInstaller:
def __init__(self):
self.verbose = "-v" in sys.argv
def flush_progress(self):
if self.ucache:
self.ucache.flush_progress()
def extract(self, src, dest):
logging.info(f" {src} -> {dest}/")
self.pkg.extract(src, dest)
def extract_file(self, src, dest, optional=True):
try:
with self.pkg.open(src) as sfd, \
open(dest, "wb") as dfd:
logging.info(f" {src} -> {dest}")
shutil.copyfileobj(sfd, dfd)
except KeyError:
if not optional:
raise
if self.verbose:
self.flush_progress()
def extract_tree(self, src, dest):
if src[-1] != "/":
src += "/"
logging.info(f" {src}* -> {dest}")
infolist = self.pkg.infolist()
if self.verbose:
self.flush_progress()
for info in infolist:
name = info.filename
if not name.startswith(src):
continue
subpath = name[len(src):]
assert subpath[0:1] != "/"
destpath = os.path.join(dest, subpath)
if info.is_dir():
os.makedirs(destpath, exist_ok=True)
elif stat.S_ISLNK(info.external_attr >> 16):
link = self.pkg.open(info.filename).read()
if os.path.lexists(destpath):
os.unlink(destpath)
os.symlink(link, destpath)
else:
self.extract_file(name, destpath)
if self.verbose:
self.flush_progress()
| 24.912791 | 71 | 0.51972 |
af8ba33d77f0f6e676072703aad0d8a9beb9cd00 | 25,348 | py | Python | glance/api/v2/router.py | qweraqq/glance | 65b62485dfa336d26b1eae2d26a7b5e6495109a7 | ["Apache-2.0"] | null | null | null | glance/api/v2/router.py | qweraqq/glance | 65b62485dfa336d26b1eae2d26a7b5e6495109a7 | ["Apache-2.0"] | null | null | null | glance/api/v2/router.py | qweraqq/glance | 65b62485dfa336d26b1eae2d26a7b5e6495109a7 | ["Apache-2.0"] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.api.v2 import image_actions
from glance.api.v2 import image_data
from glance.api.v2 import image_members
from glance.api.v2 import image_tags
from glance.api.v2 import images
from glance.api.v2 import metadef_namespaces
from glance.api.v2 import metadef_objects
from glance.api.v2 import metadef_properties
from glance.api.v2 import metadef_resource_types
from glance.api.v2 import metadef_tags
from glance.api.v2 import schemas
from glance.api.v2 import tasks
from glance.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v2 API requests."""
def __init__(self, mapper):
custom_image_properties = images.load_custom_properties()
reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())
schemas_resource = schemas.create_resource(custom_image_properties)
mapper.connect('/schemas/image',
controller=schemas_resource,
action='image',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/image',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/images',
controller=schemas_resource,
action='images',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/images',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/member',
controller=schemas_resource,
action='member',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/member',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/members',
controller=schemas_resource,
action='members',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/members',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/task',
controller=schemas_resource,
action='task',
conditions={'method': ['GET']})
mapper.connect('/schemas/task',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/tasks',
controller=schemas_resource,
action='tasks',
conditions={'method': ['GET']})
mapper.connect('/schemas/tasks',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/namespace',
controller=schemas_resource,
action='metadef_namespace',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/namespace',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/namespaces',
controller=schemas_resource,
action='metadef_namespaces',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/namespaces',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/resource_type',
controller=schemas_resource,
action='metadef_resource_type',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/resource_type',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/resource_types',
controller=schemas_resource,
action='metadef_resource_types',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/resource_types',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/property',
controller=schemas_resource,
action='metadef_property',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/property',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/properties',
controller=schemas_resource,
action='metadef_properties',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/properties',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/object',
controller=schemas_resource,
action='metadef_object',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/object',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/objects',
controller=schemas_resource,
action='metadef_objects',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/objects',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/tag',
controller=schemas_resource,
action='metadef_tag',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/tag',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/schemas/metadefs/tags',
controller=schemas_resource,
action='metadef_tags',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/schemas/metadefs/tags',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
# Metadef resource types
metadef_resource_types_resource = (
metadef_resource_types.create_resource())
mapper.connect('/metadefs/resource_types',
controller=metadef_resource_types_resource,
action='index',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/resource_types',
controller=reject_method_resource,
action='reject',
allowed_methods='GET')
mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
controller=metadef_resource_types_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
controller=metadef_resource_types_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST')
mapper.connect('/metadefs/namespaces/{namespace}/resource_types/'
'{resource_type}',
controller=metadef_resource_types_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/resource_types/'
'{resource_type}',
controller=reject_method_resource,
action='reject',
allowed_methods='DELETE')
# Metadef Namespaces
metadef_namespace_resource = metadef_namespaces.create_resource()
mapper.connect('/metadefs/namespaces',
controller=metadef_namespace_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/metadefs/namespaces',
controller=metadef_namespace_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/metadefs/namespaces',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST')
mapper.connect('/metadefs/namespaces/{namespace}',
controller=metadef_namespace_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}',
controller=metadef_namespace_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/metadefs/namespaces/{namespace}',
controller=metadef_namespace_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT, DELETE')
# Metadef namespace properties
metadef_properties_resource = metadef_properties.create_resource()
mapper.connect('/metadefs/namespaces/{namespace}/properties',
controller=metadef_properties_resource,
action='index',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/properties',
controller=metadef_properties_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/metadefs/namespaces/{namespace}/properties',
controller=metadef_namespace_resource,
action='delete_properties',
conditions={'method': ['DELETE']})
mapper.connect('/metadefs/namespaces/{namespace}/properties',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST, DELETE')
mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
'property_name}',
controller=metadef_properties_resource,
action='show',
conditions={'method': ['GET']})
mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
'property_name}',
controller=metadef_properties_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
'property_name}',
controller=metadef_properties_resource,
action='delete',
conditions={'method': ['DELETE']})
mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
'property_name}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT, DELETE')
# Metadef objects
metadef_objects_resource = metadef_objects.create_resource()
mapper.connect('/metadefs/namespaces/{namespace}/objects',
controller=metadef_objects_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/metadefs/namespaces/{namespace}/objects',
controller=metadef_objects_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/metadefs/namespaces/{namespace}/objects',
controller=metadef_namespace_resource,
action='delete_objects',
conditions={'method': ['DELETE']})
mapper.connect('/metadefs/namespaces/{namespace}/objects',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST, DELETE')
mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
'object_name}',
controller=metadef_objects_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
'object_name}',
controller=metadef_objects_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
'object_name}',
controller=metadef_objects_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
'object_name}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT, DELETE')
# Metadef tags
metadef_tags_resource = metadef_tags.create_resource()
mapper.connect('/metadefs/namespaces/{namespace}/tags',
controller=metadef_tags_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/metadefs/namespaces/{namespace}/tags',
controller=metadef_tags_resource,
action='create_tags',
conditions={'method': ['POST']})
mapper.connect('/metadefs/namespaces/{namespace}/tags',
controller=metadef_namespace_resource,
action='delete_tags',
conditions={'method': ['DELETE']})
mapper.connect('/metadefs/namespaces/{namespace}/tags',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST, DELETE')
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}',
controller=metadef_tags_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}',
controller=metadef_tags_resource,
action='create',
conditions={'method': ['POST']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}',
controller=metadef_tags_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}',
controller=metadef_tags_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST, PUT, DELETE')
images_resource = images.create_resource(custom_image_properties)
mapper.connect('/images',
controller=images_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/images',
controller=images_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/images',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST')
mapper.connect('/images/{image_id}',
controller=images_resource,
action='update',
conditions={'method': ['PATCH']})
mapper.connect('/images/{image_id}',
controller=images_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/images/{image_id}',
controller=images_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/images/{image_id}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PATCH, DELETE')
image_actions_resource = image_actions.create_resource()
mapper.connect('/images/{image_id}/actions/deactivate',
controller=image_actions_resource,
action='deactivate',
conditions={'method': ['POST']},
body_reject=True)
mapper.connect('/images/{image_id}/actions/reactivate',
controller=image_actions_resource,
action='reactivate',
conditions={'method': ['POST']},
body_reject=True)
mapper.connect('/images/{image_id}/actions/deactivate',
controller=reject_method_resource,
action='reject',
allowed_methods='POST')
mapper.connect('/images/{image_id}/actions/reactivate',
controller=reject_method_resource,
action='reject',
allowed_methods='POST')
image_data_resource = image_data.create_resource()
mapper.connect('/images/{image_id}/file',
controller=image_data_resource,
action='download',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/images/{image_id}/file',
controller=image_data_resource,
action='upload',
conditions={'method': ['PUT']})
mapper.connect('/images/{image_id}/file',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT')
image_tags_resource = image_tags.create_resource()
mapper.connect('/images/{image_id}/tags/{tag_value}',
controller=image_tags_resource,
action='update',
conditions={'method': ['PUT']},
body_reject=True)
mapper.connect('/images/{image_id}/tags/{tag_value}',
controller=image_tags_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/images/{image_id}/tags/{tag_value}',
controller=reject_method_resource,
action='reject',
allowed_methods='PUT, DELETE')
image_members_resource = image_members.create_resource()
mapper.connect('/images/{image_id}/members',
controller=image_members_resource,
action='index',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/images/{image_id}/members',
controller=image_members_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/images/{image_id}/members',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST')
mapper.connect('/images/{image_id}/members/{member_id}',
controller=image_members_resource,
action='show',
conditions={'method': ['GET']},
body_reject=True)
mapper.connect('/images/{image_id}/members/{member_id}',
controller=image_members_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/images/{image_id}/members/{member_id}',
controller=image_members_resource,
action='delete',
conditions={'method': ['DELETE']},
body_reject=True)
mapper.connect('/images/{image_id}/members/{member_id}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT, DELETE')
tasks_resource = tasks.create_resource()
mapper.connect('/tasks',
controller=tasks_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/tasks',
controller=tasks_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/tasks',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST')
mapper.connect('/tasks/{task_id}',
controller=tasks_resource,
action='get',
conditions={'method': ['GET']})
mapper.connect('/tasks/{task_id}',
controller=tasks_resource,
action='delete',
conditions={'method': ['DELETE']})
mapper.connect('/tasks/{task_id}',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, DELETE')
super(API, self).__init__(mapper)
| 46.853974 | 78 | 0.505129 |
72650d7b36ecabca3bdb10b613a8b3ce6baf7daf | 92 | py | Python | 2015/misc/test-table-2/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | ["FSFAP"] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2015/misc/test-table-2/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | ["FSFAP"] | null | null | null | 2015/misc/test-table-2/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | ["FSFAP"] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z |
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1fzxjxXBP0xL-dPJp1NvSKaXbutItrW9pUbIq_fHUDuo'
| 23 | 68 | 0.836957 |
5fd0cb3f94f485ae87b4d3cd349f8c18be456f49 | 16,093 | py | Python | ml4tc/machine_learning/neural_net_test.py | thunderhoser/ml4tc | f01645d014f3cd2e9f24c5791d62e573ad3ab1bd | ["MIT"] | 2 | 2021-08-24T04:24:22.000Z | 2021-09-29T07:52:21.000Z | ml4tc/machine_learning/neural_net_test.py | thunderhoser/ml4tc | dd97972675c462634cf43fa9ad486049429095e9 | ["MIT"] | null | null | null | ml4tc/machine_learning/neural_net_test.py | thunderhoser/ml4tc | dd97972675c462634cf43fa9ad486049429095e9 | ["MIT"] | null | null | null |
"""Unit tests for neural_net.py."""
import unittest
import numpy
import xarray
from ml4tc.utils import example_utils
from ml4tc.machine_learning import neural_net
TOLERANCE = 1e-6
# The following constants are used to test _find_desired_times.
DESIRED_TIMES_UNIX_SEC = numpy.array(
[0, 900, 1800, 2700, 3600, 4500, 5400, 6300, 7200], dtype=int
)
TOLERANCE_SEC = 1000
FIRST_ACTUAL_TIMES_UNIX_SEC = numpy.array(
[2700, 5700, 1400, 100, 6300, 7100, 3600], dtype=int
)
FIRST_TIME_INDICES = numpy.array([3, 2, 2, 0, 6, 6, 1, 4, 5], dtype=int)
SECOND_ACTUAL_TIMES_UNIX_SEC = numpy.array(
[7200, 4600, 5600, 3600, 1800, 0, 2600], dtype=int
)
SECOND_TIME_INDICES = numpy.array([5, 4, 4, 6, 3, 1, 2, 2, 0], dtype=int)
THIRD_ACTUAL_TIMES_UNIX_SEC = numpy.array(
[3700, 6400, 3900, 1000, 200, 5700, 7100], dtype=int
)
THIRD_TIME_INDICES = numpy.array([4, 3, 3, 0, 0, 2, 5, 1, 6], dtype=int)
FOURTH_ACTUAL_TIMES_UNIX_SEC = numpy.array(
[4300, 5400, 3400, 400, 6000, 2100, 2600], dtype=int
)
FOURTH_TIME_INDICES = numpy.array(
[3, 3, 5, 6, 2, 0, 1, 4, neural_net.MISSING_INDEX], dtype=int
)
# The following constants are used to test _interp_missing_times.
NAN = numpy.nan
TIMES_FOR_INTERP_SEC = numpy.array([0, 1, 2, 3, 4, 5], dtype=float)
THIS_MATRIX = numpy.array([
[NAN, 0, 0, 0],
[1, -5, 2, NAN],
[2, NAN, 4, NAN],
[3, NAN, 6, NAN],
[NAN, -50, 10, NAN],
[NAN, -100, 100, NAN]
], dtype=float)
DATA_MATRIX_NON_SPATIAL_BEFORE_INTERP = numpy.stack(
(THIS_MATRIX, 2 * THIS_MATRIX), axis=0
)
THIS_MATRIX = numpy.array([
[1, 0, 0, 0],
[1, -5, 2, 0],
[2, -20, 4, 0],
[3, -35, 6, 0],
[3, -50, 10, 0],
[3, -100, 100, 0]
], dtype=float)
DATA_MATRIX_NON_SPATIAL_AFTER_INTERP = numpy.stack(
(THIS_MATRIX, 2 * THIS_MATRIX), axis=0
)
DATA_MATRIX_SPATIAL_BEFORE_INTERP = numpy.expand_dims(
DATA_MATRIX_NON_SPATIAL_BEFORE_INTERP, axis=1
)
DATA_MATRIX_SPATIAL_BEFORE_INTERP = numpy.expand_dims(
DATA_MATRIX_SPATIAL_BEFORE_INTERP, axis=1
)
DATA_MATRIX_SPATIAL_BEFORE_INTERP = numpy.repeat(
DATA_MATRIX_SPATIAL_BEFORE_INTERP, axis=1, repeats=480
)
DATA_MATRIX_SPATIAL_BEFORE_INTERP = numpy.repeat(
DATA_MATRIX_SPATIAL_BEFORE_INTERP, axis=2, repeats=640
)
DATA_MATRIX_SPATIAL_AFTER_INTERP = numpy.expand_dims(
DATA_MATRIX_NON_SPATIAL_AFTER_INTERP, axis=1
)
DATA_MATRIX_SPATIAL_AFTER_INTERP = numpy.expand_dims(
DATA_MATRIX_SPATIAL_AFTER_INTERP, axis=1
)
DATA_MATRIX_SPATIAL_AFTER_INTERP = numpy.repeat(
DATA_MATRIX_SPATIAL_AFTER_INTERP, axis=1, repeats=480
)
DATA_MATRIX_SPATIAL_AFTER_INTERP = numpy.repeat(
DATA_MATRIX_SPATIAL_AFTER_INTERP, axis=2, repeats=640
)
# The following constants are used to test _discretize_intensity_change.
INTENSITY_CHANGE_M_S01 = 30.
FIRST_CLASS_CUTOFFS_M_S01 = numpy.array([30.])
FIRST_CLASS_FLAGS = numpy.array([0, 1], dtype=int)
SECOND_CLASS_CUTOFFS_M_S01 = numpy.array([31.])
SECOND_CLASS_FLAGS = numpy.array([1, 0], dtype=int)
THIRD_CLASS_CUTOFFS_M_S01 = numpy.array([-6, 1, 8, 15, 22, 29, 36], dtype=float)
THIRD_CLASS_FLAGS = numpy.array([0, 0, 0, 0, 0, 0, 1, 0], dtype=int)
FOURTH_CLASS_CUTOFFS_M_S01 = numpy.array([-10, 0, 10, 20, 30, 40], dtype=float)
FOURTH_CLASS_FLAGS = numpy.array([0, 0, 0, 0, 0, 1, 0], dtype=int)
# The following constants are used to test _ships_predictors_xarray_to_keras,
# ships_predictors_3d_to_4d, and ships_predictors_4d_to_3d.
LAGGED_PREDICTORS_EXAMPLE1_LAG1 = numpy.array([0, 1, 2, 3, 4, 5], dtype=float)
LAGGED_PREDICTORS_EXAMPLE1_LAG2 = numpy.array(
[0, -1, -2, -3, -4, -5], dtype=float
)
LAGGED_PRED_MATRIX_EXAMPLE1 = numpy.stack(
(LAGGED_PREDICTORS_EXAMPLE1_LAG1, LAGGED_PREDICTORS_EXAMPLE1_LAG2), axis=0
)
LAGGED_PRED_MATRIX_EXAMPLE2 = 2 * LAGGED_PRED_MATRIX_EXAMPLE1
LAGGED_PRED_MATRIX_EXAMPLE3 = 3 * LAGGED_PRED_MATRIX_EXAMPLE1
LAGGED_PRED_MATRIX_STANDARD = numpy.stack((
LAGGED_PRED_MATRIX_EXAMPLE1, LAGGED_PRED_MATRIX_EXAMPLE2,
LAGGED_PRED_MATRIX_EXAMPLE3
), axis=0)
FORECAST_PREDICTORS_EXAMPLE1_HOUR1 = numpy.array([0, 0, 0, 0, 0], dtype=float)
FORECAST_PREDICTORS_EXAMPLE1_HOUR2 = numpy.array([2, 4, 6, 8, 10], dtype=float)
FORECAST_PREDICTORS_EXAMPLE1_HOUR3 = numpy.array([5, 4, 3, 2, 1], dtype=float)
FORECAST_PREDICTORS_EXAMPLE1_HOUR4 = numpy.array(
[-100, -10, 0, 10, 100], dtype=float
)
FORECAST_PRED_MATRIX_EXAMPLE1 = numpy.stack((
FORECAST_PREDICTORS_EXAMPLE1_HOUR1, FORECAST_PREDICTORS_EXAMPLE1_HOUR2,
FORECAST_PREDICTORS_EXAMPLE1_HOUR3, FORECAST_PREDICTORS_EXAMPLE1_HOUR4
), axis=0)
FORECAST_PRED_MATRIX_EXAMPLE2 = 5 * FORECAST_PRED_MATRIX_EXAMPLE1
FORECAST_PRED_MATRIX_EXAMPLE3 = 10 * FORECAST_PRED_MATRIX_EXAMPLE1
FORECAST_PRED_MATRIX_STANDARD = numpy.stack((
FORECAST_PRED_MATRIX_EXAMPLE1, FORECAST_PRED_MATRIX_EXAMPLE2,
FORECAST_PRED_MATRIX_EXAMPLE3
), axis=0)
FORECAST_PRED_MATRIX_STANDARD_0HOURS = FORECAST_PRED_MATRIX_STANDARD[:, [1], :]
METADATA_DICT = {
example_utils.SHIPS_LAG_TIME_DIM: numpy.array([3, 0], dtype=int),
example_utils.SHIPS_PREDICTOR_LAGGED_DIM: ['a', 'b', 'c', 'd', 'e', 'f'],
example_utils.SHIPS_VALID_TIME_DIM:
numpy.array([0, 21600, 43200], dtype=int),
example_utils.SHIPS_FORECAST_HOUR_DIM:
numpy.array([-12, 0, 12, 24], dtype=int),
example_utils.SHIPS_PREDICTOR_FORECAST_DIM: ['A', 'B', 'C', 'D', 'E']
}
LAGGED_DIMENSIONS = (
example_utils.SHIPS_VALID_TIME_DIM, example_utils.SHIPS_LAG_TIME_DIM,
example_utils.SHIPS_PREDICTOR_LAGGED_DIM
)
FORECAST_DIMENSIONS = (
example_utils.SHIPS_VALID_TIME_DIM, example_utils.SHIPS_FORECAST_HOUR_DIM,
example_utils.SHIPS_PREDICTOR_FORECAST_DIM
)
MAIN_DATA_DICT = {
example_utils.SHIPS_PREDICTORS_LAGGED_KEY:
(LAGGED_DIMENSIONS, LAGGED_PRED_MATRIX_STANDARD),
example_utils.SHIPS_PREDICTORS_FORECAST_KEY:
(FORECAST_DIMENSIONS, FORECAST_PRED_MATRIX_STANDARD)
}
EXAMPLE_TABLE_XARRAY = xarray.Dataset(
data_vars=MAIN_DATA_DICT, coords=METADATA_DICT
)
LAGGED_PREDICTOR_INDICES = numpy.linspace(
0, len(LAGGED_PREDICTORS_EXAMPLE1_LAG1) - 1,
num=len(LAGGED_PREDICTORS_EXAMPLE1_LAG1), dtype=int
)
FORECAST_PREDICTOR_INDICES = numpy.linspace(
0, len(FORECAST_PREDICTORS_EXAMPLE1_HOUR1) - 1,
num=len(FORECAST_PREDICTORS_EXAMPLE1_HOUR1), dtype=int
)
SCALAR_PREDICTORS_EXAMPLE1 = numpy.array([
0, 0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5,
2, 4, 6, 8, 10
], dtype=float)
SCALAR_PREDICTORS_EXAMPLE2 = numpy.array([
0, 0, 2, -2, 4, -4, 6, -6, 8, -8, 10, -10,
10, 20, 30, 40, 50
], dtype=float)
SCALAR_PREDICTORS_EXAMPLE3 = numpy.array([
0, 0, 3, -3, 6, -6, 9, -9, 12, -12, 15, -15,
20, 40, 60, 80, 100
], dtype=float)
SCALAR_PREDICTOR_MATRIX = numpy.stack((
SCALAR_PREDICTORS_EXAMPLE1, SCALAR_PREDICTORS_EXAMPLE2,
SCALAR_PREDICTORS_EXAMPLE3
), axis=0)
SCALAR_PREDICTOR_MATRIX = numpy.expand_dims(SCALAR_PREDICTOR_MATRIX, axis=0)
class NeuralNetTests(unittest.TestCase):
"""Each method is a unit test for neural_net.py."""
def test_find_desired_times_first(self):
"""Ensures correct output from _find_desired_times.
In this case, using first set of actual times.
"""
these_indices = neural_net._find_desired_times(
all_times_unix_sec=FIRST_ACTUAL_TIMES_UNIX_SEC,
desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC,
tolerance_sec=TOLERANCE_SEC, max_num_missing_times=0
)
self.assertTrue(numpy.array_equal(
these_indices, FIRST_TIME_INDICES
))
def test_find_desired_times_second(self):
"""Ensures correct output from _find_desired_times.
In this case, using second set of actual times.
"""
these_indices = neural_net._find_desired_times(
all_times_unix_sec=SECOND_ACTUAL_TIMES_UNIX_SEC,
desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC,
tolerance_sec=TOLERANCE_SEC, max_num_missing_times=0
)
self.assertTrue(numpy.array_equal(
these_indices, SECOND_TIME_INDICES
))
def test_find_desired_times_third(self):
"""Ensures correct output from _find_desired_times.
In this case, using third set of actual times.
"""
these_indices = neural_net._find_desired_times(
all_times_unix_sec=THIRD_ACTUAL_TIMES_UNIX_SEC,
desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC,
tolerance_sec=TOLERANCE_SEC, max_num_missing_times=0
)
self.assertTrue(numpy.array_equal(
these_indices, THIRD_TIME_INDICES
))
def test_find_desired_times_fourth_allow_missing(self):
"""Ensures correct output from _find_desired_times.
In this case, using fourth set of actual times and will allow missing
times.
"""
these_indices = neural_net._find_desired_times(
all_times_unix_sec=FOURTH_ACTUAL_TIMES_UNIX_SEC,
desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC,
tolerance_sec=TOLERANCE_SEC, max_num_missing_times=1
)
self.assertTrue(numpy.array_equal(
these_indices, FOURTH_TIME_INDICES
))
def test_find_desired_times_fourth_no_allow_missing(self):
"""Ensures correct output from _find_desired_times.
In this case, using fourth set of actual times and will *not* allow
missing times.
"""
these_indices = neural_net._find_desired_times(
all_times_unix_sec=FOURTH_ACTUAL_TIMES_UNIX_SEC,
desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC,
tolerance_sec=TOLERANCE_SEC, max_num_missing_times=0
)
self.assertTrue(these_indices is None)
def test_interp_missing_times_non_spatial(self):
"""Ensures correct output from _interp_missing_times.
In this case, data are non-spatial.
"""
this_data_matrix = neural_net._interp_missing_times(
data_matrix=DATA_MATRIX_NON_SPATIAL_BEFORE_INTERP + 0.,
times_sec=TIMES_FOR_INTERP_SEC
)
self.assertTrue(numpy.allclose(
this_data_matrix, DATA_MATRIX_NON_SPATIAL_AFTER_INTERP,
atol=TOLERANCE
))
def test_interp_missing_times_spatial(self):
"""Ensures correct output from _interp_missing_times.
In this case, data are spatial.
"""
this_data_matrix = neural_net._interp_missing_times(
data_matrix=DATA_MATRIX_SPATIAL_BEFORE_INTERP + 0.,
times_sec=TIMES_FOR_INTERP_SEC
)
self.assertTrue(numpy.allclose(
this_data_matrix, DATA_MATRIX_SPATIAL_AFTER_INTERP,
atol=TOLERANCE
))
def test_discretize_intensity_change_first(self):
"""Ensures correct output from _discretize_intensity_change.
In this case, using first set of cutoffs.
"""
these_flags = neural_net._discretize_intensity_change(
intensity_change_m_s01=INTENSITY_CHANGE_M_S01,
class_cutoffs_m_s01=FIRST_CLASS_CUTOFFS_M_S01
)
self.assertTrue(numpy.array_equal(these_flags, FIRST_CLASS_FLAGS))
def test_discretize_intensity_change_second(self):
"""Ensures correct output from _discretize_intensity_change.
In this case, using second set of cutoffs.
"""
these_flags = neural_net._discretize_intensity_change(
intensity_change_m_s01=INTENSITY_CHANGE_M_S01,
class_cutoffs_m_s01=SECOND_CLASS_CUTOFFS_M_S01
)
self.assertTrue(numpy.array_equal(these_flags, SECOND_CLASS_FLAGS))
def test_discretize_intensity_change_third(self):
"""Ensures correct output from _discretize_intensity_change.
In this case, using third set of cutoffs.
"""
these_flags = neural_net._discretize_intensity_change(
intensity_change_m_s01=INTENSITY_CHANGE_M_S01,
class_cutoffs_m_s01=THIRD_CLASS_CUTOFFS_M_S01
)
self.assertTrue(numpy.array_equal(these_flags, THIRD_CLASS_FLAGS))
def test_discretize_intensity_change_fourth(self):
"""Ensures correct output from _discretize_intensity_change.
In this case, using fourth set of cutoffs.
"""
these_flags = neural_net._discretize_intensity_change(
intensity_change_m_s01=INTENSITY_CHANGE_M_S01,
class_cutoffs_m_s01=FOURTH_CLASS_CUTOFFS_M_S01
)
self.assertTrue(numpy.array_equal(these_flags, FOURTH_CLASS_FLAGS))
def test_ships_predictors_standard_to_keras_example1(self):
"""Ensures correct output from _ships_predictors_xarray_to_keras.
In this case, extracting values from first example (init time).
"""
these_predictor_values = neural_net._ships_predictors_xarray_to_keras(
example_table_xarray=EXAMPLE_TABLE_XARRAY, init_time_index=0,
lagged_predictor_indices=LAGGED_PREDICTOR_INDICES,
forecast_predictor_indices=FORECAST_PREDICTOR_INDICES,
max_forecast_hour=0
)
self.assertTrue(numpy.allclose(
these_predictor_values, SCALAR_PREDICTORS_EXAMPLE1, atol=TOLERANCE
))
def test_ships_predictors_standard_to_keras_example2(self):
"""Ensures correct output from _ships_predictors_xarray_to_keras.
In this case, extracting values from second example (init time).
"""
these_predictor_values = neural_net._ships_predictors_xarray_to_keras(
example_table_xarray=EXAMPLE_TABLE_XARRAY, init_time_index=1,
lagged_predictor_indices=LAGGED_PREDICTOR_INDICES,
forecast_predictor_indices=FORECAST_PREDICTOR_INDICES,
max_forecast_hour=0
)
self.assertTrue(numpy.allclose(
these_predictor_values, SCALAR_PREDICTORS_EXAMPLE2, atol=TOLERANCE
))
def test_ships_predictors_standard_to_keras_example3(self):
"""Ensures correct output from _ships_predictors_xarray_to_keras.
In this case, extracting values from third example (init time).
"""
these_predictor_values = neural_net._ships_predictors_xarray_to_keras(
example_table_xarray=EXAMPLE_TABLE_XARRAY, init_time_index=2,
lagged_predictor_indices=LAGGED_PREDICTOR_INDICES,
forecast_predictor_indices=FORECAST_PREDICTOR_INDICES,
max_forecast_hour=0
)
self.assertTrue(numpy.allclose(
these_predictor_values, SCALAR_PREDICTORS_EXAMPLE3, atol=TOLERANCE
))
def test_ships_predictors_3d_to_4d(self):
"""Ensures correct output from ships_predictors_3d_to_4d."""
this_lagged_pred_matrix, this_forecast_pred_matrix = (
neural_net.ships_predictors_3d_to_4d(
predictor_matrix_3d=SCALAR_PREDICTOR_MATRIX,
num_lagged_predictors=LAGGED_PRED_MATRIX_STANDARD.shape[2],
num_builtin_lag_times=LAGGED_PRED_MATRIX_STANDARD.shape[1],
num_forecast_predictors=
FORECAST_PRED_MATRIX_STANDARD_0HOURS.shape[2],
num_forecast_hours=FORECAST_PRED_MATRIX_STANDARD_0HOURS.shape[1]
)
)
self.assertTrue(numpy.allclose(
this_lagged_pred_matrix[0, ...], LAGGED_PRED_MATRIX_STANDARD,
atol=TOLERANCE
))
self.assertTrue(numpy.allclose(
this_forecast_pred_matrix[0, ...],
FORECAST_PRED_MATRIX_STANDARD_0HOURS, atol=TOLERANCE
))
def test_ships_predictors_4d_to_3d(self):
"""Ensures correct output from ships_predictors_4d_to_3d."""
this_scalar_pred_matrix = neural_net.ships_predictors_4d_to_3d(
lagged_predictor_matrix_4d=
numpy.expand_dims(LAGGED_PRED_MATRIX_STANDARD, axis=0),
forecast_predictor_matrix_4d=
numpy.expand_dims(FORECAST_PRED_MATRIX_STANDARD_0HOURS, axis=0)
)
self.assertTrue(numpy.allclose(
this_scalar_pred_matrix[0, ...], SCALAR_PREDICTOR_MATRIX,
atol=TOLERANCE
))
if __name__ == '__main__':
unittest.main()
| 34.758099 | 80 | 0.712235 |
c8a6fa017ce7a74e44f8329c6d8b955d29170888 | 413 | py | Python | icv/image/transforms/normalize.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | ["MIT"] | 5 | 2019-09-10T04:02:19.000Z | 2020-07-24T07:46:08.000Z | icv/image/transforms/normalize.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | ["MIT"] | null | null | null | icv/image/transforms/normalize.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | ["MIT"] | 1 | 2020-03-20T03:44:04.000Z | 2020-03-20T03:44:04.000Z |
# -*- coding: UTF-8 -*-
import numpy as np
from .colorspace import bgr2rgb, rgb2bgr
from ..io import imread
def imnormalize(img, mean, std, to_rgb=True):
img = imread(img).astype(np.float32)
if to_rgb:
img = bgr2rgb(img)
return (img - mean) / std
def imdenormalize(img, mean, std, to_bgr=True):
img = (imread(img) * std) + mean
if to_bgr:
img = rgb2bgr(img)
return img
| 21.736842 | 47 | 0.62954 |
896b3ee6f12605da063bf1a452f4b586a38810fa | 26,469 | py | Python | cairis/mio/ModelImport.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | ["Apache-2.0"] | null | null | null | cairis/mio/ModelImport.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | ["Apache-2.0"] | null | null | null | cairis/mio/ModelImport.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from SecurityPatternContentHandler import SecurityPatternContentHandler
from AttackPatternContentHandler import AttackPatternContentHandler
from TVTypeContentHandler import TVTypeContentHandler
from DomainValueContentHandler import DomainValueContentHandler
from DirectoryContentHandler import DirectoryContentHandler
from RiskAnalysisContentHandler import RiskAnalysisContentHandler
from GoalsContentHandler import GoalsContentHandler
from UsabilityContentHandler import UsabilityContentHandler
from AssociationsContentHandler import AssociationsContentHandler
from CairisContentHandler import CairisContentHandler
from ArchitecturalPatternContentHandler import ArchitecturalPatternContentHandler
from SynopsesContentHandler import SynopsesContentHandler
from TemplateAssetsContentHandler import TemplateAssetsContentHandler
from ProcessesContentHandler import ProcessesContentHandler
from LocationsContentHandler import LocationsContentHandler
from cairis.core.Borg import Borg
import xml.sax
from cairis.core.ARM import *
__author__ = 'Shamal Faily'
def importSecurityPatterns(importFile,session_id=None):
try:
parser = xml.sax.make_parser()
handler = SecurityPatternContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
taps = handler.assets()
spps = handler.patterns()
vts = handler.metricTypes()
noOfTaps = len(taps)
noOfSpps = len(spps)
b = Borg()
db_proxy = b.get_dbproxy(session_id)
msgStr = 'No patterns imported'
if (noOfTaps > 0):
tapId = 0;
db_proxy.deleteSecurityPattern(-1)
db_proxy.deleteTemplateAsset(-1)
for vt in vts:
db_proxy.addValueType(vt)
for tap in taps:
tap.setId(tapId)
db_proxy.addTemplateAsset(tap)
tapId += 1
if (noOfSpps > 0):
spId = 0;
db_proxy.deleteSecurityPattern(-1)
for sp in spps:
sp.setId(spId)
db_proxy.addSecurityPattern(sp)
spId += 1
msgStr = 'Imported ' + str(noOfTaps) + ' template assets and ' + str(noOfSpps) + ' security patterns'
return msgStr
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importAttackPattern(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = AttackPatternContentHandler(session_id = session_id)
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
assets = handler.assets()
attackers = handler.attackers()
vulnerability = handler.vulnerability()
threat = handler.threat()
risk = handler.risk()
raTxt = importRiskAnalysis([],assets,[vulnerability],attackers,[threat],[risk],[],[],session_id)
obsTxt = importRequirements([],[],handler.obstacles(),[],[],session_id)
assocTxt = importAssociations([],handler.obstacleAssociations(),[],session_id)
return obsTxt + assocTxt + raTxt
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importTVTypeFile(importFile,isOverwrite=1,session_id = None):
try:
parser = xml.sax.make_parser()
handler = TVTypeContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
vulTypes,threatTypes = handler.types()
return importTVTypes(vulTypes,threatTypes,isOverwrite,session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importTVTypes(vulTypes,threatTypes,isOverwrite,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
noOfVts = len(vulTypes)
noOfTts = len(threatTypes)
if (noOfVts > 0):
if (isOverwrite):
db_proxy.deleteVulnerabilityType(-1)
for vt in vulTypes:
db_proxy.addValueType(vt)
if (noOfTts > 0):
if (isOverwrite):
db_proxy.deleteThreatType(-1)
for tt in threatTypes:
db_proxy.addValueType(tt)
msgStr = 'Imported ' + str(noOfVts) + ' vulnerability types and ' + str(noOfTts) + ' threat types.'
return msgStr
def importDirectoryFile(importFile,isOverwrite=1,session_id = None):
try:
parser = xml.sax.make_parser()
handler = DirectoryContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
vulDir,threatDir = handler.directories()
vdSize = len(vulDir)
tdSize = len(threatDir)
b = Borg()
db_proxy = b.get_dbproxy(session_id)
if (vdSize > 0):
db_proxy.addVulnerabilityDirectory(vulDir,isOverwrite)
if (tdSize > 0):
db_proxy.addThreatDirectory(threatDir,isOverwrite)
msgStr = 'Imported ' + str(vdSize) + ' template vulnerabilities and ' + str(tdSize) + ' template threats.'
return msgStr
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importRequirementsFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = GoalsContentHandler(session_id = session_id)
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
return importRequirements(handler.domainProperties(),handler.goals(),handler.obstacles(),handler.requirements(),handler.countermeasures(),session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importRequirements(dpParameterSet,goalParameterSet,obsParameterSet,reqParameterSet,cmParameterSet,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
dpCount = 0
for dpParameters in dpParameterSet:
objtId = db_proxy.existingObject(dpParameters.name(),'domainproperty')
if objtId == -1:
db_proxy.addDomainProperty(dpParameters)
else:
dpParameters.setId(objtId)
db_proxy.updateDomainProperty(dpParameters)
dpCount += 1
goalCount = 0
for goalParameters in goalParameterSet:
objtId = db_proxy.existingObject(goalParameters.name(),'goal')
if objtId == -1:
db_proxy.addGoal(goalParameters)
else:
goalParameters.setId(objtId)
db_proxy.updateGoal(goalParameters)
goalCount += 1
obsCount = 0
for obsParameters in obsParameterSet:
objtId = db_proxy.existingObject(obsParameters.name(),'obstacle')
if objtId == -1:
db_proxy.addObstacle(obsParameters)
else:
obsParameters.setId(objtId)
db_proxy.updateObstacle(obsParameters)
obsCount += 1
reqCount = 0
for req,refName,refType in reqParameterSet:
objtId = db_proxy.existingObject(req.name(),'requirement')
if objtId == -1:
isAsset = True
if (refType == 'environment'):
isAsset = False
db_proxy.addRequirement(req,refName,isAsset)
else:
db_proxy.updateRequirement(req)
reqCount += 1
cmCount = 0
for cmParameters in cmParameterSet:
objtId = db_proxy.existingObject(cmParameters.name(),'countermeasure')
if objtId == -1:
db_proxy.addCountermeasure(cmParameters)
else:
cmParameters.setId(objtId)
db_proxy.updateCountermeasure(cmParameters)
cmCount += 1
msgStr = 'Imported ' + str(dpCount) + ' domain properties, ' + str(goalCount) + ' goals, ' + str(obsCount) + ' obstacles, ' + str(reqCount) + ' requirements, and ' + str(cmCount) + ' countermeasures.'
return msgStr
def importRiskAnalysisFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = RiskAnalysisContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
return importRiskAnalysis(handler.roles(),handler.assets(),handler.vulnerabilities(),handler.attackers(),handler.threats(),handler.risks(),handler.responses(),handler.associations(),session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importRiskAnalysis(roleParameterSet,assetParameterSet,vulParameterSet,attackerParameterSet,threatParameterSet,riskParameterSet,responseParameterSet,assocParameterSet,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
roleCount = 0
for roleParameters in roleParameterSet:
objtId = db_proxy.existingObject(roleParameters.name(),'role')
if objtId == -1:
db_proxy.addRole(roleParameters)
else:
roleParameters.setId(objtId)
db_proxy.updateRole(roleParameters)
roleCount += 1
assetCount = 0
for assetParameters in assetParameterSet:
objtId = db_proxy.existingObject(assetParameters.name(),'asset')
if objtId == -1:
db_proxy.addAsset(assetParameters)
else:
assetParameters.setId(objtId)
db_proxy.updateAsset(assetParameters)
assetCount += 1
vulCount = 0
for vulParameters in vulParameterSet:
objtId = db_proxy.existingObject(vulParameters.name(),'vulnerability')
if objtId == -1:
db_proxy.addVulnerability(vulParameters)
else:
vulParameters.setId(objtId)
db_proxy.updateVulnerability(vulParameters)
vulCount += 1
attackerCount = 0
for attackerParameters in attackerParameterSet:
objtId = db_proxy.existingObject(attackerParameters.name(),'attacker')
if objtId == -1:
db_proxy.addAttacker(attackerParameters)
else:
attackerParameters.setId(objtId)
db_proxy.updateAttacker(attackerParameters)
attackerCount += 1
threatCount = 0
for threatParameters in threatParameterSet:
objtId = db_proxy.existingObject(threatParameters.name(),'threat')
if objtId == -1:
db_proxy.addThreat(threatParameters)
else:
threatParameters.setId(objtId)
db_proxy.updateThreat(threatParameters)
threatCount += 1
riskCount = 0
for riskParameters in riskParameterSet:
objtId = db_proxy.existingObject(riskParameters.name(),'risk')
if objtId == -1:
db_proxy.addRisk(riskParameters)
else:
riskParameters.setId(objtId)
db_proxy.updateRisk(riskParameters)
riskCount += 1
responseCount = 0
for responseParameters in responseParameterSet:
objtId = db_proxy.existingObject(responseParameters.name(),'response')
if objtId == -1:
db_proxy.addResponse(responseParameters)
else:
responseParameters.setId(objtId)
db_proxy.updateResponse(responseParameters)
responseCount += 1
rshipCount = 0
for assocParameters in assocParameterSet:
db_proxy.addClassAssociation(assocParameters)
rshipCount += 1
msgStr = 'Imported ' + str(roleCount) + ' roles, ' + str(assetCount) + ' assets, ' + str(vulCount) + ' vulnerabilities, ' + str(attackerCount) + ' attackers, ' + str(threatCount) + ' threats, ' + str(riskCount) + ' risks, ' + str(responseCount) + ' responses, and ' + str(rshipCount) + ' asset associations.'
return msgStr
def importUsabilityFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = UsabilityContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
return importUsability(handler.personas(),handler.externalDocuments(),handler.documentReferences(),handler.conceptReferences(),handler.personaCharacteristics(),handler.taskCharacteristics(),handler.tasks(),handler.usecases(),session_id=session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importUsability(personaParameterSet,edParameterSet,drParameterSet,crParameterSet,pcParameterSet,tcParameterSet,taskParameterSet,ucParameterSet,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
personaCount = 0
for personaParameters in personaParameterSet:
objtId = db_proxy.existingObject(personaParameters.name(),'persona')
if objtId == -1:
db_proxy.addPersona(personaParameters)
else:
personaParameters.setId(objtId)
db_proxy.updatePersona(personaParameters)
personaCount += 1
edCount = 0
for edParameters in edParameterSet:
objtId = db_proxy.existingObject(edParameters.name(),'external_document')
if objtId == -1:
db_proxy.addExternalDocument(edParameters)
else:
edParameters.setId(objtId)
db_proxy.updateExternalDocument(edParameters)
edCount += 1
drCount = 0
for drParameters in drParameterSet:
objtId = db_proxy.existingObject(drParameters.name(),'document_reference')
if objtId == -1:
db_proxy.addDocumentReference(drParameters)
else:
drParameters.setId(objtId)
db_proxy.updateDocumentReference(drParameters)
drCount += 1
taskCount = 0
for taskParameters in taskParameterSet:
objtId = db_proxy.existingObject(taskParameters.name(),'task')
if objtId == -1:
db_proxy.addTask(taskParameters)
else:
taskParameters.setId(objtId)
db_proxy.updateTask(taskParameters)
taskCount += 1
ucCount = 0
for ucParameters in ucParameterSet:
objtId = db_proxy.existingObject(ucParameters.name(),'usecase')
if objtId == -1:
db_proxy.addUseCase(ucParameters)
else:
ucParameters.setId(objtId)
db_proxy.updateUseCase(ucParameters)
ucCount += 1
crCount = 0
for crParameters in crParameterSet:
objtId = db_proxy.existingObject(crParameters.name(),'concept_reference')
if objtId == -1:
db_proxy.addConceptReference(crParameters)
else:
crParameters.setId(objtId)
db_proxy.updateConceptReference(crParameters)
crCount += 1
pcCount = 0
for pcParameters in pcParameterSet:
db_proxy.addPersonaCharacteristic(pcParameters)
pcCount += 1
tcCount = 0
for tcParameters in tcParameterSet:
objtId = db_proxy.existingObject(tcParameters.task(),'task_characteristic')
if objtId == -1:
db_proxy.addTaskCharacteristic(tcParameters)
else:
tcParameters.setId(objtId)
db_proxy.updateTaskCharacterisric(tcParameters)
tcCount += 1
msgStr = 'Imported ' + str(personaCount) + ' personas, ' + str(edCount) + ' external documents, ' + str(drCount) + ' document references, ' + str(crCount) + ' concept references, ' + str(pcCount) + ' persona characteristics, ' + str(tcCount) + ' task characteristics, ' + str(taskCount) + ' tasks, and ' + str(ucCount) + ' use cases.'
return msgStr
def importAssociationsFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = AssociationsContentHandler(session_id = session_id)
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
return importAssociations(handler.manualAssociations(),handler.goalAssociations(),handler.dependencyAssociations(),session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importAssociations(maParameterSet,gaParameterSet,depParameterSet,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
maCount = 0
for tTable,fromId,toId,refType in maParameterSet:
db_proxy.addTrace(tTable,fromId,toId,refType)
maCount += 1
gaCount = 0
for gaParameters in gaParameterSet:
db_proxy.addGoalAssociation(gaParameters)
gaCount += 1
depCount = 0
for depParameters in depParameterSet:
db_proxy.addDependency(depParameters)
depCount += 1
msgStr = 'Imported ' + str(maCount) + ' manual associations, ' + str(gaCount) + ' goal associations, and ' + str(depCount) + ' dependency associations.'
return msgStr
def importProjectFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = CairisContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
pSettings = handler.settings()
envParameterSet = handler.environments()
return importProjectData(pSettings,envParameterSet,session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importProjectData(pSettings,envParameterSet,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
if (pSettings != None):
db_proxy.updateSettings(pSettings[0],pSettings[1],pSettings[2],pSettings[3],pSettings[4],pSettings[5],pSettings[6],pSettings[7])
envCount = 0
for envParameters in envParameterSet:
objtId = db_proxy.existingObject(envParameters.name(),'environment')
if objtId == -1:
db_proxy.addEnvironment(envParameters)
else:
envParameters.setId(objtId)
db_proxy.updateEnvironment(envParameters)
envCount += 1
msgText = 'Imported ' + str(envCount) + ' environments'
if (pSettings != None):
msgText += ', and project settings'
msgText += '.'
return msgText
def importComponentViewFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = ArchitecturalPatternContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
view = handler.view()
return importComponentViewData(view,session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importAssetsFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = TemplateAssetsContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
return importAssets(handler.valueTypes(),handler.assets(),session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importAssets(valueTypes,assets,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
vtCount = 0
taCount = 0
for vtParameters in valueTypes:
vtId = db_proxy.existingObject(vtParameters.name(),vtParameters.type())
if vtId == -1:
db_proxy.addValueType(vtParameters)
vtCount += 1
for taParameters in assets:
taId = db_proxy.existingObject(taParameters.name(),'template_asset')
if taId == -1:
db_proxy.addTemplateAsset(taParameters)
taCount += 1
return 'Imported ' + str(vtCount) + ' value types, and ' + str(taCount) + ' template assets.'
def importComponentViewData(view,session_id = None):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
db_proxy.addComponentView(view)
msgStr = 'Imported architectural pattern'
return msgStr
def importSynopsesFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = SynopsesContentHandler(session_id = session_id)
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
charSyns = handler.characteristicSynopses()
refSyns = handler.referenceSynopses()
stepSyns = handler.stepSynopses()
refConts = handler.referenceContributions()
ucConts = handler.useCaseContributions()
return importSynopses(charSyns,refSyns,stepSyns,refConts,ucConts,session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importSynopses(charSyns,refSyns,stepSyns,refConts,ucConts,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
for cs in charSyns:
db_proxy.addCharacteristicSynopsis(cs)
for rs in refSyns:
db_proxy.addReferenceSynopsis(rs)
for ucName,envName,stepNo,synName,aType,aName in stepSyns:
db_proxy.addStepSynopsis(ucName,envName,stepNo,synName,aType,aName)
db_proxy.conn.commit()
for rc in refConts:
db_proxy.addReferenceContribution(rc)
for uc in ucConts:
db_proxy.addUseCaseContribution(uc)
msgStr = 'Imported ' + str(len(charSyns)) + ' characteristic synopses, ' + str(len(refSyns)) + ' reference synopses, ' + str(len(stepSyns)) + ' step synopses, ' + str(len(refConts)) + ' reference contributions, and ' + str(len(ucConts)) + ' use case contributions.'
return msgStr
def importDomainValuesFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = DomainValueContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
tvValues,rvValues,cvValues,svValues,lvValues,capValues,motValues = handler.values()
return importDomainValues(tvValues,rvValues,cvValues,svValues,lvValues,capValues,motValues,session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importDomainValues(tvValues,rvValues,cvValues,svValues,lvValues,capValues,motValues,session_id):
noOfTvs = len(tvValues)
noOfRvs = len(rvValues)
noOfCvs = len(cvValues)
noOfSvs = len(svValues)
noOfLvs = len(lvValues)
noOfCapVs = len(capValues)
noOfMotVs = len(motValues)
b = Borg()
db_proxy = b.get_dbproxy(session_id)
tId = 0
if (noOfTvs > 0):
for tvp in tvValues:
tvp.setId(tId)
db_proxy.updateValueType(tvp)
tId += 1
tId =1
if (noOfRvs > 0):
for rvp in rvValues:
rvp.setId(tId)
db_proxy.updateValueType(rvp)
tId += 1
tId = 0
if (noOfCvs > 0):
for cvp in cvValues:
cvp.setId(tId)
db_proxy.updateValueType(cvp)
tId += 1
tId = 0
if (noOfSvs > 0):
for svp in svValues:
svp.setId(tId)
db_proxy.updateValueType(svp)
tId += 1
tId = 0
if (noOfLvs > 0):
for lvp in lvValues:
lvp.setId(tId)
db_proxy.updateValueType(lvp)
tId += 1
if (noOfCapVs > 0):
for capvp in capValues:
db_proxy.addValueType(capvp)
if (noOfMotVs > 0):
for motvp in motValues:
db_proxy.addValueType(motvp)
msgStr = 'Imported domain values'
return msgStr
def importProcessesFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = ProcessesContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
docs = handler.internalDocuments()
codes = handler.codes()
memos = handler.memos()
quotations = handler.quotations()
codeNetworks = handler.codeNetworks()
processes = handler.processes()
ics = handler.impliedCharacteristics()
intentions = handler.intentions()
contributions = handler.contributions()
return importProcesses(docs,codes,memos,quotations,codeNetworks,processes,ics,intentions,contributions,session_id = session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importProcesses(docs,codes,memos,quotations,codeNetworks,processes,ics,intentions,contributions,session_id):
noOfDocs = len(docs)
noOfCodes = len(codes)
noOfMemos = len(memos)
noOfQuotations = len(quotations)
noOfCNs = len(codeNetworks)
noOfProcs = len(processes)
noOfICs = len(ics)
noOfIntentions = len(intentions)
noOfContributions = len(contributions)
b = Borg()
db_proxy = b.get_dbproxy(session_id)
for dp in docs:
db_proxy.addInternalDocument(dp)
for cp in codes:
db_proxy.addCode(cp)
for mp in memos:
db_proxy.addMemo(mp)
for q in quotations:
db_proxy.addQuotation(q)
# Necessary because adding document memos currently overwrites the existing memo text
for mp in memos:
db_proxy.updateMemo(mp)
for cn in codeNetworks:
personaName = cn[0]
rtName = cn[1]
fromCode = cn[2]
toCode = cn[3]
db_proxy.addCodeRelationship(personaName,fromCode,toCode,rtName)
for p in processes:
db_proxy.addImpliedProcess(p)
for ic in ics:
db_proxy.addImpliedCharacteristic(ic)
for intention in intentions:
db_proxy.addIntention(intention)
for contribution in contributions:
db_proxy.addContribution(contribution)
msgStr = 'Imported ' + str(noOfDocs) + ' internal documents, ' + str(noOfCodes) + ' codes, ' + str(noOfMemos) + ' memos, ' + str(noOfQuotations) + ' quotations, ' + str(noOfCNs) + ' code relationships, ' + str(noOfProcs) + ' implied processes, ' + str(noOfIntentions) + ' intentions, and ' + str(noOfContributions) + ' contributions.'
return msgStr
def importLocationsFile(importFile,session_id = None):
try:
parser = xml.sax.make_parser()
handler = LocationsContentHandler()
parser.setContentHandler(handler)
parser.setEntityResolver(handler)
parser.parse(importFile)
locations = handler.locations()
return importLocations(locations,session_id)
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
def importLocations(locations,session_id):
b = Borg()
db_proxy = b.get_dbproxy(session_id)
db_proxy.addLocations(locations)
msgStr = 'Imported ' + locations.name()
return msgStr
def importModelFile(importFile,isOverwrite = 1,session_id = None):
try:
b = Borg()
db_proxy = b.get_dbproxy(session_id)
modelTxt = ''
if isOverwrite == 1:
db_proxy.clearDatabase(session_id)
modelTxt += importTVTypeFile(importFile,isOverwrite,session_id = session_id) + ' '
modelTxt += importDomainValuesFile(importFile,session_id) + ' '
modelTxt += importProjectFile(importFile,session_id) + ' '
modelTxt += importRiskAnalysisFile(importFile,session_id) + ' '
modelTxt += importUsabilityFile(importFile,session_id) + ' '
modelTxt += importRequirementsFile(importFile,session_id) + ' '
modelTxt += importAssociationsFile(importFile,session_id) + ' '
modelTxt += importSynopsesFile(importFile,session_id)
return modelTxt
except xml.sax.SAXException, e:
raise ARMException("Error parsing" + importFile + ": " + e.getMessage())
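# Usage sketch (not part of the original module): run a full model import followed by a
# locations import. The file names and the session id here are illustrative assumptions.
def exampleImport(session_id):
  msgStr = importModelFile('example_model.xml',isOverwrite = 1,session_id = session_id)
  msgStr += ' ' + importLocationsFile('example_locations.xml',session_id = session_id)
  return msgStr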
| 36.609959
| 336
| 0.726284
|
ea7c4aae3a7e3f510947653303ccff6dd78abceb
| 874
|
py
|
Python
|
server/app/models/meta/pydanticbase.py
|
michael0liver/fullstack-fastapi-vuejs-template
|
a686b7b71dca04f90538d00b350158cb6d7e9db2
|
[
"MIT"
] | 15
|
2020-06-14T05:35:05.000Z
|
2021-08-01T15:30:38.000Z
|
server/app/models/meta/pydanticbase.py
|
michael0liver/fullstack-fastapi-vuejs-template
|
a686b7b71dca04f90538d00b350158cb6d7e9db2
|
[
"MIT"
] | 1
|
2022-02-27T19:32:18.000Z
|
2022-02-27T19:32:18.000Z
|
server/app/models/meta/pydanticbase.py
|
michael0liver/fullstack-fastapi-vuejs-template
|
a686b7b71dca04f90538d00b350158cb6d7e9db2
|
[
"MIT"
] | 1
|
2021-09-06T03:21:51.000Z
|
2021-09-06T03:21:51.000Z
|
from datetime import datetime
import orjson
from pydantic import BaseModel
def orjson_dumps(v, *, default):
"""
    orjson.dumps returns bytes; to match the standard json.dumps we need to decode the result.
    The orjson.dumps option arguments provide many features, such as `option=orjson.OPT_SERIALIZE_UUID` to natively encode UUID instances.
"""
return orjson.dumps(v, default=default).decode()
class PydanticBase(BaseModel):
"""PydanticBase with custom JSON implementation.
'orjson' is used here as it takes care of datetime
encoding natively and gives better (de)serialisation performance.
.. seealso::
https://pydantic-docs.helpmanual.io/usage/exporting_models/#custom-json-deserialisation
"""
class Config:
orm_mode = True
validate_assignment = True
json_loads = orjson.loads
json_dumps = orjson_dumps
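# Usage sketch (not part of the original module): a model subclassing PydanticBase picks up
# the orjson-backed (de)serialisation. The Event model and its fields are illustrative.
class Event(PydanticBase):
    name: str
    created_at: datetime
def _demo_event_roundtrip() -> str:
    event = Event(name="deploy", created_at=datetime(2021, 1, 1))
    return event.json()  # orjson encodes the datetime natively and returns a str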
| 28.193548
| 132
| 0.718535
|
bd28aa813787812a9f859a6e23c64d5b82a682c0
| 961
|
py
|
Python
|
basic/conf/urls.py
|
spinico/django-projects-boilerplates
|
22d47f60d282d0edb9c0f1b84bb3e9e84949bd25
|
[
"MIT"
] | null | null | null |
basic/conf/urls.py
|
spinico/django-projects-boilerplates
|
22d47f60d282d0edb9c0f1b84bb3e9e84949bd25
|
[
"MIT"
] | null | null | null |
basic/conf/urls.py
|
spinico/django-projects-boilerplates
|
22d47f60d282d0edb9c0f1b84bb3e9e84949bd25
|
[
"MIT"
] | null | null | null |
"""conf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.main, name='main')
Class-based views
1. Add an import: from other_app.views import Main
2. Add a URL to urlpatterns: path('', Main.as_view(), name='main')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('demo.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
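# Illustrative sketch (not part of the original file): wiring up an additional app exactly as
# the docstring above describes; 'blog' is a made-up app and is only routed if it is installed.
if 'blog' in settings.INSTALLED_APPS:
    urlpatterns += [
        path('blog/', include('blog.urls')),
    ]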
| 32.033333
| 77
| 0.699272
|
376f05fee3af0ab766386c378224632b6963da42
| 4,636
|
py
|
Python
|
philoseismos/gprMax/output.py
|
sir-dio/old-philoseismos
|
4c830971641313abd95693b24965ede261c6824b
|
[
"MIT"
] | 1
|
2019-10-27T14:03:00.000Z
|
2019-10-27T14:03:00.000Z
|
philoseismos/gprMax/output.py
|
sir-dio/old-philoseismos
|
4c830971641313abd95693b24965ede261c6824b
|
[
"MIT"
] | null | null | null |
philoseismos/gprMax/output.py
|
sir-dio/old-philoseismos
|
4c830971641313abd95693b24965ede261c6824b
|
[
"MIT"
] | null | null | null |
""" philoseismos: with passion for the seismic method.
This file defines the gprMax.Output class, used to load the output of gprMax.
@author: Dubrovin Ivan
e-mail: dubrovin.io@icloud.com """
import h5py
import numpy as np
import glob
import os
from philoseismos.gprMax.constants import output_attributes
class Output:
""" This object represents a gprMax output file. """
def __init__(self):
pass
@classmethod
def load_Ascan(cls, file):
""" Create a new gprMax.Output object by loading a single A-scan output file. """
new = cls()
with h5py.File(file, 'r') as f:
new.title = f.attrs['Title']
new.iterations = f.attrs['Iterations']
new.dt = f.attrs['dt']
new.dx_dy_dz = f.attrs['dx_dy_dz']
new.gprMax = f.attrs['gprMax']
new.nrx = f.attrs['nrx']
new.nsrc = f.attrs['nsrc']
new.nx_ny_nz = f.attrs['nx_ny_nz']
new.rxsteps = f.attrs['rxsteps']
new.srcsteps = f.attrs['srcsteps']
new.Ex = np.array(f['rxs/rx1/Ex'])
new.Ey = np.array(f['rxs/rx1/Ey'])
new.Ez = np.array(f['rxs/rx1/Ez'])
new.Hx = np.array(f['rxs/rx1/Hx'])
new.Hy = np.array(f['rxs/rx1/Hy'])
new.Hz = np.array(f['rxs/rx1/Hz'])
new.rx_name = f['rxs/rx1'].attrs['Name']
new.rx_position = f['rxs/rx1'].attrs['Position']
new.src_position = f['srcs/src1'].attrs['Position']
new.src_type = f['srcs/src1'].attrs['Type']
new.t = np.arange(new.iterations) * new.dt
return new
@classmethod
def gather_Ascans(cls, basename, remove_files=True):
""" Create a B-scan by loading all the A-scans with a given basename. """
new = cls()
# find all the matching A-scans
files = glob.glob(f'{basename}*.out')
files.sort(key=lambda s: int(s[len(basename):-4]))
new.traces = len(files)
# create the lists to store rx names and positions, src positions and types
new.rx_names = []
new.rx_positions = []
new.src_positions = []
new.src_types = []
# open the first one to load the attributes
with h5py.File(files[0], 'r') as f:
new.title = f.attrs['Title']
new.iterations = f.attrs['Iterations']
new.dt = f.attrs['dt']
new.dx_dy_dz = f.attrs['dx_dy_dz']
new.gprMax = f.attrs['gprMax']
new.nx_ny_nz = f.attrs['nx_ny_nz']
# save the first receiver and source attributes
name, position = f['rxs/rx1'].attrs.values()
new.rx_names.append(name)
new.rx_positions.append(position)
position, type = f['srcs/src1'].attrs.values()
new.src_positions.append(position)
new.src_types.append(type)
# create the arrays to store the data
new.Ex = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
new.Ey = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
new.Ez = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
new.Hx = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
new.Hy = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
new.Hz = np.empty(shape=(new.traces, new.iterations), dtype=np.float32)
# save the first trace
new.Ex[0] = f['rxs/rx1/Ex']
new.Ey[0] = f['rxs/rx1/Ey']
new.Ez[0] = f['rxs/rx1/Ez']
new.Hx[0] = f['rxs/rx1/Hx']
new.Hy[0] = f['rxs/rx1/Hy']
new.Hz[0] = f['rxs/rx1/Hz']
# iterate through other files and load the remaining traces
for i, file in enumerate(files[1:], 1):
with h5py.File(file, 'r') as f:
name, position = f['rxs/rx1'].attrs.values()
new.rx_names.append(name)
new.rx_positions.append(position)
position, type = f['srcs/src1'].attrs.values()
new.src_positions.append(position)
new.src_types.append(type)
new.Ex[i] = f['rxs/rx1/Ex']
new.Ey[i] = f['rxs/rx1/Ey']
new.Ez[i] = f['rxs/rx1/Ez']
new.Hx[i] = f['rxs/rx1/Hx']
new.Hy[i] = f['rxs/rx1/Hy']
new.Hz[i] = f['rxs/rx1/Hz']
new.t = np.arange(new.iterations) * new.dt
if remove_files:
for file in files:
os.remove(file)
return new
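# Usage sketch (not part of the original module): the output file names below are
# illustrative assumptions, not files shipped with the project.
if __name__ == '__main__':
    a_scan = Output.load_Ascan('example_Ascan.out')
    print(a_scan.Ex.shape)  # a single trace: (iterations,)
    b_scan = Output.gather_Ascans('example_Bscan', remove_files=False)
    print(b_scan.Ex.shape)  # (traces, iterations)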
| 34.857143
| 89
| 0.545298
|
a634950377089ab9eb722cb046a76dc4f340d9a3
| 4,623
|
py
|
Python
|
cvservices/api.py
|
amabdallah/ControlledVocabulariesTemplate
|
751e5a87da0bc036311b4e8aa8408eb69bee716b
|
[
"BSD-3-Clause"
] | null | null | null |
cvservices/api.py
|
amabdallah/ControlledVocabulariesTemplate
|
751e5a87da0bc036311b4e8aa8408eb69bee716b
|
[
"BSD-3-Clause"
] | null | null | null |
cvservices/api.py
|
amabdallah/ControlledVocabulariesTemplate
|
751e5a87da0bc036311b4e8aa8408eb69bee716b
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import csv
import json
import StringIO
from collections import OrderedDict
from django.http.response import HttpResponse
from tastypie.api import Api
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.utils.mime import build_content_type
from rdfserializer.api import ModelRdfResource
from cvservices.utils import upper_splitted, lfirst
from settings.config import BASE_DIR
cv_models = {}
try:
with open(os.path.join(BASE_DIR, 'cv_models.json')) as data_file:
cv_models = json.load(data_file).get('models')
except IOError:
    print("You need to set up the cv_models settings file (see instructions in the README.md file).")
model_resource_template = """class %(uppercase)sResource(ModelRdfResource):
scheme = '%(lfirst)s'
class Meta(ModelRdfResource.Meta):
queryset = %(uppercase)s.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = '%(lowercase)s'
"""
model_resource_register_template = """v1_api.register(%(uppercase)sResource())
"""
imported_model_template = """%s,
"""
output_resources = """"""
imported_models = """"""
output_register = """"""
for cv_model in cv_models:
imported_models += imported_model_template % cv_model.get('name')
output_resources += model_resource_template % {'lowercase': cv_model.get('name').lower(),
'uppercase': cv_model.get('name'),
'uppersplitted': upper_splitted(cv_model.get('name')),
'lfirst': lfirst(cv_model.get('name'))}
output_register += model_resource_register_template % {'uppercase': cv_model.get('name')}
api_import_template = """from models import (
%s
)""" % imported_models
exec(api_import_template)
class CSVSerializer(Serializer):
formats = ['csv']
content_types = {
'csv': 'text/plain'
}
def to_csv(self, data, options=None, writer=None):
options = options or {}
data = self.to_simple(data, options)
excluded_fields = [u'resource_uri']
raw_data = StringIO.StringIO()
first = True
if "meta" in data.keys():
objects = data.get("objects")
for value in objects:
test = {}
for excluded_field in excluded_fields:
del value[excluded_field]
self.flatten(value, test)
odict = OrderedDict()
odict['Term'] = test['term']
del test['term']
odict['UnitsName'] = test['name']
del test['name']
odict['UnitsTypeCV'] = test['type']
del test['type']
odict['UnitsAbbreviation'] = test['abbreviation']
del test['abbreviation']
odict['UnitsLink'] = test['link']
del test['link']
if first:
writer = csv.DictWriter(raw_data, odict.keys())
writer.writeheader()
writer.writerow(odict)
first = False
else:
writer.writerow({k: (v.encode('utf-8') if isinstance(v, int) is not True and isinstance(v, type(
None)) is not True else v) for k, v in odict.items()})
else:
test = {}
for excluded_field in excluded_fields:
del data[excluded_field]
self.flatten(data, test)
odict = OrderedDict()
odict['Term'] = test['term']
del test['term']
odict['UnitsName'] = test['name']
del test['name']
odict['UnitsTypeCV'] = test['type']
del test['type']
odict['UnitsAbbreviation'] = test['abbreviation']
del test['abbreviation']
odict['UnitsLink'] = test['link']
del test['link']
if first:
writer = csv.DictWriter(raw_data, odict.keys())
writer.writeheader()
writer.writerow(odict)
first = False
else:
writer.writerow(odict)
CSVContent = raw_data.getvalue()
return CSVContent
def flatten(self, data, odict={}):
if isinstance(data, list):
for value in data:
self.flatten(value, odict)
elif isinstance(data, dict):
for (key, value) in data.items():
if not isinstance(value, (dict, list)):
odict[key] = value
else:
self.flatten(value, odict)
exec(output_resources)
v1_api = Api(api_name='v1')
exec(output_register)
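# Small illustration (not part of the original module) of how CSVSerializer.flatten collapses
# nested structures into a single flat mapping; the sample data below is made up.
if __name__ == '__main__':
    flat = {}
    CSVSerializer().flatten({'term': 'metre', 'detail': {'type': 'Length', 'link': None}}, flat)
    print(flat)  # -> {'term': 'metre', 'type': 'Length', 'link': None}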
| 32.556338
| 116
| 0.58209
|
7b1934590385883f1e8be2e72d2e37a0047a3b78
| 4,430
|
py
|
Python
|
cle/backends/regions.py
|
mattrepl/cle
|
08f5f24d61470da8a99ee0c40f2185397a958313
|
[
"BSD-2-Clause"
] | 1
|
2021-05-11T17:27:28.000Z
|
2021-05-11T17:27:28.000Z
|
cle/backends/regions.py
|
fiberx/cle-fiber
|
08f5f24d61470da8a99ee0c40f2185397a958313
|
[
"BSD-2-Clause"
] | null | null | null |
cle/backends/regions.py
|
fiberx/cle-fiber
|
08f5f24d61470da8a99ee0c40f2185397a958313
|
[
"BSD-2-Clause"
] | null | null | null |
from ..utils import key_bisect_find, key_bisect_insort_left
#
# Container
#
class Regions(object):
"""
    A container class acting as a list of regions (sections or segments). Additionally, it keeps a sorted list of
    all regions that are mapped into memory to allow fast lookups.
    We assume that none of the regions overlap with each other.
"""
def __init__(self, lst=None):
self._list = lst if lst is not None else []
if self._list:
self._sorted_list = self._make_sorted(self._list)
else:
self._sorted_list = []
@property
def raw_list(self):
"""
        Get the internal list. Changes to it are not tracked, so _sorted_list will not be updated accordingly.
        Therefore you probably do not want to modify the list.
:return: The internal list container.
:rtype: list
"""
return self._list
@property
def max_addr(self):
"""
Get the highest address of all regions.
:return: The highest address of all regions, or None if there is no region available.
:rtype: int or None
"""
if self._sorted_list:
return self._sorted_list[-1].max_addr
return None
def __getitem__(self, idx):
return self._list[idx]
def __setitem__(self, idx, item):
self._list[idx] = item
# update self._sorted_list
self._sorted_list = self._make_sorted(self._list)
def __len__(self):
return len(self._list)
def __repr__(self):
return "<Regions: %s>" % repr(self._list)
def _rebase(self, delta):
"""
        Rebases all regions to another base address.
        Modifies the state of each internal object, so the list reference does not need to be updated;
        the same holds for the sorted list, since the operation preserves the ordering.
:param delta: Delta offset between an old and a new image bases
:type delta: int
"""
map(lambda x: x._rebase(delta), self._list)
def append(self, region):
"""
Append a new Region instance into the list.
:param Region region: The region to append.
"""
self._list.append(region)
if self._is_region_mapped(region):
key_bisect_insort_left(self._sorted_list, region, keyfunc=lambda r: r.vaddr)
def find_region_containing(self, addr):
"""
Find the region that contains a specific address. Returns None if none of the regions covers the address.
:param addr: The address.
:type addr: int
:return: The region that covers the specific address, or None if no such region is found.
:rtype: Region or None
"""
pos = key_bisect_find(self._sorted_list, addr,
keyfunc=lambda r: r if type(r) in (int, long) else r.vaddr + r.memsize)
if pos >= len(self._sorted_list):
return None
region = self._sorted_list[pos]
if region.contains_addr(addr):
return region
return None
def find_region_next_to(self, addr):
"""
Find the next region after the given address.
:param int addr: The address to test.
        :return: The next region after the given address, or None if there is no region after the
                 address.
:rtype: Region or None
"""
pos = key_bisect_find(self._sorted_list, addr,
keyfunc=lambda r: r if type(r) in (int, long) else r.vaddr + r.memsize)
if pos >= len(self._sorted_list):
return None
return self._sorted_list[pos]
@staticmethod
def _is_region_mapped(region):
# delayed import
from .elf.regions import ELFSection
mapped = True
if region.memsize == 0:
mapped = False
elif isinstance(region, ELFSection) and not region.occupies_memory:
mapped = False
return mapped
@staticmethod
def _make_sorted(lst):
"""
Return a sorted list of regions that are mapped into memory.
:param list lst: A list of regions.
:return: A sorted list of regions.
:rtype: list
"""
return sorted([ r for r in lst if Regions._is_region_mapped(r) ], key=lambda x: x.vaddr)
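# Usage sketch (not part of the original module): _DemoRegion is a hypothetical stand-in
# providing only the attributes Regions relies on (vaddr, memsize, max_addr, contains_addr);
# real callers would use the Section/Segment subclasses instead.
class _DemoRegion(object):
    def __init__(self, vaddr, memsize):
        self.vaddr = vaddr
        self.memsize = memsize
    @property
    def max_addr(self):
        return self.vaddr + self.memsize - 1
    def contains_addr(self, addr):
        return self.vaddr <= addr < self.vaddr + self.memsize
def _demo_regions():
    regions = Regions()
    regions.append(_DemoRegion(0x1000, 0x100))
    regions.append(_DemoRegion(0x2000, 0x100))
    hit = regions.find_region_containing(0x1010)   # the region starting at 0x1000
    miss = regions.find_region_containing(0x1800)  # None: no region covers this address
    return hit, miss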
| 30.136054
| 116
| 0.601129
|
4fd3c4e559b4ea39684ac1d1e20cfc11ef8bafe1
| 88
|
py
|
Python
|
voter/apps.py
|
emre/steeminator
|
1f1a162be838ae5d90db1ea36786a80d362af0ad
|
[
"MIT"
] | 4
|
2018-07-31T20:55:34.000Z
|
2019-05-28T06:39:05.000Z
|
voter/apps.py
|
emre/steeminator
|
1f1a162be838ae5d90db1ea36786a80d362af0ad
|
[
"MIT"
] | 5
|
2018-08-01T07:05:25.000Z
|
2018-08-01T07:11:42.000Z
|
voter/apps.py
|
emre/steeminator
|
1f1a162be838ae5d90db1ea36786a80d362af0ad
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'voter'
| 14.666667
| 33
| 0.75
|
3c5c699628824fa6bfa22fe26b800300d28360c4
| 1,263
|
py
|
Python
|
test/actions.py
|
ekarpovs/gfsm
|
f338ec8609f8034af746af7c45353103b4870c9a
|
[
"MIT"
] | null | null | null |
test/actions.py
|
ekarpovs/gfsm
|
f338ec8609f8034af746af7c45353103b4870c9a
|
[
"MIT"
] | null | null | null |
test/actions.py
|
ekarpovs/gfsm
|
f338ec8609f8034af746af7c45353103b4870c9a
|
[
"MIT"
] | null | null | null |
# User actions implementation
# Test user action's wrapper example
def fsm_action(implementation):
def execute(context):
__name__ = implementation.__name__
print("test wrapper for action:", __name__)
    # Separate the user functions from the FSM
    # Get the relevant user data
data = {}
data = implementation(data)
return context
return execute
def init_context(data):
  # Do something
  return data
def entry_start(data):
  # Do something
  return data
def exit_start(data):
  # Do something
  return data
def entry_1(data):
  # Do something
  return data
def exit_1(data):
  # Do something
  return data
def entry_2(data):
  # Do something
  return data
def exit_2(data):
  # Do something
  return data
def entry_3(data):
  # Do something
  return data
def exit_3(data):
  # Do something
  return data
def entry_end(data):
  # Do something
  return data
def exit_end(data):
  # Do something
  return data
def action_1(data):
  # Do something
  return data
def action_2(data):
  # Do something
  return data
def action_3(data):
  # Do something
  return data
def action_4(data):
  # Do something
  return data
def start_transition(context):
return context
def end_transition(context):
return context
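# Usage sketch (not part of the original module): wrap one of the user actions with
# fsm_action so a FSM can invoke it with a context; the context dict is illustrative.
def run_wrapped_example():
  wrapped = fsm_action(action_1)
  context = {'state': 'start'}
  # Prints the wrapped action's name and returns the (unchanged) context
  return wrapped(context)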
| 15.216867
| 47
| 0.710214
|
eaf4f9c147ed9a703407eb6a6c962f24abee4998
| 3,286
|
py
|
Python
|
test/test_api.py
|
skyformat99/pynng
|
ce1696dafb4b71603a8968fd59364fb784e5d507
|
[
"MIT"
] | null | null | null |
test/test_api.py
|
skyformat99/pynng
|
ce1696dafb4b71603a8968fd59364fb784e5d507
|
[
"MIT"
] | null | null | null |
test/test_api.py
|
skyformat99/pynng
|
ce1696dafb4b71603a8968fd59364fb784e5d507
|
[
"MIT"
] | null | null | null |
import pytest
import trio
import pynng
addr = 'tcp://127.0.0.1:13131'
addr2 = 'tcp://127.0.0.1:13132'
def test_dialers_get_added():
with pynng.Pair0() as s:
assert len(s.dialers) == 0
s.dial(addr, block=False)
assert len(s.dialers) == 1
s.dial(addr2, block=False)
assert len(s.dialers) == 2
def test_listeners_get_added():
with pynng.Pair0() as s:
assert len(s.listeners) == 0
s.listen(addr)
assert len(s.listeners) == 1
s.listen(addr2)
assert len(s.listeners) == 2
def test_closing_listener_works():
with pynng.Pair0(listen=addr) as s:
assert len(s.listeners) == 1
s.listeners[0].close()
assert len(s.listeners) == 0
# if the listener is really closed, we should be able to listen at the
# same address again
s.listen(addr)
assert len(s.listeners) == 1
assert len(s.listeners) == 0
def test_closing_dialer_works():
    with pynng.Pair0(dial=addr, block_on_dial=False) as s:
        assert len(s.dialers) == 1
        s.dialers[0].close()
        assert len(s.dialers) == 0
def test_nonblocking_recv_works():
with pynng.Pair0(listen=addr) as s:
with pytest.raises(pynng.TryAgain):
s.recv(block=False)
def test_context():
async def test_them_up(req, rep):
assert isinstance(req, pynng.Context)
assert isinstance(rep, pynng.Context)
request = b'i am requesting'
await req.asend(request)
assert await rep.arecv() == request
response = b'i am responding'
await rep.asend(response)
assert await req.arecv() == response
with pytest.raises(pynng.BadState):
await req.arecv()
# responders can't send before receiving
with pytest.raises(pynng.BadState):
await rep.asend(b'I cannot do this why am I trying')
with pynng.Req0(listen=addr, recv_timeout=1000) as req, \
pynng.Rep0(dial=addr, recv_timeout=1000) as rep:
trio.run(test_them_up, req.new_context(), rep.new_context())
def test_multiple_contexts():
async def recv_and_send(ctx):
data = await ctx.arecv()
await trio.sleep(0.05)
await ctx.asend(data)
async def do_some_stuff(rep, req1, req2):
async with trio.open_nursery() as n:
ctx1, ctx2 = rep.new_contexts(2)
n.start_soon(recv_and_send, ctx1)
n.start_soon(recv_and_send, ctx2)
await req1.asend(b'oh hi')
await req2.asend(b'me toooo')
assert (await req1.arecv() == b'oh hi')
assert (await req2.arecv() == b'me toooo')
with pynng.Rep0(listen=addr, recv_timeout=500) as rep, \
pynng.Req0(dial=addr, recv_timeout=500) as req1, \
pynng.Req0(dial=addr, recv_timeout=500) as req2:
trio.run(do_some_stuff, rep, req1, req2)
def test_synchronous_recv_context():
with pynng.Rep0(listen=addr, recv_timeout=500) as rep, \
pynng.Req0(dial=addr, recv_timeout=500) as req:
req.send(b'oh hello there old pal')
assert rep.recv() == b'oh hello there old pal'
rep.send(b'it is so good to hear from you')
assert req.recv() == b'it is so good to hear from you'
| 30.425926
| 78
| 0.615946
|
050ebde3418a3c5c63c56bdf6448a12576fef650
| 3,373
|
py
|
Python
|
custom_components/senec/config_flow.py
|
Exceptionfault/home-assistant-senec
|
cedb3de9b0251b1680a05c7dd09429c9e62d3de2
|
[
"Apache-2.0"
] | null | null | null |
custom_components/senec/config_flow.py
|
Exceptionfault/home-assistant-senec
|
cedb3de9b0251b1680a05c7dd09429c9e62d3de2
|
[
"Apache-2.0"
] | null | null | null |
custom_components/senec/config_flow.py
|
Exceptionfault/home-assistant-senec
|
cedb3de9b0251b1680a05c7dd09429c9e62d3de2
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow for senec integration."""
import logging
from urllib.parse import ParseResult, urlparse
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.util import slugify
from pysenec import Senec
from requests.exceptions import HTTPError, Timeout
from .const import DOMAIN # pylint:disable=unused-import
from .const import DEFAULT_HOST, DEFAULT_NAME
_LOGGER = logging.getLogger(__name__)
@callback
def senec_entries(hass: HomeAssistant):
"""Return the hosts already configured."""
return {
entry.data[CONF_HOST] for entry in hass.config_entries.async_entries(DOMAIN)
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for senec."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def _host_in_configuration_exists(self, host) -> bool:
"""Return True if host exists in configuration."""
if host in senec_entries(self.hass):
return True
return False
async def _test_connection(self, host):
"""Check if we can connect to the Senec device."""
websession = self.hass.helpers.aiohttp_client.async_get_clientsession()
try:
senec_client = Senec(host, websession)
await senec_client.update()
return True
except (OSError, HTTPError, Timeout):
self._errors[CONF_HOST] = "cannot_connect"
_LOGGER.error(
"Could not connect to Senec device at %s, check host ip address", host,
)
return False
    async def async_step_user(self, user_input=None):
        """Step invoked when the user initializes an integration."""
self._errors = {}
if user_input is not None:
# set some defaults in case we need to return to the form
name = slugify(user_input.get(CONF_NAME, DEFAULT_NAME))
host_entry = user_input.get(CONF_HOST, DEFAULT_HOST)
if self._host_in_configuration_exists(host_entry):
self._errors[CONF_HOST] = "already_configured"
else:
if await self._test_connection(host_entry):
return self.async_create_entry(
title=name, data={CONF_HOST: host_entry}
)
else:
user_input = {}
user_input[CONF_NAME] = DEFAULT_NAME
user_input[CONF_HOST] = DEFAULT_HOST
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(
CONF_HOST, default=user_input.get(CONF_HOST, DEFAULT_HOST)
): str,
}
),
errors=self._errors,
)
async def async_step_import(self, user_input=None):
"""Import a config entry."""
host_entry = user_input.get(CONF_HOST, DEFAULT_HOST)
if self._host_in_configuration_exists(host_entry):
return self.async_abort(reason="already_configured")
return await self.async_step_user(user_input)
| 35.505263
| 87
| 0.631782
|
2a0d3123c1bf106387534c010a0b9dfec11c4eb7
| 3,141
|
py
|
Python
|
_plugins/compress_images.py
|
Kripner/slama.dev
|
8654b68626b237287b3f662b01f5f6b22a0fea6b
|
[
"MIT"
] | null | null | null |
_plugins/compress_images.py
|
Kripner/slama.dev
|
8654b68626b237287b3f662b01f5f6b22a0fea6b
|
[
"MIT"
] | null | null | null |
_plugins/compress_images.py
|
Kripner/slama.dev
|
8654b68626b237287b3f662b01f5f6b22a0fea6b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import hashlib
from typing import *
from subprocess import Popen, PIPE
import yaml
os.chdir(os.path.dirname(os.path.realpath(__file__)))
MAX = 10 # out of 100
CACHE_FOLDER = "../.jekyll-cache/compress_images"
SUB = "Images"
full_size = 0 # total size of images before compression
reduced_size = 0  # total size of images after compression
something_changed = False
def get_file_hashsum(file_name: str):
"""Generate a SHA-256 hashsum of the given file."""
hash_sha256 = hashlib.sha256()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def execute_shell_command(command: List[str]):
result = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
print([f for f in result if f != b""][0].decode().strip())
def format_to_mb(size: int) -> float:
"""Format to megabytes."""
return round(size / (1024 ** 2), 2)
config = {}
new_config = {}
if os.path.exists(CACHE_FOLDER):
with open(CACHE_FOLDER, "r") as f:
config = yaml.safe_load(f.read())
for root, dirs, files in os.walk("../photography/"):
for name in files:
# use jpegoptim to optimize raw images
if name.endswith(("jpg", "jpeg")) and "raw" in root:
path = os.path.join(root, name)
reduced_path = os.path.join(root, "..")
reduced_name = os.path.join(reduced_path, name)
            # the first command strips the metadata from (and losslessly optimizes) the original image
            # the second command creates a low-res version in the parent directory
commands = [
["jpegoptim", "-s", path],
["jpegoptim", "-s", f"-m{MAX}", path, "-d", reduced_path, "-o"],
]
# strip metadata
force_low_res = False # to possibly force-create a new low-res version
if path not in config or config[path][1] != get_file_hashsum(path):
execute_shell_command(commands[0])
new_config[path] = [100, get_file_hashsum(path)]
force_low_res = True
something_changed = True
else:
new_config[path] = config[path]
# create low-res image
if (
not os.path.exists(reduced_name)
or reduced_name not in config
or config[reduced_name] != [MAX, get_file_hashsum(reduced_name)]
or force_low_res
):
execute_shell_command(commands[1])
new_config[reduced_name] = [MAX, get_file_hashsum(reduced_name)]
something_changed = True
else:
new_config[reduced_name] = config[reduced_name]
full_size += os.path.getsize(path)
reduced_size += os.path.getsize(reduced_name)
with open(CACHE_FOLDER, "w") as f:
f.write(yaml.dump(new_config))
if something_changed:
print(f"size before: {format_to_mb(full_size)} MB")
print(f"size after: {format_to_mb(reduced_size)} MB")
else:
print("no changes.")
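# Helper usage sketch (illustrative, not part of the original script): 'some_image.jpg' is a
# placeholder path and the 5 MiB figure is just an example value.
def _demo_helpers():
    print(format_to_mb(5 * 1024 ** 2))         # -> 5.0
    print(get_file_hashsum("some_image.jpg"))  # SHA-256 hex digest of the placeholder file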
| 31.727273
| 83
| 0.606813
|
9d57d843ec24030c14fe25d0c939eb8b7e0860ff
| 127
|
py
|
Python
|
aws_lambda_powertools/middleware_factory/__init__.py
|
Sordie/aws-lambda-powertools-python
|
12c512b78b8830bf2ed09e35d640df14afacfc1a
|
[
"Apache-2.0"
] | 1,208
|
2020-05-20T19:06:29.000Z
|
2022-03-30T14:17:47.000Z
|
aws_lambda_powertools/middleware_factory/__init__.py
|
Sordie/aws-lambda-powertools-python
|
12c512b78b8830bf2ed09e35d640df14afacfc1a
|
[
"Apache-2.0"
] | 859
|
2020-05-22T09:59:54.000Z
|
2022-03-31T08:31:30.000Z
|
aws_lambda_powertools/middleware_factory/__init__.py
|
Sordie/aws-lambda-powertools-python
|
12c512b78b8830bf2ed09e35d640df14afacfc1a
|
[
"Apache-2.0"
] | 163
|
2020-05-18T21:08:25.000Z
|
2022-03-28T12:03:37.000Z
|
""" Utilities to enhance middlewares """
from .factory import lambda_handler_decorator
__all__ = ["lambda_handler_decorator"]
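# Usage sketch (illustrative, not part of the original module): a middleware built with
# lambda_handler_decorator receives the wrapped handler plus the Lambda event and context.
# The middleware name below is made up for the example.
@lambda_handler_decorator
def trace_execution(handler, event, context):
    # Anything placed before this call runs before the wrapped handler; anything after, afterwards.
    response = handler(event, context)
    return response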
| 25.4
| 45
| 0.795276
|
5718b03b742e76ff3c6c80675aee0c32b18ec5f9
| 5,921
|
py
|
Python
|
telemetry/telemetry/internal/results/artifact_results_unittest.py
|
bopopescu/chromium72-third-party-catapult
|
774e1355b871e13bb858147a136e9cb476f55030
|
[
"BSD-3-Clause"
] | 1
|
2019-01-04T10:08:58.000Z
|
2019-01-04T10:08:58.000Z
|
telemetry/telemetry/internal/results/artifact_results_unittest.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | null | null | null |
telemetry/telemetry/internal/results/artifact_results_unittest.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | 1
|
2019-04-21T23:48:15.000Z
|
2019-04-21T23:48:15.000Z
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import mock
from telemetry.internal.results import artifact_results
from telemetry.internal.util import file_handle
from py_utils import tempfile_ext
# splitdrive returns '' on systems which don't have drives, like linux.
ROOT_CHAR = os.path.splitdrive(__file__)[0] + os.sep
def _abs_join(*args):
"""Helper to do a path join that's an absolute path."""
return ROOT_CHAR + os.path.join(*args)
class ArtifactResultsUnittest(unittest.TestCase):
def testCreateBasic(self):
with tempfile_ext.NamedTemporaryDirectory(
prefix='artifact_tests') as tempdir:
ar = artifact_results.ArtifactResults(tempdir)
filenames = []
with ar.CreateArtifact('bad//story:name', 'logs') as log_file:
filenames.append(log_file.name)
log_file.write('hi\n')
with ar.CreateArtifact('other_name', 'logs') as log_file:
filenames.append(log_file.name)
log_file.write('hi\n')
for filename in filenames:
with open(filename) as f:
self.assertEqual(f.read(), 'hi\n')
def testCreateDuplicateStoryName(self):
with tempfile_ext.NamedTemporaryDirectory(
prefix='artifact_tests') as tempdir:
ar = artifact_results.ArtifactResults(tempdir)
filenames = []
with ar.CreateArtifact('story_name', 'logs') as log_file:
filenames.append(log_file.name)
log_file.write('hi\n')
with ar.CreateArtifact('story_name', 'logs') as log_file:
filenames.append(log_file.name)
log_file.write('hi\n')
for filename in filenames:
with open(filename) as f:
self.assertEqual(f.read(), 'hi\n')
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddBasic(self, make_patch, move_patch):
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact(
'test', 'artifact_name', _abs_join('foo', 'artifacts', 'bar.log'))
move_patch.assert_not_called()
make_patch.assert_called_with(_abs_join('foo', 'artifacts'))
self.assertEqual({k: dict(v) for k, v in ar._test_artifacts.items()}, {
'test': {
'artifact_name': ['artifacts/bar.log'],
}
})
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddNested(self, make_patch, move_patch):
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact('test', 'artifact_name', _abs_join(
'foo', 'artifacts', 'baz', 'bar.log'))
move_patch.assert_not_called()
make_patch.assert_called_with(_abs_join('foo', 'artifacts'))
self.assertEqual({k: dict(v) for k, v in ar._test_artifacts.items()}, {
'test': {
'artifact_name': ['artifacts/baz/bar.log'],
}
})
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddFileHandle(self, make_patch, move_patch):
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact('test', 'artifact_name', file_handle.FromFilePath(
_abs_join('', 'foo', 'artifacts', 'bar.log')))
move_patch.assert_not_called()
make_patch.assert_called_with(_abs_join('foo', 'artifacts'))
self.assertEqual({k: dict(v) for k, v in ar._test_artifacts.items()}, {
'test': {
'artifact_name': ['artifacts/bar.log'],
}
})
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddAndMove(self, make_patch, move_patch):
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact('test', 'artifact_name', _abs_join(
'another', 'directory', 'bar.log'))
move_patch.assert_called_with(
_abs_join('another', 'directory', 'bar.log'),
_abs_join('foo', 'artifacts'))
make_patch.assert_called_with(_abs_join('foo', 'artifacts'))
self.assertEqual({k: dict(v) for k, v in ar._test_artifacts.items()}, {
'test': {
'artifact_name': ['artifacts/bar.log'],
}
})
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddMultiple(self, make_patch, move_patch):
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact('test', 'artifact_name', _abs_join(
'foo', 'artifacts', 'bar.log'))
ar.AddArtifact('test', 'artifact_name', _abs_join(
'foo', 'artifacts', 'bam.log'))
move_patch.assert_not_called()
make_patch.assert_called_with(_abs_join('foo', 'artifacts'))
self.assertEqual({k: dict(v) for k, v in ar._test_artifacts.items()}, {
'test': {
'artifact_name': ['artifacts/bar.log', 'artifacts/bam.log'],
}
})
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testIterTestAndArtifacts(self, make_patch, move_patch):
del make_patch, move_patch # unused
ar = artifact_results.ArtifactResults(_abs_join('foo'))
ar.AddArtifact('foo', 'log', _abs_join(
'artifacts', 'foo.log'))
ar.AddArtifact('bar', 'screenshot', _abs_join(
'artifacts', 'bar.jpg'))
test_artifacts = {}
for test_name, artifacts in ar.IterTestAndArtifacts():
test_artifacts[test_name] = artifacts
self.assertEqual({
'foo': {'log': ['artifacts/foo.log']},
'bar': {'screenshot': ['artifacts/bar.jpg']}
}, test_artifacts)
| 35.668675
| 75
| 0.680122
|
c39e90424293fbe3182a84cdbda5e9922943ef7a
| 903
|
py
|
Python
|
cloudmesh/pi/led_bar.py
|
cloudmesh/cloudmesh.pi
|
bdf706b3763031341c41b811749064c293e73c14
|
[
"Apache-2.0"
] | 2
|
2017-09-18T00:56:36.000Z
|
2018-06-01T23:41:23.000Z
|
cloudmesh/pi/led_bar.py
|
cloudmesh/cloudmesh-pi
|
bdf706b3763031341c41b811749064c293e73c14
|
[
"Apache-2.0"
] | 1
|
2018-04-16T18:37:17.000Z
|
2018-04-16T18:37:17.000Z
|
cloudmesh/pi/led_bar.py
|
cloudmesh/cloudmesh.pi
|
bdf706b3763031341c41b811749064c293e73c14
|
[
"Apache-2.0"
] | 3
|
2017-09-20T11:13:54.000Z
|
2017-11-30T23:48:37.000Z
|
import time
import grovepi
class LedBar:
def __init__(self, pin=3, color = 0):
"""
color = 0 starts counting led 1 from the Red LED end
    color = 1 starts counting led 1 from the green LED end
"""
self.ledbar = pin
grovepi.ledBar_init(self.ledbar,color)
def setLevel(self,level = 0):
"""
level = 1-10
    level = 5 turns on LEDs 1 to 5
"""
grovepi.ledBar_setLevel(self.ledbar,level)
def setLED(self, led=1, status=0):
"""
    led = number of the LED to set: 1-10
    status: 1 = on, 0 = off
"""
grovepi.ledBar_setLed(self.ledbar,led,status)
def toggleLED(self, led=0):
"""
Inverts the status of the led
"""
grovepi.ledBar_toggleLed(self.ledbar,led)
if __name__ == '__main__':
ledbar = LedBar()
ledbar.setLevel(5)
time.sleep(0.2)
ledbar.setLED(9,1)
while True:
ledbar.toggleLED(2)
time.sleep(0.2)
| 22.02439
| 55
| 0.61794
|
60daa69403d0d2ec016b501ca295bb328a000dd7
| 4,717
|
py
|
Python
|
netboxapi_client/__main__.py
|
bpetit/netboxapi-client
|
380f0469fd5452997c4a1cf00e0e2dd3f5218edf
|
[
"MIT"
] | 23
|
2017-10-13T12:44:41.000Z
|
2021-04-07T22:27:07.000Z
|
netboxapi_client/__main__.py
|
bpetit/netboxapi-client
|
380f0469fd5452997c4a1cf00e0e2dd3f5218edf
|
[
"MIT"
] | 3
|
2018-02-02T15:14:56.000Z
|
2019-01-10T18:33:50.000Z
|
netboxapi_client/__main__.py
|
bpetit/netboxapi-client
|
380f0469fd5452997c4a1cf00e0e2dd3f5218edf
|
[
"MIT"
] | 6
|
2018-02-01T22:45:08.000Z
|
2019-12-10T21:57:09.000Z
|
#!/usr/bin/python
import argparse
import json
import os, sys
from pprint import pprint
from netboxapi_client import Api, create, show, enum, delete, update, patch
import urllib3
def get_configuration(path="{}/netboxapi.json".format(os.getcwd())):
"""get_configuration
    Returns a dictionary containing all elements written as JSON in the
    configuration file.
:param path:
"""
try:
with open(path) as fd:
return json.load(fd)
except Exception:
print("No configuration file found at {}. Reading environment variables NETBOXAPI_TOKEN and NETBOXAPI_URL.".format(path))
if 'NETBOXAPI_TOKEN' in os.environ and 'NETBOXAPI_URL' in os.environ:
config = { "url": os.environ.get('NETBOXAPI_URL'), "token": os.environ.get('NETBOXAPI_TOKEN') }
return config
else:
print("Configuration not properly defined.")
sys.exit(254)
def main():
parser = argparse.ArgumentParser()
## NOT IMPLEMENTED YET
#parser.add_argument(
# "--url",
# help="URL of the target Netbox instance",
# dest='url'
#)
#parser.add_argument(
# "--token",
# help="Token that should be used for authentication",
# dest='token'
#)
config = get_configuration()
# Disable InsecureRequestWarning
if 'insecure' in config:
if 'True' in config['insecure']:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
api = Api(
url=config['url'],
token=config['token']
)
subparsers = parser.add_subparsers(dest='model')
# Basic error checks
first_call = api.get('')
if first_call.status_code != 200:
print('{}'.format(first_call.text))
sys.exit(2)
mydict = api.get('').json()
FUNCTION_MAP = {
'show': show,
'list': enum,
'create': create,
'delete': delete,
'update': update,
'patch': patch
}
DESCRIPTION_MAP = {
'show': "Shows an object's data.",
'list': "Lists objects.",
'create': "Creates object.",
'delete': "Deletes object",
        'update': "Updates an object. You must fill in all fields you want to be set for that object.",
        'patch': "Updates one or more fields of an object. Unlike the update method, you can specify only the fields you want to update."
}
ARGUMENTS = {
'identifier': {
'option': '-i',
'type': int,
'help': "Numerical id of the object",
'dest': 'id'
},
'data_file': {
'option': '-d',
'type': str,
'help': "Path to the file containing json data.",
'dest': 'data'
},
'name': {
'option': '-n',
'type': str,
'help': "Name of the object.",
'dest': 'name'
},
'configuration_file': {
'option': '-c',
'type': str,
'help': "Path to the json configuration file.",
'dest': 'config'
}
}
for model in mydict.keys():
model_parser = subparsers.add_parser(
model,
help="{0} objects".format(model)
)
model_subparser = model_parser.add_subparsers(dest='object')
for elmt in api.get(model+'/').json():
elmt_parser = model_subparser.add_parser(
elmt,
help="manage {0}".format(elmt)
)
action_subparser = elmt_parser.add_subparsers(dest="action")
for k, v in FUNCTION_MAP.items():
action_parser = action_subparser.add_parser(
k, help="{}".format(DESCRIPTION_MAP[k])
)
for k, arg in ARGUMENTS.items():
action_parser.add_argument(
arg['option'],
type=arg['type'],
help=arg['help'],
dest=arg['dest'],
)
ns = parser.parse_args()
if 'action' in ns:
if 'data' in ns and ns.data:
pprint(
FUNCTION_MAP[ns.action](
api=api,
model=ns.model,
obj=ns.object,
ident=ns.id,
name=ns.name,
data=json.loads(ns.data)
)
)
else:
FUNCTION_MAP[ns.action](
api=api,
model=ns.model,
obj=ns.object,
ident=ns.id,
name=ns.name
)
if __name__ == "__main__":
main()
| 29.48125
| 132
| 0.510282
|
011f26e626261ae0d0f374ebc60108948362473e
| 4,560
|
py
|
Python
|
psaw/download_posts_user.py
|
dbeley/reddit-scraper
|
15c20fc07cac673ff0909f32d3195a21c01aeb53
|
[
"MIT"
] | 8
|
2019-08-24T11:25:13.000Z
|
2022-02-27T18:32:25.000Z
|
psaw/download_posts_user.py
|
dbeley/reddit-scripts
|
15c20fc07cac673ff0909f32d3195a21c01aeb53
|
[
"MIT"
] | 7
|
2020-05-04T04:32:43.000Z
|
2021-06-01T23:45:36.000Z
|
psaw/download_posts_user.py
|
dbeley/reddit-scripts
|
15c20fc07cac673ff0909f32d3195a21c01aeb53
|
[
"MIT"
] | 1
|
2019-06-13T11:30:04.000Z
|
2019-06-13T11:30:04.000Z
|
#!/usr/bin/env python
"""
Download posts from one or several users and export them as xlsx or csv.
"""
from psaw import PushshiftAPI
import argparse
import time
import logging
import pandas as pd
from pathlib import Path
logger = logging.getLogger()
temps_debut = time.time()
COLUMNS = [
"date",
"date_utc",
"author",
# "author_flair_css_class",
# "author_flair_richtext",
# "author_flair_text",
# "author_flair_type",
# "brand_safe",
# "can_mod_post",
# "contest_mode",
"created_utc",
"domain",
"full_link",
# "gilded",
"id",
# "is_crosspostable",
# "is_original_content",
# "is_reddit_media_domain",
# "is_self",
# "is_video",
# "link_flair_background_color",
# "link_flair_css_class",
# "link_flair_richtext",
# "link_flair_template_id",
"link_flair_text",
# "link_flair_text_color",
# "link_flair_type",
# "locked",
# "no_follow",
"num_comments",
# "num_crossposts",
# "over_18",
# "parent_whitelist_status",
"permalink",
# "pinned",
# "post_hint",
# "preview",
# "retrieved_on",
# "rte_mode",
"score",
"selftext",
# "send_replies",
# "spoiler",
# "stickied",
"subreddit",
# "subreddit_id",
"subreddit_subscribers",
# "subreddit_type",
"thumbnail",
# "thumbnail_height",
# "thumbnail_width",
"title",
"url",
# "whitelist_status",
"created",
# "media",
# "media_embed",
# "secure_media",
# "secure_media_embed",
# "approved_at_utc",
# "banned_at_utc",
# "suggested_sort",
# "view_count",
# "author_created_utc",
# "author_fullname",
# "distinguished",
# "author_flair_background_color",
# "author_flair_template_id",
# "author_flair_text_color",
# "author_patreon_flair",
# "gildings",
# "is_meta",
# "is_robot_indexable",
# "media_only",
# "pwls",
# "wls",
# "author_id",
# "all_awardings",
# "allow_live_comments",
# "author_premium",
# "awarders",
# "total_awards_received",
]
def main(args):
api = PushshiftAPI()
folder = "User"
Path(folder).mkdir(parents=True, exist_ok=True)
if args.username:
username = [x.strip() for x in args.username.split(",")]
else:
logger.error("Use -u to set the username")
exit()
for i in username:
try:
df = fetch_posts(api, i)
df["date_utc"] = pd.to_datetime(df["created_utc"], unit="s")
df["date"] = pd.to_datetime(df["created"], unit="s")
df["permalink"] = "https://old.reddit.com" + df["permalink"].astype(str)
df = df[df.columns.intersection(COLUMNS)]
filename = f"{folder}/posts_{int(time.time())}_{i}"
if args.export_format == "xlsx":
writer = pd.ExcelWriter(
f"{filename}.xlsx",
engine="xlsxwriter",
options={"strings_to_urls": False},
)
df.to_excel(writer, sheet_name="Sheet1")
writer.save()
else:
df.to_csv(f"{filename}.csv", index=False, sep="\t")
except Exception as e:
            logger.error("Has that user made any posts? Complete error: %s", e)
logger.info("Runtime : %.2f seconds" % (time.time() - temps_debut))
def fetch_posts(api, username):
res = api.search_submissions(author=username)
df = pd.DataFrame([thing.d_ for thing in res])
return df
def parse_args():
parser = argparse.ArgumentParser(
description="Download all the posts of one or several users"
)
parser.add_argument(
"--debug",
help="Display debugging information",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO,
)
parser.add_argument(
"-u",
"--username",
type=str,
help="The users to download posts from (separated by commas)",
required=True,
)
parser.add_argument(
"--export_format",
type=str,
help="Export format (csv or xlsx). Default : csv",
default="csv",
)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
return args
if __name__ == "__main__":
main(parse_args())
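# Usage sketch (illustrative, not part of the original script): querying a single account's
# submissions directly with fetch_posts; 'example_user' is a made-up username.
def _demo_fetch():
    api = PushshiftAPI()
    return fetch_posts(api, "example_user").head()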
| 25.909091
| 87
| 0.55307
|
cf438d0f12bdfed02b7ae7b612c40f9070484814
| 140
|
py
|
Python
|
pico/ch_19_digital_input.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | 7
|
2022-03-19T18:53:39.000Z
|
2022-03-22T13:41:30.000Z
|
pico/ch_19_digital_input.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | null | null | null |
pico/ch_19_digital_input.py
|
simonmonk/raspberrypi_cookbook_ed4
|
dc320dfae4252f70c812af1dd7739d13d09615c1
|
[
"MIT"
] | null | null | null |
from machine import Pin
from utime import sleep
switch = Pin(16, Pin.IN, Pin.PULL_UP)
while True:
print(switch.value())
sleep(0.1)
| 17.5
| 37
| 0.7
|
76872370a808acd096337adf4b4a9668d69d4641
| 14,298
|
py
|
Python
|
hudson/hudson.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
hudson/hudson.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
hudson/hudson.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
'''
HuDSoN
Created on Jun 17, 2019
@author: Hans Giesen (giesen@seas.upenn.edu)
Copyright 2019 Xilinx, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import argparse
import logging
import os
import numpy as np
import shutil
import signal
import sys
import yaml
tuner_root = os.path.abspath(os.path.join(sys.path[0], '..'))
sys.path.insert(0, os.path.join(tuner_root, 'opentuner'))
sys.path.insert(0, tuner_root)
from measurement.sdsoc_interface import SDSoCInterface
from models.gaussianprocess import GaussianProcess
from models.multifidelitymodels import (FilterModel, MultiFidelityModel,
ScaledSumModel)
from models.randomforest import RandomForest
from opentuner import argparsers
from opentuner.measurement.inputmanager import FixedInputManager
from opentuner.resultsdb.models import HudsonTuningRun, Platform, Test
from opentuner.search.objective import ThresholdAreaMinimizeTime, MinimizeTime
from opentuner.search.technique import register
import opentuner.tuningrunmain
from opentuner.tuningrunmain import TuningRunMain
from search.impactanalysis import ImpactAnalysis
from search.pipelinedbayesopt import PipelinedBayesOpt
from search.singlebuild import SingleBuild
from search.thresbasedbayesopt import ThresBasedBayesOpt
from search.randomsearch import RandomSearch, RandomSearchAvoidErrors
log = logging.getLogger(__name__)
class Hudson(object):
"""
Main class of HuDSoN
Attributes
----------
args : Namespace
Command-line arguments
tuner_cfg : dict
Tuner configuration
"""
def __init__(self):
self.args = None
self.tuner_cfg = None
def main(self):
"""Main function of HuDSoN
"""
signal.signal(signal.SIGUSR2, self.signal_handler)
self.process_args()
self.prepare_output_dir()
self.init_logging()
self.register_techniques()
self.tuner_cfg['tuner_root'] = tuner_root
if self.args.objective == 'MinTime':
objective = MinimizeTime()
else:
resource_types = ['luts', 'regs', 'dsps', 'brams']
constraints = tuple(self.tuner_cfg['platform'][resource_type]
for resource_type in resource_types)
objective = ThresholdAreaMinimizeTime(constraints)
input_manager = FixedInputManager()
interf = SDSoCInterface(self.args, tuner_cfg=self.tuner_cfg,
objective=objective, input_manager=input_manager)
main = TuningRunMain(interf, self.args)
main.fake_commit = False
main.init()
tuning_run = main.tuning_run
tuning_run.seed = self.args.seed
hudson_tuning_run = HudsonTuningRun(args=self.args, cfg=self.tuner_cfg,
tuning_run=tuning_run)
main.session.add(hudson_tuning_run)
main.session.commit()
platform = Platform.get(main.session, self.args.platform)
columns = ['luts', 'regs', 'dsps', 'brams', 'proc_freq', 'axi_bus_width']
for column in columns:
setattr(platform, column, self.tuner_cfg['platform'][column])
main.session.commit()
test = Test.get(main.session, self.args.test, self.args.description)
tuning_run.test = test
tuning_run.platform = platform
main.session.commit()
main.main()
def process_args(self):
"""Process command-line arguments.
"""
parser = argparse.ArgumentParser(description='HuDSoN',
parents=argparsers())
parser.add_argument('cfg_file', type=os.path.abspath,
help='Configuration file')
parser.add_argument('--test', help='Test name')
parser.add_argument('--description', default='', help='Test description')
parser.add_argument('--output-dir', type=os.path.abspath,
help='Output directory')
parser.add_argument('--platform', help='Target platform')
parser.add_argument('--build-host', help='Build host')
parser.add_argument('--seed', type=int, help='Seed')
parser.add_argument('--use-prebuilt-presynth', action='store_true',
help='Use prebuilt presynthesis result')
parser.add_argument('--use-prebuilt-synth', action='store_true',
help='Use prebuilt synthesis result')
parser.add_argument('--use-prebuilt-impl', action='store_true',
help='Use prebuilt implementation result')
parser.add_argument('--original-batching', action='store_true',
help='Do not equalize build times')
parser.add_argument('--explore-offset', type=float, default=0.0,
help='Additive exploration constant')
parser.add_argument('--early-term-thres', type=float, default=1e-8,
help='Early termination threshold')
parser.add_argument('--no-diverse-batches', action='store_true',
help='Disable increasing batch diversity')
parser.add_argument('--relax-bounds', action='store_true',
help='Relax GP kernel bounds')
parser.add_argument('--gp-restarts', type=int, default=2,
help='GP fitting restarts')
parser.add_argument('--objective', default='MinTimeConstrArea',
choices=['MinTime', 'MinTimeConstrArea'],
help='Tuning objective')
parser.add_argument('--no-avoid-errors', action='store_true',
help='Disable error avoidance')
parser.add_argument('--no-corr-clk', action='store_true',
help='No correlation between clock frequencies')
parser.add_argument('--bayes-seed-cnt', type=int, default=8,
help='Seed configurations for Bayesian optimization')
parser.add_argument('--init-div-pen', action='store_true',
help='Enable diversity penalty initially')
parser.add_argument('--opt-log', action='store_true',
help='Optimize log of results.')
parser.add_argument('--no-timeouts', action='store_true',
help='Disable build timeouts.')
parser.add_argument('--no-random-sampling', action='store_true',
help='Disable random sampling.')
parser.add_argument('--adaptive-sampling', action='store_true',
help='Use adaptive sampling.')
parser.add_argument('--no-tool-params', action='store_true',
help='Do not tune tool parameters.')
parser.add_argument('--no-interf-params', action='store_true',
help='Do not tune interface parameters.')
parser.add_argument('--no-csim', action='store_true',
help='Disable C-simulation.')
parser.add_argument('--base-cfg', type=int,
help='Configuration ID for impact analysis')
parser.add_argument('--param-file', help='Configuration file')
parser.add_argument('--use-64-bit-bus', action='store_true',
help='Use a 64-bit data bus.')
parser.add_argument('--no-bugfixes', action='store_true',
help='Disable post-deadline bugfixes.')
args = parser.parse_args()
self.args = args
tuner_cfg = self.load_cfg(args.cfg_file)
self.tuner_cfg = tuner_cfg
tuner_cfg['project_dir'] = os.path.dirname(args.cfg_file)
if args.output_dir:
output_root = args.output_dir
else:
output_root = os.path.join(tuner_cfg['project_dir'], 'output')
tuner_cfg['output_root'] = output_root
if 'log_file' not in tuner_cfg:
tuner_cfg['log_file'] = os.path.join(output_root, 'hudson.log')
if args.platform:
tuner_cfg['platform'] = tuner_cfg['platforms']['types'][args.platform]
else:
tuner_cfg['platform'] = tuner_cfg['platforms']['types']['zcu102']
platform_dir = tuner_cfg['platform']['dir']
if not os.path.isabs(platform_dir):
platform_dir = os.path.join(tuner_root, platform_dir)
tuner_cfg['platform']['dir'] = platform_dir
if not args.database:
if tuner_cfg['database']:
args.database = tuner_cfg['database']
else:
args.database = os.path.join(output_root, 'hudson.db')
if not args.technique:
if 'design_space' in tuner_cfg:
args.technique = ['Bayes']
else:
args.technique = ['MultiFidBayes']
if args.build_host:
queues = ['*@{}'.format(args.build_host)]
tuner_cfg['build_interf']['queues'] = queues
if args.label:
tuner_cfg['job_name'] = args.label
if args.test:
tuner_cfg['job_name'] = '{}_{}'.format(args.test, tuner_cfg['job_name'])
else:
args.test = 'unnamed'
def load_cfg(self, filename):
"""Load the tuner configuration file.
Parameters
----------
filename : str
Configuration filename
Returns
-------
dict
Tuner configuration
"""
with open(filename, 'r') as cfg_file:
data = cfg_file.read()
tuner_cfg = yaml.safe_load(data)
include_file = tuner_cfg.get('include')
if include_file:
if not os.path.isabs(include_file):
include_file = os.path.join(os.path.dirname(filename), include_file)
parent = self.load_cfg(include_file)
parent.update(tuner_cfg)
tuner_cfg = parent
return tuner_cfg
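  # Illustrative sketch of the `include` mechanism handled by load_cfg above
  # (the file names and keys are hypothetical, not taken from the project):
  #
  #   # base.yaml                      # project.yaml
  #   database: null                   include: base.yaml
  #   job_name: hudson                 database: /tmp/hudson.db
  #
  # Loading project.yaml first loads base.yaml recursively, then applies the
  # child's keys on top, so `database` resolves to /tmp/hudson.db while
  # `job_name` keeps the parent's value.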
def prepare_output_dir(self):
"""Prepare the output directory.
"""
output_dir = self.tuner_cfg['output_root']
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
@staticmethod
def signal_handler(signum, frame):
"""Raise a keyboard interrupt when a USR2 signal is caught.
"""
log.info('Received signal USR2.')
raise KeyboardInterrupt
def init_logging(self):
"""Initialize the logging.
"""
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {'console': {'format': '[%(relativeCreated)6.0fs] '
'%(levelname)7s: '
'%(message)s'},
'file': {'format': '[%(asctime)-15s] '
'%(levelname)7s: '
'%(message)s '
'@%(filename)s:%(lineno)d'}},
'handlers': {'console': {'class': 'logging.StreamHandler',
'formatter': 'console',
'level': 'INFO'},
'file': {'class': 'logging.FileHandler',
'filename': self.tuner_cfg['log_file'],
'formatter': 'file',
'level': 'INFO'}},
'loggers': {'': {'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': True}}})
# Avoid initializing the logging again.
opentuner.tuningrunmain.init_logging = lambda: None
def register_techniques(self):
"""Register search techniques.
"""
register(RandomSearch(name="Random"))
register(RandomSearchAvoidErrors(GaussianProcess(),
name="RandomAvoidErrors"))
register(ImpactAnalysis(name='ImpactAnalysis', base_cfg=self.args.base_cfg))
register(SingleBuild(name='SingleBuild', param_file=self.args.param_file))
gp_params = {'relax_bounds': self.args.relax_bounds,
'restarts': self.args.gp_restarts,
'avoid_errors': not self.args.no_avoid_errors}
if self.args.objective == 'MinTime':
metrics = ['error_prob', 'run_time']
else:
metrics = ['error_prob', 'run_time', 'luts', 'regs', 'dsps', 'brams']
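    # Each metric gets a three-level multi-fidelity model below: a base model
    # for the cheapest fidelity plus two stacked correction models
    # (FilterModel corrections for the binary error-probability metric,
    # ScaledSumModel corrections for the continuous metrics).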
techniques = [('ThresBasedMultiFidBayes', ThresBasedBayesOpt),
('PipelinedMultiFidBayes', PipelinedBayesOpt)]
for name, cls in techniques:
models = []
for metric in metrics:
if metric == 'error_prob':
submodels = [GaussianProcess(binary=True, **gp_params),
FilterModel(GaussianProcess(binary=True, **gp_params)),
FilterModel(GaussianProcess(binary=True, **gp_params))]
else:
submodels = [GaussianProcess(**gp_params),
ScaledSumModel(GaussianProcess(**gp_params)),
ScaledSumModel(GaussianProcess(**gp_params))]
models.append(MultiFidelityModel(metric, submodels))
register(cls(models, name=name, args=self.args))
models = []
for metric in metrics:
if metric == 'error_prob':
submodels = [RandomForest(),
FilterModel(RandomForest()),
FilterModel(RandomForest())]
else:
submodels = [RandomForest(),
ScaledSumModel(RandomForest()),
ScaledSumModel(RandomForest())]
models.append(MultiFidelityModel(metric, submodels))
register(ThresBasedBayesOpt(models, name='RandomForest', args=self.args))
models = []
for metric in metrics:
if metric == 'error_prob':
models.append(GaussianProcess(metric, binary=True, **gp_params))
else:
models.append(GaussianProcess(metric, **gp_params))
register(ThresBasedBayesOpt(models, name="Bayes", args=self.args))
if __name__ == '__main__':
Hudson().main()
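# Example invocation (illustrative; the script name, paths and values below
# are hypothetical, not taken from the project):
#   python hudson.py project/hudson.yaml --test matmul --platform zcu102 \
#       --technique Bayes --seed 1 --output-dir /tmp/hudson_out
# Only the positional cfg_file is required; every option shown falls back to
# the defaults resolved in process_args().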
| 38.435484 | 80 | 0.634914 |
| be69d9f05a9c5a7b416110d70052188d817e8962 | 125 | py | Python |
| ChineseChef.py | shyed2001/Python_Programming | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | ["bzip2-1.0.6"] | 2 | 2019-05-01T04:32:14.000Z | 2019-05-04T11:28:18.000Z |
| ChineseChef.py | shyed2001/python-learning-basics | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | ["bzip2-1.0.6"] | null | null | null |
| ChineseChef.py | shyed2001/python-learning-basics | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | ["bzip2-1.0.6"] | null | null | null |
from Chef import Chef
class ChineseChef(Chef):
def make_Fried_rice(self):
print("The chef makes Fried_rice")
| 25 | 42 | 0.696 |
| ba8a2ca27cf007cce547ab126d2ce6a440d074be | 952 | py | Python |
| dc17/dates.py | lamby/dc17.dc.o | 83cc79ad6f627b8c203fe62b7fcc9ff7abd84ee4 | ["0BSD"] | null | null | null |
| dc17/dates.py | lamby/dc17.dc.o | 83cc79ad6f627b8c203fe62b7fcc9ff7abd84ee4 | ["0BSD"] | null | null | null |
| dc17/dates.py | lamby/dc17.dc.o | 83cc79ad6f627b8c203fe62b7fcc9ff7abd84ee4 | ["0BSD"] | null | null | null |
import datetime
def meals(orga=False):
day = datetime.date(2017, 7, 31)
if orga:
day = datetime.date(2017, 7, 28)
while day <= datetime.date(2017, 8, 13):
yield ('breakfast', day)
if day < datetime.date(2017, 8, 13):
yield ('lunch', day)
yield ('dinner', day)
day += datetime.timedelta(days=1)
def meal_choices(orga=False):
for meal, date in meals(orga=orga):
date = date.isoformat()
yield ('{}_{}'.format(meal, date),
'{} {}'.format(meal.title(), date))
def nights(orga=False):
day = datetime.date(2016, 7, 31)
if orga:
day = datetime.date(2016, 7, 28)
while day <= datetime.date(2016, 8, 13):
yield day
day += datetime.timedelta(days=1)
def night_choices(orga=False):
for date in nights(orga=orga):
date = date.isoformat()
yield ('night_{}'.format(date), 'Night of {}'.format(date))
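# Illustrative helper (an addition for demonstration, not part of the original
# site code): materializes the generators defined above.
def _example_choices():
    """Return the (value, label) pairs from meal_choices() and night_choices();
    the first meal entry comes out as
    ('breakfast_2017-07-31', 'Breakfast 2017-07-31')."""
    return list(meal_choices()), list(night_choices())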
| 26.444444 | 67 | 0.565126 |
| 6d198fd1f6c3cf25bda8b37324395cdda602ba49 | 17,185 | py | Python |
| representation_batch_rl/representation_batch_rl/vpn_pixels.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | ["Apache-2.0"] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z |
| representation_batch_rl/representation_batch_rl/vpn_pixels.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | ["Apache-2.0"] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z |
| representation_batch_rl/representation_batch_rl/vpn_pixels.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | ["Apache-2.0"] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Value prediction network implementation.
"""
import typing
from dm_env import specs as dm_env_specs
import numpy as np
import tensorflow as tf
from representation_batch_rl.batch_rl import critic
from representation_batch_rl.batch_rl.encoders import ConvStack
from representation_batch_rl.batch_rl.encoders import ImageEncoder
from representation_batch_rl.batch_rl.encoders import make_impala_cnn_network
from representation_batch_rl.representation_batch_rl import tf_utils
class ValuePredictionNetworkLearner(tf.keras.Model):
"""A learner for model-based representation learning.
Encompasses forward models, inverse models, as well as latent models like
DeepMDP.
"""
def __init__(self,
observation_spec,
action_spec,
embedding_dim = 256,
hidden_dims = (256, 256),
sequence_length = 2,
learning_rate=None,
discount = 0.95,
target_update_period = 1000,
num_augmentations=0,
rep_learn_keywords = 'outer',
batch_size = 256):
"""Creates networks.
Args:
observation_spec: State spec.
action_spec: Action spec.
embedding_dim: Embedding size.
hidden_dims: List of hidden dimensions.
sequence_length: Expected length of sequences provided as input.
learning_rate: Learning rate.
discount: discount factor.
      target_update_period: How frequently to update the target network.
num_augmentations: Number of DrQ random crops.
rep_learn_keywords: Representation learning loss to add.
batch_size: batch size.
"""
super().__init__()
action_dim = action_spec.maximum.item()+1
self.observation_spec = observation_spec
self.action_dim = action_dim
self.action_spec = action_spec
self.embedding_dim = embedding_dim
self.sequence_length = sequence_length
self.discount = discount
self.tau = 0.005
self.discount = 0.99
self.target_update_period = target_update_period
self.num_augmentations = num_augmentations
self.rep_learn_keywords = rep_learn_keywords.split('__')
self.batch_size = batch_size
critic_kwargs = {}
if observation_spec.shape == (64, 64, 3):
# IMPALA for Procgen
def conv_stack():
return make_impala_cnn_network(
depths=[16, 32, 32], use_batch_norm=False, dropout_rate=0.)
state_dim = 256
else:
# Reduced architecture for DMC
def conv_stack():
return ConvStack(observation_spec.shape)
state_dim = 50
conv_stack_critic = conv_stack()
conv_target_stack_critic = conv_stack()
if observation_spec.shape == (64, 64, 3):
conv_stack_critic.output_size = state_dim
conv_target_stack_critic.output_size = state_dim
critic_kwargs['encoder'] = ImageEncoder(
conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)
critic_kwargs['encoder_target'] = ImageEncoder(
conv_target_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)
self.embedder = tf_utils.EmbedNet(
state_dim,
embedding_dim=self.embedding_dim,
hidden_dims=hidden_dims)
self.f_value = tf_utils.create_mlp(
self.embedding_dim, 1, hidden_dims=hidden_dims,
activation=tf.nn.swish)
self.f_value_target = tf_utils.create_mlp(
self.embedding_dim, 1, hidden_dims=hidden_dims,
activation=tf.nn.swish)
self.f_trans = tf_utils.create_mlp(
self.embedding_dim + self.embedding_dim, self.embedding_dim,
hidden_dims=hidden_dims,
activation=tf.nn.swish)
self.f_out = tf_utils.create_mlp(
self.embedding_dim + self.embedding_dim, 2,
hidden_dims=hidden_dims,
activation=tf.nn.swish)
self.action_encoder = tf.keras.Sequential(
[
tf.keras.layers.Dense(
self.embedding_dim, use_bias=True
), # , kernel_regularizer=tf.keras.regularizers.l2(WEIGHT_DECAY)
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(self.embedding_dim)
],
name='action_encoder')
if self.num_augmentations == 0:
dummy_state = tf.constant(
np.zeros(shape=[1] + list(observation_spec.shape)))
self.obs_spec = list(observation_spec.shape)
else: # account for padding of +4 everywhere and then cropping out 68
dummy_state = tf.constant(np.zeros(shape=[1, 68, 68, 3]))
self.obs_spec = [68, 68, 3]
@tf.function
def init_models():
critic_kwargs['encoder'](dummy_state)
critic_kwargs['encoder_target'](dummy_state)
self.action_encoder(
tf.cast(tf.one_hot([1], depth=action_dim), tf.float32))
init_models()
self.critic = critic.Critic(
state_dim,
action_dim,
hidden_dims=hidden_dims,
encoder=critic_kwargs['encoder'],
discrete_actions=True,
linear='linear_Q' in self.rep_learn_keywords)
self.critic_target = critic.Critic(
state_dim,
action_dim,
hidden_dims=hidden_dims,
encoder=critic_kwargs['encoder_target'],
discrete_actions=True,
linear='linear_Q' in self.rep_learn_keywords)
@tf.function
def init_models2():
dummy_state = tf.zeros((1, 68, 68, 3), dtype=tf.float32)
phi_s = self.critic.encoder(dummy_state)
phi_a = tf.eye(15, dtype=tf.float32)
if 'linear_Q' in self.rep_learn_keywords:
_ = self.critic.critic1.state_encoder(phi_s)
_ = self.critic.critic2.state_encoder(phi_s)
_ = self.critic.critic1.action_encoder(phi_a)
_ = self.critic.critic2.action_encoder(phi_a)
_ = self.critic_target.critic1.state_encoder(phi_s)
_ = self.critic_target.critic2.state_encoder(phi_s)
_ = self.critic_target.critic1.action_encoder(phi_a)
_ = self.critic_target.critic2.action_encoder(phi_a)
init_models2()
critic.soft_update(self.critic, self.critic_target, tau=1.0)
critic.soft_update(self.f_value, self.f_value_target, tau=1.0)
learning_rate = learning_rate or 1e-4
self.optimizer = tf.keras.optimizers.Adam(learning_rate=3e-4)
self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
self.all_variables = (
self.embedder.trainable_variables + self.f_value.trainable_variables +
self.f_value_target.trainable_variables +
self.f_trans.trainable_variables + self.f_out.trainable_variables +
self.critic.trainable_variables +
self.critic_target.trainable_variables)
self.model_dict = {
'action_encoder': self.action_encoder,
'f_out': self.f_out,
'f_trans': self.f_trans,
'f_value_target': self.f_value_target,
'f_value': self.f_value,
'embedder': self.embedder,
'critic': self.critic,
'critic_target': self.critic_target,
'critic_optimizer': self.critic_optimizer,
'optimizer': self.optimizer
}
@tf.function
def call(self,
states,
actions=None,
stop_gradient = True):
"""Returns embedding.
Args:
states: A batch of states.
actions: batch of actions
stop_gradient: Whether to stop_gradient.
Returns:
Embedding.
"""
features = self.critic.encoder(states)
return self.embedder(features, stop_gradient=stop_gradient)
def compute_energy(self, embeddings,
other_embeddings):
"""Computes matrix of energies between every pair of (embedding, other_embedding)."""
transformed_embeddings = tf.matmul(embeddings, self.weight)
energies = tf.matmul(
transformed_embeddings, other_embeddings, transpose_b=True)
return energies
def fit_embedding(self, states, actions,
next_states, next_actions,
rewards,
discounts):
"""Updates critic parameters.
Args:
states: Batch of states.
actions: Batch of actions.
next_states: Batch of next states.
next_actions: batch of next actions
rewards: Batch of rewards.
discounts: Batch of masks indicating the end of the episodes.
Returns:
Dictionary with information to track.
"""
states = tf.transpose(
tf.stack([states, next_states])[:, 0], (1, 0, 2, 3, 4))
batch_size = tf.shape(states)[0]
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.all_variables)
actions = tf.transpose(
tf.one_hot(tf.stack([actions, next_actions]), depth=self.action_dim),
(1, 0, 2))
actions = tf.reshape(actions,
[batch_size * self.sequence_length, self.action_dim])
actions = self.action_encoder(actions)
actions = tf.reshape(
actions, [batch_size, self.sequence_length, self.embedding_dim])
all_states = tf.reshape(states, [batch_size * self.sequence_length] +
self.obs_spec)
all_features = self.critic.encoder(all_states)
all_embeddings = self.embedder(all_features, stop_gradient=False)
embeddings = tf.reshape(
all_embeddings,
[batch_size, self.sequence_length, self.embedding_dim])[:, 0, :]
all_pred_values = []
all_pred_rewards = []
all_pred_discounts = []
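      # k-step latent rollout: starting from the embedding of the first
      # observation, predict value, reward and discount at every step and
      # advance the latent state with the residual transition model f_trans.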
for idx in range(self.sequence_length):
pred_value = self.f_value(embeddings)[Ellipsis, 0]
pred_reward, pred_discount = tf.unstack(
self.f_out(tf.concat([embeddings, actions[:, idx, :]], -1)),
axis=-1)
pred_embeddings = embeddings + self.f_trans(
tf.concat([embeddings, actions[:, idx, :]], -1))
all_pred_values.append(pred_value)
all_pred_rewards.append(pred_reward)
all_pred_discounts.append(pred_discount)
embeddings = pred_embeddings
last_value = tf.stop_gradient(
self.f_value_target(embeddings)[Ellipsis, 0]) / (1 - self.discount)
all_true_values = []
# for idx in range(self.sequence_length - 1, -1, -1):
value = self.discount * discounts * last_value + rewards #[:, idx]
all_true_values.append(value)
last_value = value
all_true_values = all_true_values[::-1]
reward_error = tf.stack(all_pred_rewards, -1)[:, 0] - rewards
value_error = tf.stack(
all_pred_values,
-1) - (1 - self.discount) * tf.stack(all_true_values, -1)
reward_loss = tf.reduce_sum(tf.math.square(reward_error), -1)
value_loss = tf.reduce_sum(tf.math.square(value_error), -1)
loss = tf.reduce_mean(reward_loss + value_loss)
grads = tape.gradient(loss, self.all_variables)
self.optimizer.apply_gradients(
zip(grads, self.all_variables))
if self.optimizer.iterations % self.target_update_period == 0:
critic.soft_update(self.f_value, self.f_value_target, tau=self.tau)
return {
'embed_loss': loss,
'reward_loss': tf.reduce_mean(reward_loss),
'value_loss': tf.reduce_mean(value_loss),
}
def fit_critic(self, states, actions,
next_states, next_actions,
rewards,
discounts):
"""Updates critic parameters.
Args:
states: Batch of states.
actions: Batch of actions.
next_states: Batch of next states.
next_actions: Batch of next actions from training policy.
rewards: Batch of rewards.
discounts: Batch of masks indicating the end of the episodes.
Returns:
Dictionary with information to track.
"""
action_indices = tf.stack(
[tf.range(tf.shape(actions)[0], dtype=tf.int64), actions], axis=-1)
next_action_indices = tf.stack(
[tf.range(tf.shape(next_actions)[0], dtype=tf.int64), next_actions],
axis=-1)
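    # The target computed below is a clipped double-Q estimate: the reward plus
    # the discounted element-wise minimum of the two target critic heads,
    # averaged over the DrQ crops whenever image augmentation is enabled.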
if self.num_augmentations > 0:
target_q = 0.
for i in range(self.num_augmentations):
next_q1_i, next_q2_i = self.critic_target(next_states[i], actions=None)
target_q_i = tf.expand_dims(
rewards, 1) + self.discount * tf.expand_dims(
discounts, 1) * tf.minimum(next_q1_i, next_q2_i)
target_q += target_q_i
target_q /= self.num_augmentations
else:
next_q1, next_q2 = self.critic_target(next_states, actions=None)
target_q = tf.expand_dims(rewards, 1) + self.discount * tf.expand_dims(
discounts, 1) * tf.minimum(next_q1, next_q2)
target_q = tf.gather_nd(target_q, indices=next_action_indices)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.critic.trainable_variables)
if self.num_augmentations > 0:
critic_loss = 0.
q1 = 0.
q2 = 0.
for i in range(self.num_augmentations):
q1_i, q2_i = self.critic(
states[i], stop_grad_features=True, actions=None)
critic_loss_i = (
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q1_i, indices=action_indices)) +
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q2_i, indices=action_indices)))
q1 += q1_i
q2 += q2_i
critic_loss += critic_loss_i
q1 /= self.num_augmentations
q2 /= self.num_augmentations
critic_loss /= self.num_augmentations
else:
q1, q2 = self.critic(states, stop_grad_features=True, actions=None)
critic_loss = (
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q1, indices=action_indices)) +
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q2, indices=action_indices)))
critic_grads = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(
zip(critic_grads, self.critic.trainable_variables))
critic.soft_update(self.critic, self.critic_target, tau=self.tau)
return {
'q1': tf.reduce_mean(q1),
'q2': tf.reduce_mean(q2),
'critic_loss': critic_loss
}
@tf.function
def update_step(self,
replay_buffer_iter,
train_target='both'):
transition = next(replay_buffer_iter)
numpy_dataset = isinstance(replay_buffer_iter, np.ndarray)
if not numpy_dataset:
states = transition.observation[:, 0]
next_states = transition.observation[:, 1]
actions = transition.action[:, 0]
rewards = transition.reward[:, 0]
discounts = transition.discount[:, 0]
if transition.observation.dtype == tf.uint8:
states = tf.cast(states, tf.float32) / 255.
next_states = tf.cast(next_states, tf.float32) / 255.
else:
states, actions, rewards, next_states, discounts = transition
if self.num_augmentations > 0:
states, next_states = tf_utils.image_aug(
states,
next_states,
img_pad=4,
num_augmentations=self.num_augmentations,
obs_dim=64,
channels=3,
cropped_shape=[self.batch_size, 68, 68, 3])
next_actions = self.act(next_states, data_aug=True)
if train_target == 'both':
ssl_dict = self.fit_embedding(states, actions, next_states, next_actions,
rewards, discounts)
critic_dict = self.fit_critic(states, actions, next_states, next_actions,
rewards, discounts)
elif train_target == 'encoder':
ssl_dict = self.fit_embedding(states, actions, next_states, next_actions,
rewards, discounts)
critic_dict = {}
elif train_target == 'rl':
ssl_dict = {}
critic_dict = self.fit_critic(states, actions, next_states, next_actions,
rewards, discounts)
return {**ssl_dict, **critic_dict}
def get_input_state_dim(self):
return self.embedder.embedding_dim
@tf.function
def act(self, states, data_aug=False):
if data_aug and self.num_augmentations > 0:
states = states[0]
if self.num_augmentations > 0:
# use pad of 2 to bump 64 to 68 with 2 + 64 + 2 on each side
img_pad = 2
paddings = tf.constant(
[[0, 0], [img_pad, img_pad], [img_pad, img_pad], [0, 0]],
dtype=tf.int32)
states = tf.cast(
tf.pad(tf.cast(states * 255., tf.int32), paddings, 'SYMMETRIC'),
tf.float32) / 255.
q1, q2 = self.critic(states, stop_grad_features=True, actions=None)
q = tf.minimum(q1, q2)
actions = tf.argmax(q, -1)
return actions
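# Minimal construction sketch (illustrative; it assumes Procgen-style 64x64x3
# observations and 15 discrete actions, which are assumptions rather than
# values taken from this module):
#
#   obs_spec = dm_env_specs.Array(shape=(64, 64, 3), dtype=np.uint8)
#   act_spec = dm_env_specs.BoundedArray((), np.int64, minimum=0, maximum=14)
#   learner = ValuePredictionNetworkLearner(obs_spec, act_spec,
#                                           num_augmentations=0)
#
# update_step() then expects an iterator of transitions whose observation,
# action, reward and discount tensors carry a leading [batch, 2, ...] pair of
# consecutive steps, matching the unpacking at the top of update_step.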
| 35.951883 | 89 | 0.654001 |
| 3fefd0a4b142e905b89e34411e51075216e4a6ec | 23,778 | py | Python |
| planemo/galaxy/activity.py | martin-raden/planemo | e14fc36550934efa9dab42efc1a0db04dd138294 | ["CC-BY-3.0"] | null | null | null |
| planemo/galaxy/activity.py | martin-raden/planemo | e14fc36550934efa9dab42efc1a0db04dd138294 | ["CC-BY-3.0"] | null | null | null |
| planemo/galaxy/activity.py | martin-raden/planemo | e14fc36550934efa9dab42efc1a0db04dd138294 | ["CC-BY-3.0"] | null | null | null |
"""Module provides generic interface to running Galaxy tools and workflows."""
import json
import os
import tempfile
import time
import bioblend
import requests
import yaml
from bioblend.galaxy.client import Client
from bioblend.util import attach_file
from galaxy.tools.cwl.util import (
DirectoryUploadTarget,
FileUploadTarget,
galactic_job_json,
invocation_to_output,
output_properties,
output_to_cwl_json,
path_or_uri_to_uri,
tool_response_to_output,
)
from galaxy.tools.parser import get_tool_source
from galaxy.util import (
safe_makedirs,
unicodify,
)
from planemo.galaxy.api import summarize_history
from planemo.io import wait_on
from planemo.runnable import (
ErrorRunResponse,
get_outputs,
RunnableType,
SuccessfulRunResponse,
)
DEFAULT_HISTORY_NAME = "CWL Target History"
ERR_NO_SUCH_TOOL = ("Failed to find tool with ID [%s] in Galaxy - cannot execute job. "
"You may need to enable verbose logging and determine why the tool did not load. [%s]")
def execute(ctx, config, runnable, job_path, **kwds):
"""Execute a Galaxy activity."""
try:
return _execute(ctx, config, runnable, job_path, **kwds)
except Exception as e:
return ErrorRunResponse(unicodify(e))
def _verified_tool_id(runnable, user_gi):
tool_id = _tool_id(runnable.path)
try:
user_gi.tools.show_tool(tool_id)
except Exception as e:
raise Exception(ERR_NO_SUCH_TOOL % (tool_id, e))
return tool_id
def _inputs_representation(runnable):
if runnable.type == RunnableType.cwl_tool:
inputs_representation = "cwl"
else:
inputs_representation = "galaxy"
return inputs_representation
def log_contents_str(config):
if hasattr(config, "log_contents"):
return config.log_contents
else:
return "No log for this engine type."
def _execute(ctx, config, runnable, job_path, **kwds):
user_gi = config.user_gi
admin_gi = config.gi
history_id = _history_id(user_gi, **kwds)
job_dict, _ = stage_in(ctx, runnable, config, user_gi, history_id, job_path, **kwds)
if runnable.type in [RunnableType.galaxy_tool, RunnableType.cwl_tool]:
response_class = GalaxyToolRunResponse
tool_id = _verified_tool_id(runnable, user_gi)
inputs_representation = _inputs_representation(runnable)
run_tool_payload = dict(
history_id=history_id,
tool_id=tool_id,
inputs=job_dict,
inputs_representation=inputs_representation,
)
ctx.vlog("Post to Galaxy tool API with payload [%s]" % run_tool_payload)
tool_run_response = user_gi.tools._post(run_tool_payload)
job = tool_run_response["jobs"][0]
job_id = job["id"]
try:
final_state = _wait_for_job(user_gi, job_id)
except Exception:
summarize_history(ctx, user_gi, history_id)
raise
if final_state != "ok":
msg = "Failed to run CWL tool job final job state is [%s]." % final_state
summarize_history(ctx, user_gi, history_id)
with open("errored_galaxy.log", "w") as f:
f.write(log_contents_str(config))
raise Exception(msg)
ctx.vlog("Final job state was ok, fetching details for job [%s]" % job_id)
job_info = admin_gi.jobs.show_job(job_id)
response_kwds = {
'job_info': job_info,
'api_run_response': tool_run_response,
}
if ctx.verbose:
summarize_history(ctx, user_gi, history_id)
elif runnable.type in [RunnableType.galaxy_workflow, RunnableType.cwl_workflow]:
response_class = GalaxyWorkflowRunResponse
workflow_id = config.workflow_id(runnable.path)
ctx.vlog("Found Galaxy workflow ID [%s] for path [%s]" % (workflow_id, runnable.path))
# TODO: update bioblend to allow inputs_by.
        # invocation = user_gi.workflows.invoke_workflow(
# workflow_id,
# history_id=history_id,
# inputs=job_dict,
# )
payload = dict(
workflow_id=workflow_id,
history_id=history_id,
inputs=job_dict,
inputs_by="name",
allow_tool_state_corrections=True,
)
invocations_url = "%s/%s/invocations" % (
user_gi._make_url(user_gi.workflows),
workflow_id,
)
invocation = Client._post(user_gi.workflows, payload, url=invocations_url)
invocation_id = invocation["id"]
ctx.vlog("Waiting for invocation [%s]" % invocation_id)
polling_backoff = kwds.get("polling_backoff", 0)
try:
final_invocation_state = _wait_for_invocation(ctx, user_gi, history_id, workflow_id, invocation_id, polling_backoff)
except Exception:
ctx.vlog("Problem waiting on invocation...")
summarize_history(ctx, user_gi, history_id)
raise
ctx.vlog("Final invocation state is [%s]" % final_invocation_state)
final_state = _wait_for_history(ctx, user_gi, history_id, polling_backoff)
if final_state != "ok":
msg = "Failed to run workflow final history state is [%s]." % final_state
summarize_history(ctx, user_gi, history_id)
with open("errored_galaxy.log", "w") as f:
f.write(log_contents_str(config))
raise Exception(msg)
ctx.vlog("Final history state is 'ok'")
response_kwds = {
'workflow_id': workflow_id,
'invocation_id': invocation_id,
}
else:
raise NotImplementedError()
run_response = response_class(
ctx=ctx,
runnable=runnable,
user_gi=user_gi,
history_id=history_id,
log=log_contents_str(config),
**response_kwds
)
output_directory = kwds.get("output_directory", None)
ctx.vlog("collecting outputs from run...")
run_response.collect_outputs(ctx, output_directory)
ctx.vlog("collecting outputs complete")
return run_response
def stage_in(ctx, runnable, config, user_gi, history_id, job_path, **kwds):
files_attached = [False]
def upload_func(upload_target):
def _attach_file(upload_payload, uri, index=0):
uri = path_or_uri_to_uri(uri)
is_path = uri.startswith("file://")
if not is_path or config.use_path_paste:
upload_payload["inputs"]["files_%d|url_paste" % index] = uri
else:
files_attached[0] = True
path = uri[len("file://"):]
upload_payload["files_%d|file_data" % index] = attach_file(path)
if isinstance(upload_target, FileUploadTarget):
file_path = upload_target.path
upload_payload = user_gi.tools._upload_payload(
history_id,
file_type=upload_target.properties.get('filetype', None) or "auto",
)
name = os.path.basename(file_path)
upload_payload["inputs"]["files_0|auto_decompress"] = False
upload_payload["inputs"]["auto_decompress"] = False
_attach_file(upload_payload, file_path)
upload_payload["inputs"]["files_0|NAME"] = name
if upload_target.secondary_files:
_attach_file(upload_payload, upload_target.secondary_files, index=1)
upload_payload["inputs"]["files_1|type"] = "upload_dataset"
upload_payload["inputs"]["files_1|auto_decompress"] = True
upload_payload["inputs"]["file_count"] = "2"
upload_payload["inputs"]["force_composite"] = "True"
ctx.vlog("upload_payload is %s" % upload_payload)
return user_gi.tools._post(upload_payload, files_attached=files_attached[0])
elif isinstance(upload_target, DirectoryUploadTarget):
tar_path = upload_target.tar_path
upload_payload = user_gi.tools._upload_payload(
history_id,
file_type="tar",
)
upload_payload["inputs"]["files_0|auto_decompress"] = False
_attach_file(upload_payload, tar_path)
tar_upload_response = user_gi.tools._post(upload_payload, files_attached=files_attached[0])
convert_response = user_gi.tools.run_tool(
tool_id="CONVERTER_tar_to_directory",
tool_inputs={"input1": {"src": "hda", "id": tar_upload_response["outputs"][0]["id"]}},
history_id=history_id,
)
assert "outputs" in convert_response, convert_response
return convert_response
else:
content = json.dumps(upload_target.object)
return user_gi.tools.paste_content(
content,
history_id,
file_type="expression.json",
)
def create_collection_func(element_identifiers, collection_type):
payload = {
"name": "dataset collection",
"instance_type": "history",
"history_id": history_id,
"element_identifiers": element_identifiers,
"collection_type": collection_type,
"fields": None if collection_type != "record" else "auto",
}
dataset_collections_url = user_gi.url + "/dataset_collections"
dataset_collection = Client._post(user_gi.histories, payload, url=dataset_collections_url)
return dataset_collection
with open(job_path, "r") as f:
job = yaml.safe_load(f)
# Figure out what "." should be here instead.
job_dir = os.path.dirname(job_path)
job_dict, datasets = galactic_job_json(
job,
job_dir,
upload_func,
create_collection_func,
tool_or_workflow="tool" if runnable.type in [RunnableType.cwl_tool, RunnableType.galaxy_tool] else "workflow",
)
if datasets:
final_state = _wait_for_history(ctx, user_gi, history_id)
for (dataset, path) in datasets:
dataset_details = user_gi.histories.show_dataset(
history_id,
dataset["id"],
)
ctx.vlog("Uploaded dataset for path [%s] with metadata [%s]" % (path, dataset_details))
else:
# Mark uploads as ok because nothing to do.
final_state = "ok"
ctx.vlog("final state is %s" % final_state)
if final_state != "ok":
msg = "Failed to run job final job state is [%s]." % final_state
summarize_history(ctx, user_gi, history_id)
with open("errored_galaxy.log", "w") as f:
f.write(log_contents_str(config))
raise Exception(msg)
return job_dict, datasets
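# Illustrative job file sketch (hypothetical names and values) for the staging
# code above; galactic_job_json() walks a structure like this and calls
# upload_func for every file it encounters, resolving paths relative to the
# directory that contains the job file:
#
#   input_dataset:
#     class: File
#     path: input.fastq
#   threshold: 0.05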
class GalaxyBaseRunResponse(SuccessfulRunResponse):
def __init__(
self,
ctx,
runnable,
user_gi,
history_id,
log,
):
self._ctx = ctx
self._runnable = runnable
self._user_gi = user_gi
self._history_id = history_id
self._log = log
self._job_info = None
self._outputs_dict = None
def to_galaxy_output(self, output):
"""Convert runnable output to a GalaxyOutput object.
Subclasses for workflow and tool execution override this.
"""
raise NotImplementedError()
def _get_extra_files(self, dataset_details):
extra_files_url = "%s/%s/contents/%s/extra_files" % (
self._user_gi._make_url(self._user_gi.histories), self._history_id, dataset_details["id"]
)
extra_files = Client._get(self._user_gi.jobs, url=extra_files_url)
return extra_files
def _get_metadata(self, history_content_type, content_id):
if history_content_type == "dataset":
return self._user_gi.histories.show_dataset(
self._history_id,
content_id,
)
elif history_content_type == "dataset_collection":
return self._user_gi.histories.show_dataset_collection(
self._history_id,
content_id,
)
else:
raise Exception("Unknown history content type encountered [%s]" % history_content_type)
def collect_outputs(self, ctx, output_directory):
assert self._outputs_dict is None, "collect_outputs pre-condition violated"
outputs_dict = {}
if not output_directory:
# TODO: rather than creating a directory just use
# Galaxy paths if they are available in this
# configuration.
output_directory = tempfile.mkdtemp()
def get_dataset(dataset_details, filename=None):
parent_basename = dataset_details.get("cwl_file_name")
if not parent_basename:
parent_basename = dataset_details.get("name")
file_ext = dataset_details["file_ext"]
if file_ext == "directory":
# TODO: rename output_directory to outputs_directory because we can have output directories
# and this is confusing...
the_output_directory = os.path.join(output_directory, parent_basename)
safe_makedirs(the_output_directory)
destination = self.download_output_to(dataset_details, the_output_directory, filename=filename)
else:
destination = self.download_output_to(dataset_details, output_directory, filename=filename)
if filename is None:
basename = parent_basename
else:
basename = os.path.basename(filename)
return {"path": destination, "basename": basename}
ctx.vlog("collecting outputs to directory %s" % output_directory)
for runnable_output in get_outputs(self._runnable):
output_id = runnable_output.get_id()
if not output_id:
ctx.vlog("Workflow output identified without an ID (label), skipping")
continue
output_dict_value = None
if self._runnable.type in [RunnableType.cwl_workflow, RunnableType.cwl_tool]:
galaxy_output = self.to_galaxy_output(runnable_output)
cwl_output = output_to_cwl_json(
galaxy_output,
self._get_metadata,
get_dataset,
self._get_extra_files,
pseduo_location=True,
)
output_dict_value = cwl_output
else:
# TODO: deprecate this route for finding workflow outputs,
# it is a brittle and bad approach...
output_dataset_id = self.output_dataset_id(runnable_output)
dataset = self._get_metadata("dataset", output_dataset_id)
dataset_dict = get_dataset(dataset)
ctx.vlog("populated destination [%s]" % dataset_dict["path"])
if dataset["file_ext"] == "expression.json":
with open(dataset_dict["path"], "r") as f:
output_dict_value = json.load(f)
else:
output_dict_value = output_properties(**dataset_dict)
outputs_dict[output_id] = output_dict_value
self._outputs_dict = outputs_dict
ctx.vlog("collected outputs [%s]" % self._outputs_dict)
@property
def log(self):
return self._log
@property
def job_info(self):
if self._job_info is not None:
return dict(
stdout=self._job_info["stdout"],
stderr=self._job_info["stderr"],
command_line=self._job_info["command_line"],
)
return None
@property
def outputs_dict(self):
return self._outputs_dict
def download_output_to(self, dataset_details, output_directory, filename=None):
if filename is None:
local_filename = dataset_details.get("cwl_file_name") or dataset_details.get("name")
else:
local_filename = filename
destination = os.path.join(output_directory, local_filename)
self._history_content_download(
self._history_id,
dataset_details["id"],
to_path=destination,
filename=filename,
)
return destination
def _history_content_download(self, history_id, dataset_id, to_path, filename=None):
user_gi = self._user_gi
url = user_gi.url + "/histories/%s/contents/%s/display" % (history_id, dataset_id)
data = {}
if filename:
data["filename"] = filename
r = requests.get(url, params=data, verify=user_gi.verify, stream=True, timeout=user_gi.timeout)
r.raise_for_status()
with open(to_path, 'wb') as fp:
for chunk in r.iter_content(chunk_size=bioblend.CHUNK_SIZE):
if chunk:
fp.write(chunk)
class GalaxyToolRunResponse(GalaxyBaseRunResponse):
def __init__(
self,
ctx,
runnable,
user_gi,
history_id,
log,
job_info,
api_run_response,
):
super(GalaxyToolRunResponse, self).__init__(
ctx=ctx,
runnable=runnable,
user_gi=user_gi,
history_id=history_id,
log=log,
)
self._job_info = job_info
self.api_run_response = api_run_response
def is_collection(self, output):
# TODO: Make this more rigorous - search both output and output
# collections - throw an exception if not found in either place instead
# of just assuming all non-datasets are collections.
return self.output_dataset_id(output) is None
def to_galaxy_output(self, runnable_output):
output_id = runnable_output.get_id()
return tool_response_to_output(self.api_run_response, self._history_id, output_id)
def output_dataset_id(self, output):
outputs = self.api_run_response["outputs"]
output_id = output.get_id()
output_dataset_id = None
self._ctx.vlog("Looking for id [%s] in outputs [%s]" % (output_id, outputs))
for output in outputs:
if output["output_name"] == output_id:
output_dataset_id = output["id"]
return output_dataset_id
class GalaxyWorkflowRunResponse(GalaxyBaseRunResponse):
def __init__(
self,
ctx,
runnable,
user_gi,
history_id,
log,
workflow_id,
invocation_id,
):
super(GalaxyWorkflowRunResponse, self).__init__(
ctx=ctx,
runnable=runnable,
user_gi=user_gi,
history_id=history_id,
log=log,
)
self._workflow_id = workflow_id
self._invocation_id = invocation_id
def to_galaxy_output(self, runnable_output):
output_id = runnable_output.get_id()
self._ctx.vlog("checking for output in invocation [%s]" % self._invocation)
return invocation_to_output(self._invocation, self._history_id, output_id)
def output_dataset_id(self, output):
invocation = self._invocation
if "outputs" in invocation:
# Use newer workflow outputs API.
output_name = output.get_id()
if output_name in invocation["outputs"]:
return invocation["outputs"][output.get_id()]["id"]
else:
raise Exception("Failed to find output [%s] in invocation outputs [%s]" % (output_name, invocation["outputs"]))
else:
# Assume the output knows its order_index and such - older line of
            # development not worth pursuing.
workflow_output = output.workflow_output
order_index = workflow_output.order_index
invocation_steps = invocation["steps"]
output_steps = [s for s in invocation_steps if s["order_index"] == order_index]
assert len(output_steps) == 1, "More than one step matching outputs, behavior undefined."
output_step = output_steps[0]
job_id = output_step["job_id"]
assert job_id, "Output doesn't define a job_id, behavior undefined."
job_info = self._user_gi.jobs.show_job(job_id, full_details=True)
job_outputs = job_info["outputs"]
output_name = workflow_output.output_name
            assert output_name in job_outputs, "No output [%s] found for output job." % output_name
job_output = job_outputs[output_name]
assert "id" in job_output, "Job output [%s] does not contain 'id'." % job_output
return job_output["id"]
@property
def _invocation(self):
invocation = self._user_gi.workflows.show_invocation(
self._workflow_id,
self._invocation_id,
)
return invocation
def _tool_id(tool_path):
tool_source = get_tool_source(tool_path)
return tool_source.parse_id()
def _history_id(gi, **kwds):
history_id = kwds.get("history_id", None)
if history_id is None:
history_name = kwds.get("history_name", DEFAULT_HISTORY_NAME)
history_id = gi.histories.create_history(history_name)["id"]
return history_id
def _wait_for_invocation(ctx, gi, history_id, workflow_id, invocation_id, polling_backoff=0):
def state_func():
if _retry_on_timeouts(ctx, gi, lambda gi: has_jobs_in_states(gi, history_id, ["error", "deleted", "deleted_new"])):
raise Exception("Problem running workflow, one or more jobs failed.")
return _retry_on_timeouts(ctx, gi, lambda gi: gi.workflows.show_invocation(workflow_id, invocation_id))
return _wait_on_state(state_func, polling_backoff)
def _retry_on_timeouts(ctx, gi, f):
gi.timeout = 60
try_count = 5
try:
for try_num in range(try_count):
start_time = time.time()
try:
return f(gi)
except Exception:
end_time = time.time()
if end_time - start_time > 45 and (try_num + 1) < try_count:
ctx.vlog("Galaxy seems to have timedout, retrying to fetch status.")
continue
else:
raise
finally:
gi.timeout = None
def has_jobs_in_states(gi, history_id, states):
params = {"history_id": history_id}
jobs_url = gi._make_url(gi.jobs)
jobs = Client._get(gi.jobs, params=params, url=jobs_url)
target_jobs = [j for j in jobs if j["state"] in states]
return len(target_jobs) > 0
def _wait_for_history(ctx, gi, history_id, polling_backoff=0):
def has_active_jobs(gi):
if has_jobs_in_states(gi, history_id, ["new", "upload", "waiting", "queued", "running"]):
return True
else:
return None
timeout = 60 * 60 * 24
wait_on(lambda: _retry_on_timeouts(ctx, gi, has_active_jobs), "active jobs", timeout, polling_backoff)
def state_func():
return _retry_on_timeouts(ctx, gi, lambda gi: gi.histories.show_history(history_id))
return _wait_on_state(state_func, polling_backoff)
def _wait_for_job(gi, job_id):
def state_func():
return gi.jobs.show_job(job_id, full_details=True)
return _wait_on_state(state_func)
def _wait_on_state(state_func, polling_backoff=0):
def get_state():
response = state_func()
state = response["state"]
if str(state) not in ["running", "queued", "new", "ready"]:
return state
else:
return None
timeout = 60 * 60 * 24
final_state = wait_on(get_state, "state", timeout, polling_backoff)
return final_state
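# Note on the polling helpers above: wait_on() keeps invoking the supplied
# callable until it returns something other than None or the 24-hour timeout
# expires (with polling_backoff presumably stretching the sleep between
# attempts), so any state in ["running", "queued", "new", "ready"] keeps the
# wait alive.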
__all__ = (
"execute",
)
| 36.191781 | 128 | 0.624989 |
| cd428d944b5bd9280b5ba81f4c37e183df917776 | 908 | py | Python |
| megatron/model/__init__.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | ["Apache-2.0"] | 1,871 | 2020-12-22T14:44:29.000Z | 2022-03-31T14:21:40.000Z |
| megatron/model/__init__.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | ["Apache-2.0"] | 300 | 2020-12-23T17:51:43.000Z | 2022-03-30T17:34:42.000Z |
| megatron/model/__init__.py | igor0/gpt-neox | 3ad61952c290669d3741c01f767d41fdee5215c5 | ["Apache-2.0"] | 235 | 2020-12-23T19:45:19.000Z | 2022-03-31T20:33:47.000Z |
# coding=utf-8
#
# Copyright 2021 Biderman et al. This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .gpt2_model import GPT2ModelPipe
from .utils import get_params_for_weight_decay_optimization
from .word_embeddings import SoftEmbedding
| 43.238095 | 137 | 0.781938 |
| 4c4bbe43198de20a5336118ceeafa1035ed1e252 | 36,135 | py | Python |
| inversionson/components/project.py | xiaolongma/Inversionson | d8a065d862bd554b073d2a72767a359635163ba1 | ["MIT"] | 2 | 2021-08-28T14:08:23.000Z | 2021-08-30T00:35:53.000Z |
| inversionson/components/project.py | xiaolongma/Inversionson | d8a065d862bd554b073d2a72767a359635163ba1 | ["MIT"] | null | null | null |
| inversionson/components/project.py | xiaolongma/Inversionson | d8a065d862bd554b073d2a72767a359635163ba1 | ["MIT"] | null | null | null |
"""
A class which includes information regarding inversion
and sets up all the components that are needed inside
the inversion itself.
"""
# from __future__ import absolute_import
import os
import toml
import pprint
import shutil
from inversionson import InversionsonError, InversionsonWarning
import warnings
from .communicator import Communicator
from .component import Component
from .lasif_comp import LasifComponent
from .multimesh_comp import MultiMeshComponent
from .flow_comp import SalvusFlowComponent
from .mesh_comp import SalvusMeshComponent
from .opt_comp import SalvusOptComponent
from .storyteller import StoryTellerComponent
from .batch_comp import BatchComponent
from .smooth_comp import SalvusSmoothComponent
class ProjectComponent(Component):
def __init__(self, information_dict: dict):
"""
        Initiate everything to make it work correctly. Make sure that
        a running inversion can be restarted even though this setup runs again.
"""
self.info = information_dict
self.__comm = Communicator()
super(ProjectComponent, self).__init__(self.__comm, "project")
self.simulation_dict = self._read_config_file()
self.get_inversion_attributes(first=True)
self.__setup_components()
self.get_inversion_attributes(first=False)
self._validate_inversion_project()
self.remote_gradient_processing = True
def _read_config_file(self) -> dict:
"""
Parse the Lasif config file to use it in the inversion.
I might set this up to just be some parameters in the class
:return: Simulation dictionary
:rtype: dict
"""
with open(
os.path.join(self.info["lasif_root"], "lasif_config.toml"), "r"
) as fh:
config_dict = toml.load(fh)
simulation_info = {}
solver_settings = config_dict["simulation_settings"]
simulation_info["start_time"] = solver_settings["start_time_in_s"]
simulation_info["number_of_time_steps"] = int(
round(
(
solver_settings["end_time_in_s"]
- simulation_info["start_time"]
)
/ solver_settings["time_step_in_s"]
)
)
simulation_info["end_time"] = solver_settings["end_time_in_s"]
simulation_info["time_step"] = solver_settings["time_step_in_s"]
simulation_info["min_period"] = solver_settings["minimum_period_in_s"]
simulation_info["max_period"] = solver_settings["maximum_period_in_s"]
simulation_info["attenuation"] = config_dict["salvus_settings"][
"attenuation"
]
simulation_info["ocean_loading"] = config_dict["salvus_settings"][
"ocean_loading"
]
simulation_info["absorbing_boundaries_length"] = config_dict[
"salvus_settings"
]["absorbing_boundaries_in_km"]
simulation_info["domain_file"] = config_dict["lasif_project"][
"domain_settings"
]["domain_file"]
return simulation_info
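    # Illustrative lasif_config.toml fragment (hypothetical values) covering
    # exactly the keys read above:
    #
    #   [simulation_settings]
    #   start_time_in_s = -5.0
    #   end_time_in_s = 1500.0
    #   time_step_in_s = 0.1
    #   minimum_period_in_s = 50.0
    #   maximum_period_in_s = 100.0
    #
    #   [salvus_settings]
    #   attenuation = true
    #   ocean_loading = false
    #   absorbing_boundaries_in_km = 100.0
    #
    #   [lasif_project.domain_settings]
    #   domain_file = "DOMAIN/domain.h5"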
def get_communicator(self):
return self.__comm
def _validate_inversion_project(self):
"""
Make sure everything is correctly set up in order to perform inversion.
        Validates self.info and self.simulation_dict and raises an
        InversionsonError describing the first missing or invalid entry.
"""
import pathlib
allowed_interp_modes = ["gll_2_gll"]
if "inversion_id" not in self.info.keys():
raise ValueError("The inversion needs a name, Key: inversion_id")
if "inversion_path" not in self.info.keys():
raise InversionsonError(
"We need a given path for the inversion root directory."
" Key: inversion_path"
)
if "model_interpolation_mode" not in self.info.keys():
raise InversionsonError(
"We need information on how you want to interpolate "
"the model to simulation meshes. "
"Key: model_interpolation_mode "
)
if self.info["model_interpolation_mode"] not in allowed_interp_modes:
raise InversionsonError(
f"The allowable model_interpolation_modes are: "
f" {allowed_interp_modes}"
)
if "meshes" not in self.info.keys():
raise InversionsonError(
"We need information on which sorts of meshes you use. "
"Options are multi-mesh or mono-mesh. "
"Key: meshes"
)
if "gradient_interpolation_mode" not in self.info.keys():
raise InversionsonError(
"We need information on how you want to interpolate "
"the model to simulation meshes. "
"Key: gradient_interpolation_mode "
)
if (
self.info["gradient_interpolation_mode"]
not in allowed_interp_modes
):
raise InversionsonError(
f"The allowable model_interpolation_modes are: "
f" {allowed_interp_modes}"
)
if "HPC" not in self.info.keys():
raise InversionsonError(
"We need information regarding your computational resources."
" run create_dummy_info_file.py for an example"
)
if "wave_propagation" not in self.info["HPC"].keys():
raise InversionsonError(
"We need specific computational info on wave_propagation"
)
if "diffusion_equation" not in self.info["HPC"].keys():
raise InversionsonError(
"We need specific computational info on diffusion_equation"
)
if "site_name" not in self.info["HPC"]["wave_propagation"].keys():
raise InversionsonError(
"We need information on the site where jobs are submitted. "
"Key: HPC.wave_propagation.site_name"
)
if "wall_time" not in self.info["HPC"]["wave_propagation"].keys():
            raise InversionsonError(
                "We need information on the wall time of your wave "
                "propagation simulations. Key: HPC.wave_propagation.wall_time"
)
if "ranks" not in self.info["HPC"]["wave_propagation"].keys():
raise InversionsonError(
"We need information on the amount of ranks you want to "
"run your simulations. Key: HPC.wave_propagation.ranks"
)
if "site_name" not in self.info["HPC"]["diffusion_equation"].keys():
raise InversionsonError(
"We need information on the site where jobs are submitted. "
"Key: HPC.diffusion_equation.site_name"
)
if "wall_time" not in self.info["HPC"]["diffusion_equation"].keys():
            raise InversionsonError(
                "We need information on the wall time of your diffusion "
                "equation runs. Key: HPC.diffusion_equation.wall_time"
)
if "ranks" not in self.info["HPC"]["diffusion_equation"].keys():
raise InversionsonError(
"We need information on the amount of ranks you want to "
"run your simulations. Key: HPC.diffusion_equation.ranks"
)
if "inversion_parameters" not in self.info.keys():
raise InversionsonError(
"We need information on the parameters you want to invert for."
" Key: inversion_parameters"
)
if "modelling_parameters" not in self.info.keys():
raise InversionsonError(
"We need information on the parameters you keep in your mesh "
"for forward modelling. Key: modelling_parameters"
)
if "random_event_fraction" not in self.info.keys():
raise InversionsonError(
"We need information regarding how many events should be "
"randomly picked when all events have been used. "
"Key: random_event_fraction"
)
if (
self.info["random_event_fraction"] > 1
or self.info["random_event_fraction"] < 0
or not isinstance(self.info["random_event_fraction"], float)
):
raise InversionsonError(
"random_event_fraction should be a float"
"and lie between 0.0 and 1.0"
)
if "min_ctrl_group_size" not in self.info.keys():
raise InversionsonError(
"We need information regarding minimum control group size."
" Key: min_ctrl_group_size"
)
if "inversion_mode" not in self.info.keys():
raise InversionsonError(
"We need information on inversion mode. mini-batch or normal"
)
if self.info["inversion_mode"] not in ["mini-batch", "mono-batch"]:
raise InversionsonError(
"Only implemented inversion modes are mini-batch or mono-batch"
)
if "meshes" not in self.info.keys():
raise InversionsonError(
"We need to know what sorts of meshes you use. "
"Either mono-mesh for simulation mesh = inversion mesh "
"or multi-mesh for wavefield adapted meshes. "
"Key: meshes"
)
if self.info["meshes"] not in ["mono-mesh", "multi-mesh"]:
raise InversionsonError(
"We only accept 'mono-mesh' or 'multi-mesh'"
)
# Smoothing
if "Smoothing" not in self.info.keys():
raise InversionsonError(
"Please specify smoothing parameters in info file. "
"Key: Smoothing"
)
if "smoothing_mode" not in self.info["Smoothing"].keys():
raise InversionsonError(
"Please specify smoothing mode under Smoothing in info file. "
"Key: Smoothing.smoothing_mode"
)
if self.info["Smoothing"]["smoothing_mode"] not in [
"anisotropic",
"isotropic",
"none",
]:
raise InversionsonError(
"Only implemented smoothing modes are 'anisotropic', "
"'isotropic' and 'none'"
)
if "timestep" not in self.info["Smoothing"].keys():
raise InversionsonError(
"Please specify the timestep you want for your smoothing "
"The total time is 1 second so it needs to be a fraction of "
"that. Key: Smoothing.timestep"
)
if self.info["Smoothing"]["timestep"] > 0.5:
raise InversionsonError(
"Smoothing timestep can not be larger than 0.5 seconds"
)
if not self.info["Smoothing"]["smoothing_mode"] == "none":
if "smoothing_lengths" not in self.info["Smoothing"].keys():
raise InversionsonError(
"Please specify smoothing lengths under Smoothing in info "
"file. Key: Smoothing.smoothing_lengths"
)
if self.info["Smoothing"]["smoothing_mode"] == "anisotropic":
if not isinstance(
self.info["Smoothing"]["smoothing_lengths"], list
):
raise InversionsonError(
"Make sure you input a list as smoothing_lengths if you "
"want to smooth anisotropically. List of length 3. "
"Order: r, theta, phi."
)
if not len(self.info["Smoothing"]["smoothing_lengths"]) == 3:
raise InversionsonError(
"Make sure your smoothing_lengths are a list of length 3."
"Order: r, theta, phi."
)
if self.info["Smoothing"]["smoothing_mode"] == "isotropic":
if isinstance(self.info["Smoothing"]["smoothing_lengths"], list):
if len(self.info["Smoothing"]["smoothing_lengths"]) == 1:
self.info["Smoothing"]["smoothing_lengths"] = self.info[
"Smoothing"
]["smoothing_lengths"][0]
else:
raise InversionsonError(
"If you give a list of isotropic lengths, you can only"
" give a list of length one, as all dimensions will "
"be smoothed with equally many wavelengths. You can "
"also just give a number."
)
if (
"Meshing" not in self.info.keys()
and self.info["meshes"] == "multi-mesh"
):
raise InversionsonError(
"We need some information regarding your meshes. "
"We need to know how many elements you want per azimuthal "
"quarter. Key: Meshing"
)
        if self.info["meshes"] == "multi-mesh":
            if (
                "elements_per_azimuthal_quarter"
                not in self.info["Meshing"].keys()
            ):
                raise InversionsonError(
                    "We need to know how many elements you need per azimuthal "
                    "quarter. Key: Meshing.elements_per_azimuthal_quarter"
                )
            if not isinstance(
                self.info["Meshing"]["elements_per_azimuthal_quarter"], int
            ):
                raise InversionsonError(
                    "Elements per azimuthal quarter need to be an integer."
                )
# Lasif
if "lasif_root" not in self.info.keys():
raise InversionsonError(
"Information on lasif_project is missing from information. "
"Key: lasif_root"
)
else:
folder = pathlib.Path(self.info["lasif_root"])
if not (folder / "lasif_config.toml").exists():
raise InversionsonError("Lasif project not initialized")
# Simulation parameters:
if "end_time" not in self.simulation_dict.keys():
raise InversionsonError(
"Information regarding end time of simulation missing"
)
if "time_step" not in self.simulation_dict.keys():
raise InversionsonError(
"Information regarding time step of simulation missing"
)
if "start_time" not in self.simulation_dict.keys():
raise InversionsonError(
"Information regarding start time of simulation missing"
)
if "inversion_monitoring" not in self.info.keys():
raise InversionsonError(
"Information regarding inversion monitoring is missing"
)
if (
self.info["inversion_monitoring"][
"iterations_between_validation_checks"
]
!= 0
):
if (
len(
self.info["inversion_monitoring"]["validation_dataset"]
)
== 0
):
raise InversionsonError(
"You need to specify a validation dataset if you want"
" to check it regularly."
)
def __setup_components(self):
"""
Setup the different components that need to be used in the inversion.
These are wrappers around the main libraries used in the inversion.
"""
LasifComponent(communicator=self.comm, component_name="lasif")
SalvusOptComponent(communicator=self.comm, component_name="salvus_opt")
MultiMeshComponent(communicator=self.comm, component_name="multi_mesh")
SalvusFlowComponent(
communicator=self.comm, component_name="salvus_flow"
)
SalvusMeshComponent(
communicator=self.comm, component_name="salvus_mesher"
)
StoryTellerComponent(
communicator=self.comm, component_name="storyteller"
)
BatchComponent(communicator=self.comm, component_name="minibatch")
SalvusSmoothComponent(
communicator=self.comm, component_name="smoother"
)
def arrange_params(self, parameters: list) -> list:
"""
Re-arrange list of parameters in order to have
        them conveniently arranged when called upon. This can be an annoying
problem when working with hdf5 files.
        This method only handles a few common cases. If the set of
        parameters is not recognized, an InversionsonError is raised.
:param parameters: parameters to be arranged
:type parameters: list
"""
case_tti_inv = set(["VSV", "VSH", "VPV", "VPH", "RHO"])
case_tti_mod = set(
["VSV", "VSH", "VPV", "VPH", "RHO", "QKAPPA", "QMU", "ETA"]
)
case_iso_mod = set(["QKAPPA", "QMU", "VP", "VS", "RHO"])
case_iso_inv = set(["VP", "VS"])
case_iso_inv_dens = set(["VP", "VS", "RHO"])
case_tti_inv_norho = set(["VSV", "VSH", "VPV", "VPH"])
if set(parameters) == case_tti_inv:
parameters = ["VPV", "VPH", "VSV", "VSH", "RHO"]
elif set(parameters) == case_tti_inv_norho:
parameters = ["VPV", "VPH", "VSV", "VSH"]
elif set(parameters) == case_tti_mod:
parameters = [
"VPV",
"VPH",
"VSV",
"VSH",
"RHO",
"QKAPPA",
"QMU",
"ETA",
]
elif set(parameters) == case_iso_inv:
parameters = ["VP", "VS"]
elif set(parameters) == case_iso_inv_dens:
parameters = ["RHO", "VP", "VS"]
elif set(parameters) == case_iso_mod:
parameters = ["QKAPPA", "QMU", "RHO", "VP", "VS"]
else:
raise InversionsonError(
f"Parameter list {parameters} not "
f"a recognized set of parameters"
)
return parameters
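    # Editorial sketch (not part of the original class): how arrange_params
    # reorders a recognized parameter set. Only set membership matters, the
    # input order does not; unrecognized sets raise InversionsonError.
    #
    #   >>> self.arrange_params(["RHO", "VSH", "VPV", "VSV", "VPH"])
    #   ['VPV', 'VPH', 'VSV', 'VSH', 'RHO']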
def get_inversion_attributes(self, first=False):
"""
Read crucial components into memory to keep them easily accessible.
        :param first: Before components are set up, defaults to False
:type first: bool, optional
"""
# Simulation attributes
self.time_step = self.simulation_dict["time_step"]
self.start_time = self.simulation_dict["start_time"]
self.end_time = self.simulation_dict["end_time"]
self.min_period = self.simulation_dict["min_period"]
self.max_period = self.simulation_dict["max_period"]
self.attenuation = self.simulation_dict["attenuation"]
self.abs_bound_length = self.simulation_dict[
"absorbing_boundaries_length"
]
self.absorbing_boundaries = self.info["absorbing_boundaries"]
self.ocean_loading = self.simulation_dict["ocean_loading"]
self.domain_file = self.simulation_dict["domain_file"]
# Inversion attributes
self.inversion_root = self.info["inversion_path"]
self.lasif_root = self.info["lasif_root"]
self.inversion_id = self.info["inversion_id"]
self.inversion_mode = self.info["inversion_mode"]
self.meshes = self.info["meshes"]
if self.meshes == "multi-mesh":
self.elem_per_quarter = self.info["Meshing"][
"elements_per_azimuthal_quarter"
]
self.model_interpolation_mode = self.info["model_interpolation_mode"]
self.gradient_interpolation_mode = self.info[
"gradient_interpolation_mode"
]
self.cut_source_radius = self.info[
"cut_source_region_from_gradient_in_km"
]
self.cut_receiver_radius = self.info[
"cut_receiver_region_from_gradient_in_km"
]
self.clip_gradient = self.info["clip_gradient"]
self.site_name = self.info["HPC"]["wave_propagation"]["site_name"]
self.ranks = self.info["HPC"]["wave_propagation"]["ranks"]
self.wall_time = self.info["HPC"]["wave_propagation"]["wall_time"]
self.smoothing_site_name = self.info["HPC"]["diffusion_equation"][
"site_name"
]
self.smoothing_ranks = self.info["HPC"]["diffusion_equation"]["ranks"]
self.smoothing_wall_time = self.info["HPC"]["diffusion_equation"][
"wall_time"
]
self.smoothing_mode = self.info["Smoothing"]["smoothing_mode"]
self.smoothing_lengths = self.info["Smoothing"]["smoothing_lengths"]
self.smoothing_timestep = self.info["Smoothing"]["timestep"]
self.initial_batch_size = self.info["initial_batch_size"]
self.random_event_fraction = self.info["random_event_fraction"]
self.min_ctrl_group_size = self.info["min_ctrl_group_size"]
self.maximum_grad_divergence_angle = self.info["max_angular_change"]
self.dropout_probability = self.info["dropout_probability"]
self.when_to_validate = self.info["inversion_monitoring"][
"iterations_between_validation_checks"
]
self.validation_dataset = self.info["inversion_monitoring"][
"validation_dataset"
]
self.test_dataset = self.info["inversion_monitoring"]["test_dataset"]
if not first:
self.current_iteration = (
self.comm.salvus_opt.get_newest_iteration_name()
)
print(f"Current Iteration: {self.current_iteration}")
self.event_quality = toml.load(
self.comm.storyteller.events_quality_toml
)
self.inversion_params = self.arrange_params(
self.info["inversion_parameters"]
)
self.modelling_params = self.arrange_params(
self.info["modelling_parameters"]
)
# Some useful paths
self.paths = {}
self.paths["inversion_root"] = self.inversion_root
self.paths["lasif_root"] = self.lasif_root
self.paths["salvus_opt"] = os.path.join(
self.inversion_root, "SALVUS_OPT"
)
if not os.path.exists(self.paths["salvus_opt"]):
raise InversionsonError(
"Please make a folder for Salvus opt and initialize it in there"
)
self.paths["documentation"] = os.path.join(
self.inversion_root, "DOCUMENTATION"
)
if not os.path.exists(self.paths["documentation"]):
os.makedirs(self.paths["documentation"])
os.mkdir(os.path.join(self.paths["documentation"], "BACKUP"))
self.paths["iteration_tomls"] = os.path.join(
self.paths["documentation"], "ITERATIONS"
)
if not os.path.exists(self.paths["iteration_tomls"]):
os.makedirs(self.paths["iteration_tomls"])
# self.paths["salvus_smoother"] = self.info["salvus_smoother"]
self.paths["control_group_toml"] = os.path.join(
self.paths["documentation"], "control_groups.toml"
)
def create_iteration_toml(self, iteration: str):
"""
Create the toml file for an iteration. This toml file is then updated.
To create the toml, we need the events and the control group
:param iteration: Name of iteration
:type iteration: str
"""
iteration_toml = os.path.join(
self.paths["iteration_tomls"], iteration + ".toml"
)
validation = False
if "validation" in iteration:
validation = True
if os.path.exists(iteration_toml):
warnings.warn(
f"Iteration toml for iteration: {iteration} already exists. backed it up",
InversionsonWarning,
)
backup = os.path.join(
self.paths["iteration_tomls"], f"backup_{iteration}.toml"
)
shutil.copyfile(iteration_toml, backup)
it_dict = {}
it_dict["name"] = iteration
it_dict["events"] = {}
if self.meshes == "mono-mesh":
it_dict["remote_simulation_mesh"] = None
last_control_group = []
if (
iteration != "it0000_model"
and not validation
and self.inversion_mode == "mini-batch"
):
ctrl_grps = toml.load(
self.comm.project.paths["control_group_toml"]
)
prev_iter = self.comm.salvus_opt.get_previous_iteration_name()
last_control_group = ctrl_grps[prev_iter]["new"]
if not validation and self.inversion_mode == "mini-batch":
it_dict["last_control_group"] = last_control_group
it_dict["new_control_group"] = []
f_job_dict = {
"name": "",
"submitted": False,
"retrieved": False,
"reposts": 0,
}
if validation:
f_job_dict["windows_selected"] = False
if not validation:
a_job_dict = {
"name": "",
"submitted": False,
"retrieved": False,
"reposts": 0,
}
s_job_dict = {
"name": "",
"submitted": False,
"retrieved": False,
"reposts": 0,
}
if self.meshes == "multi-mesh":
f_job_dict["interpolated"] = False
if not validation:
a_job_dict["interpolated"] = False
for _i, event in enumerate(
self.comm.lasif.list_events(iteration=iteration)
):
if validation:
jobs = {"forward": f_job_dict}
if self.inversion_mode == "mini-batch":
if not validation:
jobs = {
"forward": f_job_dict,
"adjoint": a_job_dict,
"smoothing": s_job_dict,
}
it_dict["events"][str(_i)] = {
"name": event,
"job_info": jobs,
}
# it_dict["events"][event] = {
# "job_info": jobs,
# }
else:
if not validation:
jobs = {
"forward": f_job_dict,
"adjoint": a_job_dict,
}
it_dict["events"][str(_i)] = {
"name": event,
"job_info": jobs,
}
if not validation:
it_dict["events"][str(_i)]["misfit"] = 0.0
it_dict["events"][str(_i)]["usage_updated"] = False
if self.inversion_mode == "mono-batch" and not validation:
it_dict["smoothing"] = s_job_dict
with open(iteration_toml, "w") as fh:
toml.dump(it_dict, fh)
def change_attribute(self, attribute: str, new_value):
"""
        Attributes cannot be changed directly from another class;
        this method takes care of it.
:param attribute: Name of attribute
:type attribute: str
:param new_value: The new value to assign to the attribute
        :type new_value: whatever type the attribute needs
"""
if isinstance(new_value, str):
command = f'self.{attribute} = "{new_value}"'
elif isinstance(new_value, list):
command = f"self.{attribute} = {new_value}"
elif isinstance(new_value, bool):
command = f"self.{attribute} = {new_value}"
elif isinstance(new_value, dict):
command = f"self.{attribute} = {new_value}"
elif isinstance(new_value, float):
command = f"self.{attribute} = {new_value}"
elif isinstance(new_value, int):
command = f"self.{attribute} = {new_value}"
else:
raise InversionsonError(
f"Method not implemented for type {new_value.type}"
)
exec(command)
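    # Editorial note (sketch, not in the original code): for all of the types
    # handled above, the exec-based dispatch is equivalent to the built-in
    #
    #   setattr(self, attribute, new_value)
    #
    # which avoids round-tripping the value through a string. Hypothetical
    # usage: self.change_attribute("current_iteration", "it0001_model")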
def update_control_group_toml(self, new=False, first=False):
"""
A toml file for monitoring which control group is used in each
iteration.
Structure: dict[iteration] = {old: [], new: []}
:param new: Should the new control group be updated?
:type new: bool, optional
:param first: Does the toml need to be created?
:type first: bool, optional
"""
iteration = self.current_iteration
print(f"Iteration: {iteration}")
if first:
cg_dict = {}
cg_dict[iteration] = {"old": [], "new": []}
with open(self.paths["control_group_toml"], "w") as fh:
toml.dump(cg_dict, fh)
return
else:
cg_dict = toml.load(self.paths["control_group_toml"])
if not new:
prev_iter = self.comm.salvus_opt.get_previous_iteration_name()
cg_dict[iteration] = {}
cg_dict[iteration]["old"] = cg_dict[prev_iter]["new"]
            if "new" not in cg_dict[iteration].keys():
cg_dict[iteration]["new"] = []
if new:
if iteration not in cg_dict.keys():
cg_dict[iteration] = {}
cg_dict[iteration]["new"] = self.new_control_group
with open(self.paths["control_group_toml"], "w") as fh:
toml.dump(cg_dict, fh)
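    # Editorial sketch of the resulting control_groups.toml layout; the
    # iteration and event names below are hypothetical:
    #
    #   [it0001_model]
    #   old = []
    #   new = ["event_0001", "event_0007"]
    #
    #   [it0002_model]
    #   old = ["event_0001", "event_0007"]
    #   new = []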
def update_iteration_toml(self, iteration="current", validation=False):
"""
Use iteration parameters to update iteration toml file
:param iteration: Name of iteration
:type iteration: str
"""
if iteration == "current":
iteration = self.current_iteration
if "validation" in iteration:
validation = True
if validation and "validation" not in iteration:
iteration = f"validation_{iteration}"
iteration_toml = os.path.join(
self.paths["iteration_tomls"], iteration + ".toml"
)
if not os.path.exists(iteration_toml):
raise InversionsonError(
f"Iteration toml for iteration: {iteration} does not exists"
)
if os.path.exists(self.paths["control_group_toml"]) and not validation:
control_group_dict = toml.load(self.paths["control_group_toml"])
control_group_dict = control_group_dict[iteration]
elif self.inversion_mode == "mini-batch":
control_group_dict = {"old": [], "new": []}
it_dict = {}
it_dict["name"] = iteration
it_dict["events"] = {}
if self.meshes == "mono-mesh":
it_dict["remote_simulation_mesh"] = self.remote_mesh
# I need a way to figure out what the controlgroup is
# This definitely needs improvement
if not validation and self.inversion_mode == "mini-batch":
it_dict["last_control_group"] = control_group_dict["old"]
it_dict["new_control_group"] = control_group_dict["new"]
for _i, event in enumerate(
self.comm.lasif.list_events(iteration=iteration)
):
jobs = {"forward": self.forward_job[event]}
if not validation:
jobs["adjoint"] = self.adjoint_job[event]
if self.inversion_mode == "mini-batch":
if not validation:
jobs["smoothing"] = self.smoothing_job[event]
it_dict["events"][str(_i)] = {
"name": event,
"job_info": jobs,
}
else:
it_dict["events"][str(_i)] = {
"job_info": jobs,
}
if not validation:
it_dict["events"][str(_i)]["misfit"] = self.misfits[event]
it_dict["events"][str(_i)]["usage_updated"] = self.updated[
event
]
if self.inversion_mode == "mono-batch" and not validation:
it_dict["smoothing"] = self.smoothing_job
with open(iteration_toml, "w") as fh:
toml.dump(it_dict, fh)
def get_iteration_attributes(self, validation=False):
"""
Save the attributes of the current iteration into memory
        :param validation: Whether to load the validation variant of the iteration, defaults to False
        :type validation: bool, optional
"""
iteration = self.comm.salvus_opt.get_newest_iteration_name()
if validation:
iteration = f"validation_{iteration}"
iteration_toml = os.path.join(
self.paths["iteration_tomls"], iteration + ".toml"
)
if not os.path.exists(iteration_toml):
raise InversionsonError(
f"No toml file exists for iteration: {iteration}"
)
it_dict = toml.load(iteration_toml)
self.iteration_name = it_dict["name"]
self.current_iteration = self.iteration_name
self.events_in_iteration = self.comm.lasif.list_events(
iteration=iteration
)
if not validation:
if self.inversion_mode == "mini-batch":
self.old_control_group = it_dict["last_control_group"]
self.new_control_group = it_dict["new_control_group"]
self.adjoint_job = {}
self.smoothing_job = {}
self.misfits = {}
self.updated = {}
self.forward_job = {}
if self.meshes == "mono-mesh":
if "remote_simulation_mesh" not in it_dict.keys():
self.remote_mesh = None
else:
self.remote_mesh = it_dict["remote_simulation_mesh"]
else:
self.remote_mesh = None
# Not sure if it's worth it to include station misfits
for _i, event in enumerate(self.events_in_iteration):
if not validation:
self.updated[event] = it_dict["events"][str(_i)][
"usage_updated"
]
self.misfits[event] = it_dict["events"][str(_i)]["misfit"]
self.adjoint_job[event] = it_dict["events"][str(_i)][
"job_info"
]["adjoint"]
if self.inversion_mode == "mini-batch":
self.smoothing_job[event] = it_dict["events"][str(_i)][
"job_info"
]["smoothing"]
self.forward_job[event] = it_dict["events"][str(_i)]["job_info"][
"forward"
]
if self.inversion_mode == "mono-batch" and not validation:
self.smoothing_job = it_dict["smoothing"]
def get_old_iteration_info(self, iteration: str) -> dict:
"""
        Get information about an iteration other than the current one
:param iteration: Name of iteration
:type iteration: str
:return: Information regarding that iteration
:rtype: dict
"""
iteration_toml = os.path.join(
self.paths["iteration_tomls"], iteration + ".toml"
)
if not os.path.exists(iteration_toml):
raise InversionsonError(
f"No toml file exists for iteration: {iteration}"
)
with open(iteration_toml, "r") as fh:
it_dict = toml.load(fh)
return it_dict
def get_key_number_for_event(
self, event: str, iteration: str = "current"
) -> str:
"""
        Due to an annoying problem with toml, we cannot use event names
        as keys in the iteration dictionaries. This is a function to find
        the index.
Lasif returns a sorted list which should always be the same.
:param event: Name of event
:type event: str
:return: The correct key for the event
:rtype: str
"""
if iteration == "current":
iteration = self.current_iteration
events = self.comm.lasif.list_events(iteration=iteration)
return str(events.index(event))
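    # Hypothetical usage sketch (editorial addition, not in the original file):
    # the returned key indexes the per-event tables written by
    # create_iteration_toml / update_iteration_toml, e.g.
    #
    #   key = self.get_key_number_for_event("event_0001")
    #   misfit = it_dict["events"][key]["misfit"]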
| 39.319913
| 90
| 0.565878
|
f003eb8cc6a5821323d04dfdee79d8576eaa7dd0
| 7,821
|
py
|
Python
|
test/querystring_test.py
|
vartagg/urlblocks-new
|
b6c34ea9cd46ead2b0b1e5479b5b69e3a781cbfe
|
[
"Unlicense"
] | null | null | null |
test/querystring_test.py
|
vartagg/urlblocks-new
|
b6c34ea9cd46ead2b0b1e5479b5b69e3a781cbfe
|
[
"Unlicense"
] | null | null | null |
test/querystring_test.py
|
vartagg/urlblocks-new
|
b6c34ea9cd46ead2b0b1e5479b5b69e3a781cbfe
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from urlblocks.query_string import QueryString
from urlblocks.six import u
class QueryStringTest(unittest.TestCase):
def test_preserves_equality_with_original_string(self):
assert QueryString('abc=123') == 'abc=123'
def test_preserves_hash_value_of_original_string(self):
assert hash(QueryString('abc=123')) == hash('abc=123')
def test_list_returns_an_empty_list_for_empty_QueryStrings(self):
assert QueryString('').list == []
def test_list_correctly_splits_on_ampersands(self):
assert QueryString('abc=123&def=456').list == [
('abc', '123'), ('def', '456')]
def test_list_correctly_splits_on_semicolons(self):
assert QueryString('abc=123;def=456').list == [
('abc', '123'), ('def', '456')]
def test_list_correctly_decodes_special_chars(self):
assert QueryString('a%20b=c%20d').list == [('a b', 'c d')]
assert QueryString('a+b=c+d').list == [('a b', 'c d')]
assert (QueryString('my%20weird%20field=q1!2%22\'w%245%267%2Fz8)%3F').list ==
[('my weird field', 'q1!2"\'w$5&7/z8)?')])
def test_list_correctly_decodes_utf_8(self):
assert QueryString('foo=%EF%BF%BD').list == [('foo', u('\ufffd'))]
def test_list_doesnt_split_on_percent_encoded_special_chars(self):
assert QueryString('a%26b%3Dc%3F=a%26b%3Dc%3F').list == [
('a&b=c?', 'a&b=c?')]
def test_list_doesnt_break_if_two_parameters_have_the_same_name(self):
assert QueryString('abc=123;abc=456').list == [
('abc', '123'), ('abc', '456')]
def test_list_uses_none_as_the_value_for_valueless_parameters(self):
assert QueryString('abc').list == [('abc', None)]
assert QueryString('abc=123&def&ghi=456').list == [
('abc', '123'), ('def', None), ('ghi', '456')]
def test_list_uses_empty_string_for_empty_valued_parameters(self):
assert QueryString('abc=').list == [('abc', '')]
assert QueryString('abc=123&def=&ghi=456').list == [
('abc', '123'), ('def', ''), ('ghi', '456')]
def test_list_uses_empty_string_for_anonymous_parameters(self):
assert QueryString('=123').list == [('', '123')]
assert QueryString('abc=123&=456&ghi=789').list == [
('abc', '123'), ('', '456'), ('ghi', '789')]
def test_list_can_handle_void_parameters(self):
assert QueryString('abc=123&&def=456').list == [
('abc', '123'), ('', None), ('def', '456')]
assert QueryString('abc=123&=&def=456').list == [
('abc', '123'), ('', ''), ('def', '456')]
def test_dict_returns_a_dictionary_with_one_value_per_key(self):
assert QueryString('abc=123&abc=456').dict == {'abc': '456'}
def test_multi_dict_returns_a_dictionary_with_all_values_per_key(self):
assert QueryString('abc=123&abc=456').multi_dict == {
'abc': ['123', '456']}
def test_add_param_encodes_and_adds_the_given_parameter_to_the_QueryString(self):
s = QueryString('')
assert s.add_param('abc', '123') == 'abc=123'
assert (s.add_param('abc', '123')
.add_param('def', '456') == 'abc=123&def=456')
def test_add_param_can_add_valueless_parameters(self):
s = QueryString('abc=123')
assert s.add_param('def', None) == 'abc=123&def'
def test_add_param_can_add_empty_valued_parameters(self):
s = QueryString('abc=123')
assert s.add_param('def', '') == 'abc=123&def='
def test_add_param_can_add_anonymous_parameters(self):
s = QueryString('abc=123')
assert s.add_param('', '456') == 'abc=123&=456'
def test_add_param_encodes_utf8(self):
s = QueryString('abc=123')
assert s.add_param('foo', u('\ufffd')) == 'abc=123&foo=%EF%BF%BD'
def test_add_param_accepts_int(self):
s = QueryString('')
assert s.add_param('abc', 123) == 'abc=123'
def test_add_param_allows_the_same_parameter_name_to_be_added_twice(self):
s = QueryString('abc=123')
assert s.add_param('abc', '456') == 'abc=123&abc=456'
def test_add_param_encodes_special_characters(self):
s = QueryString('abc=123')
assert s.add_param('d e f', '4+5#6') == 'abc=123&d+e+f=4%2B5%236'
def test_set_param_replaces_existing_parameter_names(self):
s = QueryString('abc=123&abc=456')
assert s.set_param('abc', '789') == 'abc=789'
def test_del_param_removes_all_instances_of_the_parameter_from_the_QueryString(self):
s = QueryString('abc=123&def=456&abc=789')
assert s.del_param('abc') == 'def=456'
assert s.del_param('def') == 'abc=123&abc=789'
def test_del_param_can_remove_valueless_parameters(self):
valueless = QueryString('abc=123&def&abc=456')
empty_valued = QueryString('abc=123&def=&abc=456')
assert valueless.del_param('def') == 'abc=123&abc=456'
assert empty_valued.del_param('def') == 'abc=123&abc=456'
def test_del_param_can_remove_anonymous_parameters(self):
s = QueryString('abc=123&=456&def=789')
assert s.del_param('') == 'abc=123&def=789'
def test_add_params_is_equivalent_to_calling_add_param_multiple_times(self):
s = QueryString('')
assert (s.add_params([('abc', '123'), ('def', '456')]) ==
s.add_param('abc', '123').add_param('def', '456'))
def test_add_params_accepts_the_same_args_as_dict(self):
s = QueryString('')
added = s.add_params({'abc': '123'}, foo='bar', xyz='456')
assert added.dict == {'abc': '123', 'foo': 'bar', 'xyz': '456'}
added2 = s.add_params([('abc', '123')], foo='bar', xyz='456')
assert added2.dict == {'abc': '123', 'foo': 'bar', 'xyz': '456'}
# It also has to fail in the same way as `dict`. If you pass more than
# one positional argument it should raise a TypeError.
self.assertRaises(TypeError,
s.add_params, {'abc': '123'}, {'foo': 'bar'})
def test_add_params_accepts_the_same_parameter_name_multiple_times(self):
s = (QueryString('')
.add_params([('abc', '123'), ('abc', '456')]))
assert s.list == [('abc', '123'), ('abc', '456')]
def test_add_params_with_multiple_values_adds_the_same_parameter_multiple_times(self):
s = QueryString('')
assert (s.add_params({'foo': ['bar', 'baz']}) ==
s.add_param('foo', 'bar').add_param('foo', 'baz'))
def test_set_params_is_equivalent_to_calling_set_param_multiple_times(self):
s = QueryString('')
assert (s.set_params([('abc', '123'), ('def', '456')]) ==
s.set_param('abc', '123').set_param('def', '456'))
def test_set_params_accepts_the_same_args_as_dict(self):
s = QueryString('')
added = s.set_params({'abc': '123'}, abc='456')
assert added.dict == {'abc': '456'}
added2 = s.set_params([('abc', '123')], abc='456')
assert added2.dict == {'abc': '456'}
def test_set_params_accepts_the_same_parameter_name_multiple_times(self):
s = (QueryString('')
.set_params([('abc', '123'), ('abc', '456')]))
assert s.list == [('abc', '456')]
def test_set_params_with_multiple_values_sets_the_same_name_multiple_times(self):
s = QueryString('foo=spam')
assert (s.set_params({'foo': ['bar', 'baz']}) ==
'foo=bar&foo=baz')
s2 = QueryString('foo=bar&foo=baz')
assert (s2.set_params({'foo': ['spam', 'ham']}) ==
'foo=spam&foo=ham')
def test_del_params_accepts_an_iterable_and_removes_all_listed_parameters(self):
s = QueryString('abc=123&def=456&xyz=789')
assert s.del_params(('abc', 'xyz')) == 'def=456'
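    # Editorial sketch (not part of the original suite): the chained API the
    # tests above exercise, with values taken from the assertions themselves.
    #
    #   QueryString('').add_param('abc', '123').add_param('def', '456')
    #   # -> 'abc=123&def=456'
    #   QueryString('abc=123&abc=456').dict   # -> {'abc': '456'}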
| 43.45
| 90
| 0.611942
|
83645ae779f551c4016ef6046dd6651a4741537e
| 1,176
|
py
|
Python
|
code/cuboid_route/sol_86.py
|
bhavinjawade/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | 2
|
2020-07-16T08:16:32.000Z
|
2020-10-01T07:16:48.000Z
|
code/cuboid_route/sol_86.py
|
Psingh12354/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | null | null | null |
code/cuboid_route/sol_86.py
|
Psingh12354/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | 1
|
2021-05-07T18:06:08.000Z
|
2021-05-07T18:06:08.000Z
|
# -*- coding: utf-8 -*-
'''
File name: code\cuboid_route\sol_86.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #86 :: Cuboid route
#
# For more information see:
# https://projecteuler.net/problem=86
# Problem Statement
'''
A spider, S, sits in one corner of a cuboid room, measuring 6 by 5 by 3, and a fly, F, sits in the opposite corner. By travelling on the surfaces of the room the shortest "straight line" distance from S to F is 10 and the path is shown on the diagram.
However, there are up to three "shortest" path candidates for any given cuboid and the shortest route doesn't always have integer length.
It can be shown that there are exactly 2060 distinct cuboids, ignoring rotations, with integer dimensions, up to a maximum size of M by M by M, for which the shortest route has integer length when M = 100. This is the least value of M for which the number of solutions first exceeds two thousand; the number of solutions when M = 99 is 1975.
Find the least value of M such that the number of solutions first exceeds one million.
'''
# Solution
# Solution Approach
'''
'''
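# Editorial sketch only -- the original file leaves its Solution sections
# empty. One standard approach (not necessarily the author's): for a cuboid
# a x b x c with a <= b <= M and longest side M, the shortest surface route is
# sqrt(M**2 + (a + b)**2), so count the (a, b) pairs giving an integer length.
# Requires Python 3.8+ for math.isqrt.
from math import isqrt
def count_new_solutions(m):
    """Cuboids with longest side exactly m whose shortest route is integral."""
    total = 0
    for ab in range(2, 2 * m + 1):
        d2 = m * m + ab * ab
        if isqrt(d2) ** 2 == d2:
            # number of pairs (a, b) with 1 <= a <= b <= m and a + b == ab
            total += min(ab // 2, m - (ab + 1) // 2 + 1)
    return total
def least_m_exceeding(target=1_000_000):
    m, cumulative = 0, 0
    while cumulative <= target:
        m += 1
        cumulative += count_new_solutions(m)
    return m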
| 39.2
| 341
| 0.733844
|
e1357de3acc6500f0b0e8c1ab62aa06d2e96bc43
| 266
|
py
|
Python
|
app/api/__init__.py
|
jakuboskera/guestbook
|
78d154560f669caf1f81561feb389c9aaaa6a24e
|
[
"Apache-2.0"
] | null | null | null |
app/api/__init__.py
|
jakuboskera/guestbook
|
78d154560f669caf1f81561feb389c9aaaa6a24e
|
[
"Apache-2.0"
] | 1
|
2022-02-07T21:19:13.000Z
|
2022-02-07T21:19:13.000Z
|
app/api/__init__.py
|
jakuboskera/guestbook
|
78d154560f669caf1f81561feb389c9aaaa6a24e
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
from flask_restx import Api
from app.api.entries import api as entries
blueprint = Blueprint("api", __name__)
api = Api(
blueprint, title="Guestbook", version="1.0", description="Guestbook simple API"
)
api.add_namespace(entries)
| 20.461538
| 83
| 0.759398
|
54de2dd9f1a670b4f039fb697bd512d0ad59bb51
| 553
|
py
|
Python
|
examples/animations/nonvarinterp.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
examples/animations/nonvarinterp.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
examples/animations/nonvarinterp.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
from coldtype import *
mdpb = Font.Cacheable("~/Type/fonts/fonts/MDNichrome0.7-Black.otf")
mdpl = Font.Cacheable("~/Type/fonts/fonts/MDNichrome0.7-Light.otf")
r = Rect(1080, 1080)
def build(font, **kwargs):
return (StSt("Inter-\npolation",
font, 250, leading=50, **kwargs)
.align(r)
.pen())
a = build(mdpb)
b = build(mdpl)
@animation(r, timeline=Timeline(120))
def nonvarinterp(f):
return (a.interpolate(0, b).f(None).s(hsl(0.9, 1)).sw(3)
.mod_contour(8, lambda p:
p.rotate(-360*f.e("l", 7, 0))))
| 26.333333
| 67
| 0.614828
|
0676be4a445469b6f9436f6c893398c720812ae4
| 109
|
py
|
Python
|
exercises/en/solution_penguins.py
|
UBC-MDS/exploratory-data-viz
|
83b704ce10d1ff5e10bfd4cdfa872ac52993fd54
|
[
"CC-BY-4.0"
] | null | null | null |
exercises/en/solution_penguins.py
|
UBC-MDS/exploratory-data-viz
|
83b704ce10d1ff5e10bfd4cdfa872ac52993fd54
|
[
"CC-BY-4.0"
] | 88
|
2020-12-04T06:56:51.000Z
|
2021-05-10T22:02:45.000Z
|
exercises/en/solution_penguins.py
|
flor14/ds-toolbox
|
5ff3730b114a96a3ea4491af74d0a0ba33385054
|
[
"CC-BY-4.0"
] | 4
|
2021-01-13T09:30:57.000Z
|
2021-08-03T20:49:31.000Z
|
import altair as alt
import pandas as pd
penguins_df = pd.read_csv('data/penguins.csv')
penguins_df.info()
| 15.571429
| 46
| 0.770642
|
24446d8805320eebfca7c4364ee1cccd06a10218
| 371
|
py
|
Python
|
scripts/NYU/_solarized.py
|
amueller/curfil
|
47c97be43abe62035f4da290276176f0120c0be0
|
[
"MIT"
] | 2
|
2015-04-14T13:43:43.000Z
|
2020-03-13T20:56:42.000Z
|
scripts/NYU/_solarized.py
|
ferasha/curfil
|
f8c257dcb3a74aaa5c25eaa91c29a6dcbad04211
|
[
"MIT"
] | null | null | null |
scripts/NYU/_solarized.py
|
ferasha/curfil
|
f8c257dcb3a74aaa5c25eaa91c29a6dcbad04211
|
[
"MIT"
] | null | null | null |
colors = [
(0, 43, 54),
(7, 54, 66), # floor
(88, 110, 117),
(101, 123, 131),
(131, 148, 150),
(147, 161, 161), # structure
(238, 232, 213),
(253, 246, 227),
(181, 137, 0), # prop
(203, 75, 22), # furniture
(220, 50, 47),
(211, 54, 130),
(108, 113, 196),
(38, 139, 210),
(42, 161, 152),
(133, 153, 0)
]
| 19.526316
| 33
| 0.423181
|
53da66bc12a9ca7bb175e4dcb7e4b5f385b5ef19
| 86
|
py
|
Python
|
workflower/plugins/tableau_document/__init__.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
workflower/plugins/tableau_document/__init__.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
workflower/plugins/tableau_document/__init__.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
from workflower.plugins.tableau_document.plugin import (
TableauDocumentPlugin,
)
| 21.5
| 56
| 0.813953
|
fe3562bfcc48bef27b69eb221c9228c50913cb31
| 3,933
|
py
|
Python
|
ai_project.py
|
chinmaypatil2442/Sentiment-Analysis
|
99faf7ee1f8fe56b28364e4b5118f38f0bfa308d
|
[
"MIT"
] | null | null | null |
ai_project.py
|
chinmaypatil2442/Sentiment-Analysis
|
99faf7ee1f8fe56b28364e4b5118f38f0bfa308d
|
[
"MIT"
] | null | null | null |
ai_project.py
|
chinmaypatil2442/Sentiment-Analysis
|
99faf7ee1f8fe56b28364e4b5118f38f0bfa308d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""AI_Project.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/168iKzn1_I4CBDVbzs895IgK5wFh0EDSE
"""
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import twitter_samples, stopwords
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
from nltk import FreqDist, classify, NaiveBayesClassifier
import nltk
import re, string, random
def remove_noise(tweet_tokens, stop_words = ()):
cleaned_tokens = []
for token, tag in pos_tag(tweet_tokens):
token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|'\
'(?:%[0-9a-fA-F][0-9a-fA-F]))+','', token)
token = re.sub("(@[A-Za-z0-9_]+)","", token)
if tag.startswith("NN"):
pos = 'n'
elif tag.startswith('VB'):
pos = 'v'
else:
pos = 'a'
lemmatizer = WordNetLemmatizer()
token = lemmatizer.lemmatize(token, pos)
if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:
cleaned_tokens.append(token.lower())
return cleaned_tokens
def get_all_words(cleaned_tokens_list):
for tokens in cleaned_tokens_list:
for token in tokens:
yield token
def get_tweets_for_model(cleaned_tokens_list):
for tweet_tokens in cleaned_tokens_list:
yield dict([token, True] for token in tweet_tokens)
def non_sarcasm_detection():
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('twitter_samples')
nltk.download('punkt')
nltk.download('stopwords')
positive_tweets = twitter_samples.strings('positive_tweets.json')
negative_tweets = twitter_samples.strings('negative_tweets.json')
text = twitter_samples.strings('tweets.20150430-223406.json')
tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]
stop_words = stopwords.words('english')
positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')
negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')
positive_cleaned_tokens_list = []
negative_cleaned_tokens_list = []
for tokens in positive_tweet_tokens:
positive_cleaned_tokens_list.append(remove_noise(tokens, stop_words))
for tokens in negative_tweet_tokens:
negative_cleaned_tokens_list.append(remove_noise(tokens, stop_words))
all_pos_words = get_all_words(positive_cleaned_tokens_list)
freq_dist_pos = FreqDist(all_pos_words)
print(freq_dist_pos.most_common(10))
positive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list)
negative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list)
positive_dataset = [(tweet_dict, "Positive")
for tweet_dict in positive_tokens_for_model]
negative_dataset = [(tweet_dict, "Negative")
for tweet_dict in negative_tokens_for_model]
dataset = positive_dataset + negative_dataset
random.shuffle(dataset)
train_data = dataset[:7000]
test_data = dataset[7000:]
classifier = NaiveBayesClassifier.train(train_data)
print("Accuracy is:", classify.accuracy(classifier, test_data))
# print(classifier.show_most_informative_features(10))
# custom_tweet = "Happy to meet you at last, Yully."
# custom_tokens = remove_noise(word_tokenize(custom_tweet))
# print(custom_tweet, classifier.classify(dict([token, True] for token in custom_tokens)))
# Positive Emotion – Happy, Excited, Surprised, Joy, Love, Pride
# Negative Emotion – Anger, Sadness, Frustration, Scare, Shame, etc.
custom_tweet = "The children are sad today."
custom_tokens = remove_noise(word_tokenize(custom_tweet))
print(custom_tweet, classifier.classify(dict([token, True] for token in custom_tokens)))
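# Editorial addition (not in the original script): a minimal entry point so the
# module can also be run directly; non_sarcasm_detection() does all the work.
if __name__ == "__main__":
    non_sarcasm_detection()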
| 33.615385
| 98
| 0.709636
|
0ae5e292197060f32eea30fcda0a47fabccc41e8
| 2,684
|
py
|
Python
|
paddlenlp/transformers/distilbert/tokenizer.py
|
pangyoki/PaddleNLP
|
2afd760d0e548aae443b5c313f421e08de7398c3
|
[
"Apache-2.0"
] | 1
|
2022-01-13T00:54:33.000Z
|
2022-01-13T00:54:33.000Z
|
paddlenlp/transformers/distilbert/tokenizer.py
|
pangyoki/PaddleNLP
|
2afd760d0e548aae443b5c313f421e08de7398c3
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/distilbert/tokenizer.py
|
pangyoki/PaddleNLP
|
2afd760d0e548aae443b5c313f421e08de7398c3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..bert.tokenizer import BertTokenizer
__all__ = ['DistilBertTokenizer']
class DistilBertTokenizer(BertTokenizer):
"""
Constructs a DistilBert tokenizer. The usage of DistilBertTokenizer is the same as
`BertTokenizer <https://paddlenlp.readthedocs.io/zh/latest/source/paddlenlp.transformers.bert.tokenizer.html>`__.
For more information regarding those methods, please refer to this superclass.
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"distilbert-base-uncased":
"https://paddlenlp.bj.bcebos.com/models/transformers/distilbert/distilbert-base-uncased-vocab.txt",
"distilbert-base-cased":
"https://paddlenlp.bj.bcebos.com/models/transformers/distilbert/distilbert-base-cased-vocab.txt",
}
}
pretrained_init_configuration = {
"distilbert-base-uncased": {
"do_lower_case": True
},
"distilbert-base-cased": {
"do_lower_case": False
},
}
def __call__(self,
text,
text_pair=None,
max_seq_len=None,
stride=0,
is_split_into_words=False,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=False,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
return super(DistilBertTokenizer, self).__call__(
text, text_pair, max_seq_len, stride, is_split_into_words,
pad_to_max_seq_len, truncation_strategy, return_position_ids,
return_token_type_ids, return_attention_mask, return_length,
return_overflowing_tokens, return_special_tokens_mask)
| 41.9375
| 117
| 0.672131
|
19f03d2a5bd26fca75c5110a73249cdff7a6600a
| 3,406
|
py
|
Python
|
039_Combination_Sum.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
039_Combination_Sum.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
039_Combination_Sum.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
# Backtracking
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
if not candidates:
return
candidates.sort()
paths = []
results = []
index = 0
cursum = 0
# paths, results, index, candidates, cursum, target
self.dfs(paths,results,index,candidates,cursum, target)
return results
def dfs(self,paths, results,index,candidates,cursum,target):
if cursum > target:
return
# append path must use list to new a paths
if cursum == target:
results.append(list(paths))
return
for i in range(index,len(candidates)):
paths.append(candidates[i])
cursum += candidates[i]
self.dfs(paths,results,i,candidates,cursum,target)
paths.pop()
cursum -= candidates[i]
# https://www.jiuzhang.com/problem/combination-sum/
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
results = []
def backtrack(remain,comb,start):
if remain == 0:
# make a deep copy of the current combination
results.append(list(comb))
return
# not satify condition
elif remain <0:
# exceed the scope, stop exploration.
return
for i in range(start,len(candidates)):
# add the number into the combination
comb.append(candidates[i])
# give the current number another chance, rather than moving on
backtrack(remain-candidates[i],comb,i)
# backtrack, remove the number from the combination
comb.pop()
backtrack(target,[],0)
return results
# Refer from
# https://leetcode.com/problems/combination-sum/solution/
# Time: O(N^(T/M + 1))
# Let N be the number of candidates, T be the target value, and M be the minimal value among the candidates.
# Space:O(T/M)
# V2
class Solution(object):
def combinationSum(self, candidates, target):
ret = []
self.dfs(candidates, target, [], ret)
return ret
def dfs(self, nums, target, path, ret):
if target < 0:
return
if target == 0:
ret.append(path)
return
for i in range(len(nums)):
            # Here we have to use concatenation: append would mutate the shared path
            # list, and since lists are passed by reference that would corrupt other branches
self.dfs(nums[i:], target-nums[i], path+[nums[i]], ret)
# V3
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
res = []
self.backtrack(0,candidates,target,[],res)
return res
def backtrack(self,start,candidates,target,path,res):
if target <0:
return
if target ==0:
res.append(path)
return
for i in range(start,len(candidates)):
self.backtrack(i,candidates,target-candidates[i],path+[candidates[i]],res)
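# Quick sanity check (editorial addition, not part of the original file),
# using the classic example from the problem statement:
#
#   >>> Solution().combinationSum([2, 3, 6, 7], 7)
#   [[2, 2, 3], [7]]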
| 27.918033
| 108
| 0.53347
|
657a153b4fc667a1d7e8530a7b8862d6ca0e3327
| 885
|
py
|
Python
|
plugins/qrcode.py
|
PouriaDev/Python-Project
|
537c78a2099374e3280c61ce20458399d258ec60
|
[
"MIT"
] | 15
|
2016-09-25T12:19:52.000Z
|
2020-08-29T00:15:22.000Z
|
plugins/qrcode.py
|
PouriaDev/Python-Project
|
537c78a2099374e3280c61ce20458399d258ec60
|
[
"MIT"
] | 1
|
2017-12-18T01:36:48.000Z
|
2017-12-18T04:57:36.000Z
|
plugins/qrcode.py
|
PouriaDev/Python-Project
|
537c78a2099374e3280c61ce20458399d258ec60
|
[
"MIT"
] | 20
|
2016-09-29T08:50:56.000Z
|
2021-10-03T19:50:00.000Z
|
# -*- coding: utf-8 -*-
@bot.message_handler(commands=['qrcode', 'Qrcode'])
def qr_image(message):
userlang = redisserver.get("settings:user:language:" + str(message.from_user.id))
userid = message.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if len(message.text.replace("◻️ QR Code", "", 1).split()) < 2:
bot.reply_to(message, language[userlang]["QRCODE_NEA_MSG"], parse_mode="Markdown")
return
argus = message.text.replace("/qrcode ","").replace(" ", "%20")
bot.reply_to(message, "Processing..")
urllib.urlretrieve("http://api.qrserver.com/v1/create-qr-code/?data={}&size=600x600".format(argus), 'qrcode.png')
# print("http://apimeme.com/meme?meme={}&top={}&bottom={}".format(args[0], args[1], args[2]))
    bot.send_photo(message.chat.id, open('qrcode.png', 'rb'), caption=" QR Code by @TheZigZagBot")
| 49.166667
| 115
| 0.680226
|
ac30ba323327540a506c86bb551be81fc5f2667d
| 2,057
|
py
|
Python
|
aoc/d04/main.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
aoc/d04/main.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
aoc/d04/main.py
|
klittlepage/aoc2020
|
7135ac08263480a8cc9d6536d7caeb26bf85ae4f
|
[
"MIT"
] | null | null | null |
import itertools as it
import re
from typing import cast, Dict, IO, Iterator, Tuple
import aoc.common as common
_REQUIRED_FIELDS = set(['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'])
def parse(input_file: IO) -> Iterator[Dict[str, str]]:
yield from (dict(cast(Tuple[str, str], tuple(pair.split(':'))) for pair in
it.chain(*(line.split(' ') for line in chunk))) for
chunk in common.read_chunked(input_file))
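# Editorial sketch (not in the original file): parse() yields one dict per
# blank-line-separated passport block, so a chunk like
#   ['ecl:gry pid:860033327 byr:1937', 'iyr:2017 hgt:183cm']
# would become {'ecl': 'gry', 'pid': '860033327', 'byr': '1937',
#               'iyr': '2017', 'hgt': '183cm'}.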
def fields_present(x: Dict[str, str]) -> bool:
res = _REQUIRED_FIELDS - set(x.keys())
return not res or res == set(['cid'])
def p_1(input_file: IO,
debug=False): # pylint: disable=unused-argument
return sum((fields_present(x) for x in parse(input_file)))
def p_2(input_file: IO,
debug=False): # pylint: disable=unused-argument
year_regex = re.compile(r"^[0-9]{4,4}$")
height_regex = re.compile(r"^([0-9]{2,3})(in|cm)$")
hair_regex = re.compile(r"^#[0-9a-f]{6,6}$")
eye_regex = re.compile(r"^(amb|blu|brn|gry|grn|hzl|oth)$")
pid_regex = re.compile(r"^[0-9]{9,9}$")
# pylint: disable=too-many-return-statements
def is_valid(x: Dict[str, str]) -> bool:
if not (year_regex.match(x['byr']) and 1920 <= int(x['byr']) <= 2002):
return False
        if not (year_regex.match(x['iyr']) and 2010 <= int(x['iyr']) <= 2020):
return False
        if not (year_regex.match(x['eyr']) and 2020 <= int(x['eyr']) <= 2030):
return False
if not height_regex.match(x['hgt']):
return False
if x['hgt'].endswith('in') and not 59 <= int(x['hgt'][:-2]) <= 76:
return False
if x['hgt'].endswith('cm') and not 150 <= int(x['hgt'][:-2]) <= 193:
return False
if not hair_regex.match(x['hcl']):
return False
if not eye_regex.match(x['ecl']):
return False
if not pid_regex.match(x['pid']):
return False
return True
return sum((is_valid(x) for x in parse(input_file) if fields_present(x)))
| 34.864407
| 78
| 0.576082
|
dfad5cc708c191bd8b5ee4c551e65196854f86b2
| 3,059
|
py
|
Python
|
bcp/files.py
|
mikealfare/bcp
|
5d43acaa89474cf31648b80f3f0c5b0d4ed2bb2f
|
[
"MIT"
] | 3
|
2019-08-14T15:38:40.000Z
|
2019-11-13T06:15:53.000Z
|
bcp/files.py
|
mikealfare/bcp
|
5d43acaa89474cf31648b80f3f0c5b0d4ed2bb2f
|
[
"MIT"
] | 7
|
2019-08-24T17:16:20.000Z
|
2019-12-27T12:46:33.000Z
|
bcp/files.py
|
mikealfare/bcp
|
5d43acaa89474cf31648b80f3f0c5b0d4ed2bb2f
|
[
"MIT"
] | null | null | null |
"""
This module contains data structures required to create and access files. Users will generally only need to use
DataFile directly. LogFile and ErrorFile are used indirectly by the BCP classes.
Example:
.. code-block:: python
from bcp import DataFile
# create a csv to write out my data
my_file = DataFile(delimiter=',')
print(my_file.path) # %HOME%/bcp/data/<timestamp>.csv
"""
import abc
import datetime
from pathlib import Path
from .config import BCP_LOGGING_DIR, BCP_DATA_DIR
class File(abc.ABC):
"""
This data structure creates a file handle given a file path.
If the file path is not provided:
- the current timestamp is used so that unique error, log, and data files can be created
- the file will be created in the BCP_ROOT_DIR directory specified in config.py
"""
_default_extension = None
_default_directory = None
_file = None
@property
def file(self) -> Path:
return self._file
@file.setter
def file(self, value: Path = None):
"""
This method generates a default file path object if none is provided
Returns:
a Path object that points to the file
"""
if value is not None:
self._file = value
else:
timestamp_format: str = '%Y_%m_%d_%H_%M_%S_%f'
timestamp = datetime.datetime.now()
file_name: str = '.'.join([timestamp.strftime(timestamp_format), self._default_extension])
self._file = self._default_directory / Path(file_name)
@property
def path(self) -> Path:
return self.file.absolute()
class DataFile(File):
"""
This is a handle to a data file.
Args:
file_path: the path object to the file, if not provided, a default using the current timestamp will be created
delimiter: the field delimiter for the data file
"""
def __init__(self, file_path: Path = None, delimiter: str = None):
self._default_directory = BCP_DATA_DIR
self.delimiter = delimiter or '\t'
if self.delimiter == '\t':
self._default_extension = 'tsv'
elif self.delimiter == ',':
self._default_extension = 'csv'
else:
self._default_extension = 'dat'
self.file = file_path
class LogFile(File):
"""
This is a handle to a log file.
Args:
file_path: the path object to the file, if not provided, a default using the current timestamp will be created
"""
def __init__(self, file_path: Path = None):
self._default_directory = BCP_LOGGING_DIR
self._default_extension = 'log'
self.file = file_path
class ErrorFile(File):
"""
This is a handle to an error file.
Args:
file_path: the path object to the file, if not provided, a default using the current timestamp will be created
"""
def __init__(self, file_path: Path = None):
self._default_directory = BCP_DATA_DIR
self._default_extension = 'err'
self.file = file_path
| 29.699029
| 118
| 0.645309
|
da714ae373960a4b32b956c6a70855cb170d489b
| 10,527
|
py
|
Python
|
google/ads/googleads/v9/services/services/keyword_plan_idea_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/services/keyword_plan_idea_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/services/keyword_plan_idea_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.services.types import keyword_plan_idea_service
from .base import KeywordPlanIdeaServiceTransport, DEFAULT_CLIENT_INFO
class KeywordPlanIdeaServiceGrpcTransport(KeywordPlanIdeaServiceTransport):
"""gRPC backend transport for KeywordPlanIdeaService.
Service to generate keyword ideas.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def generate_keyword_ideas(
self,
) -> Callable[
[keyword_plan_idea_service.GenerateKeywordIdeasRequest],
keyword_plan_idea_service.GenerateKeywordIdeaResponse,
]:
r"""Return a callable for the generate keyword ideas method over gRPC.
Returns a list of keyword ideas.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`HeaderError <>`__ `InternalError <>`__
`KeywordPlanIdeaError <>`__ `QuotaError <>`__
`RequestError <>`__
Returns:
Callable[[~.GenerateKeywordIdeasRequest],
~.GenerateKeywordIdeaResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_keyword_ideas" not in self._stubs:
self._stubs[
"generate_keyword_ideas"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.KeywordPlanIdeaService/GenerateKeywordIdeas",
request_serializer=keyword_plan_idea_service.GenerateKeywordIdeasRequest.serialize,
response_deserializer=keyword_plan_idea_service.GenerateKeywordIdeaResponse.deserialize,
)
return self._stubs["generate_keyword_ideas"]
__all__ = ("KeywordPlanIdeaServiceGrpcTransport",)
| 41.608696
| 104
| 0.624204
|
d6d97c623bb58f49ff38bfceec2bf7a18332e877
| 1,053
|
py
|
Python
|
neptune/new/integrations/pytorch_lightning/__init__.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 254
|
2020-01-27T14:18:57.000Z
|
2022-03-31T21:40:33.000Z
|
neptune/new/integrations/pytorch_lightning/__init__.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 160
|
2020-02-05T11:00:22.000Z
|
2022-03-31T08:50:24.000Z
|
neptune/new/integrations/pytorch_lightning/__init__.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 23
|
2020-02-07T09:19:50.000Z
|
2022-02-15T09:52:56.000Z
|
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
# pylint: disable=import-error
from pytorch_lightning.loggers import NeptuneLogger
except ModuleNotFoundError as e:
if e.name == "pytorch_lightning":
from neptune.new.exceptions import NeptuneIntegrationNotInstalledException
raise NeptuneIntegrationNotInstalledException(
integration_package_name="pytorch-lightning",
framework_name="pytorch-lightning",
) from None
else:
raise
| 35.1
| 82
| 0.733143
|
df7c555603b8d0443283d4e4880ee6ed5aba9ae0
| 1,344
|
py
|
Python
|
setup.py
|
morganbye/finances_at_home
|
03896e5e84112843f88c99cb4639af7f9bf81550
|
[
"MIT"
] | null | null | null |
setup.py
|
morganbye/finances_at_home
|
03896e5e84112843f88c99cb4639af7f9bf81550
|
[
"MIT"
] | null | null | null |
setup.py
|
morganbye/finances_at_home
|
03896e5e84112843f88c99cb4639af7f9bf81550
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.md') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Morgan Bye",
author_email='morgan@morganbye.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Everything to do my finances at home",
install_requires=requirements,
license="GNU General Public License v3",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='finances_at_home',
name='finances_at_home',
packages=find_packages(include=['finances_at_home']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/morganbye/finances_at_home',
version='0.1.0',
zip_safe=False,
)
| 28.595745
| 75
| 0.665923
|
c338be8224a07c2e4ae1e1e5cd96992f6f9b1ab2
| 21,294
|
py
|
Python
|
open_worm_analysis_toolbox/statistics/histogram.py
|
suzil/open-worm-analysis-toolbox
|
ea1d3cddcefb6724e3d531cbced32092ac431411
|
[
"MIT"
] | 35
|
2016-03-02T06:32:16.000Z
|
2021-12-17T22:57:22.000Z
|
open_worm_analysis_toolbox/statistics/histogram.py
|
suzil/open-worm-analysis-toolbox
|
ea1d3cddcefb6724e3d531cbced32092ac431411
|
[
"MIT"
] | 80
|
2015-12-30T21:55:57.000Z
|
2021-03-13T10:38:47.000Z
|
open_worm_analysis_toolbox/statistics/histogram.py
|
suzil/open-worm-analysis-toolbox
|
ea1d3cddcefb6724e3d531cbced32092ac431411
|
[
"MIT"
] | 17
|
2016-02-19T09:25:39.000Z
|
2021-12-17T22:57:33.000Z
|
# -*- coding: utf-8 -*-
"""
TODO: Most of the attributes here are not computed initially.
We should add a paragraph or two regarding design decisions made here
This code is generally interfaced with by the HistogramManager.
The Histogram class, and its subclass, MergedHistogram:
MergedHistogram contains a constructor that accepts a list
of Histogram objects
Notes
-------------------------
Formerly SegwormMatlabClasses / +seg_worm / +stats / @hist / hist.m
https://github.com/JimHokanson/SegwormMatlabClasses/blob/master/
%2Bseg_worm/%2Bstats/%40hist/hist.m
"""
import scipy as sp
import numpy as np
from .. import config, utils
#%%
class Histogram(object):
"""
Encapsulates the notion of a single histogram for a single feature.
All bins in this histogram have an equal bin width.
A Histogram object can be created by one of two class methods:
1. create_histogram factory method, which accepts raw data.
2. merged_histogram_factory method, which accepts a list of
histograms and then returns a new, merged, histogram from them.
Attributes
-----------------
name
data: numpy array - this may contain NaN values
specs: open_worm_analysis_toolbox.features.worm_features.FeatureProcessingSpec
histogram_type: str
motion_type: str
data_type: str
counts: numpy array of ints
pdf: numpy array of floats
mean: float
num_samples: int
bin_boundaries: numpy array
bin_midpoints: numpy array
first_bin_midpoint: float
last_bin_midpoint: float
Notes
-----------------
TODO: Missing Features:
- saving to disk
- version comparison
- allow loading from a saved file
"""
#%%
def __init__(self, feature):
"""
Initializer
Parameters
----------
data: numpy array
The data to be counted for the histogram
specs: instance of Specs class
histogram_type: string
histogram type # 'motion', 'simple', 'event'
motion_type: string
# 'all', 'forward', 'paused', 'backward'
data_type: string
# 'all', 'absolute', 'positive', 'negative'
This is an additional filter on the values of the data.
Either all values are included or:
- the absolute values are used
- only positive values are used
- only negative values are used
"""
"""
Example Feature:
name: morphology.length
value: Type::ndarray, Len: 4642
computation_time: 0.0
is_temporary: False
spec: Type::FeatureProcessingSpec, Len: 1
is_user_requested: True
missing_from_disk: False
missing_dependency: False
empty_video: False
no_events: False
"""
# The underlying data itself
self.data = feature.value
#JAH: Any requirements on the data???
# - ideally we would have a NaN version here
# - self.valid_data
#open_worm_analysis_toolbox.features.worm_features.FeatureProcessingSpec
self.specs = feature.spec
self.name = self.specs.name
# JAH TODO: Should this be added to the spec in the expansion?
# TODO: This should also work without expanded features, so this
# would need to be in the default spec as well (or have defaults in
# the code)
#
# Maybe this could be made generic such as "feature_manipulations"
# with a list of strings that get concatenated together
#
# Yes - although the name has been made unique by feature expansion
# (if done)
if self.data is not None:
# Find a set of bins that will cover the data
# i.e. populate self.bin_boundaries
self.compute_covering_bins()
#%%
@classmethod
def create_histogram(cls, feature):
"""
Factory method to create a Histogram instance.
The only thing this does beyond the Histogram constructor is
to check if the data is empty.
Parameters
------------------
data: numpy array
specs: instance of Specs class
histogram_type:
motion_type:
data_type:
Returns
------------------
An instance of the Histogram class, prepared with
the data provided.
Notes
------------------
Formerly located in HistogramManager, and called:
function obj = h__createIndividualObject(self, data, specs,
histogram_type, motion_type,
data_type)
"""
data = feature.value
if data is None or not isinstance(data, np.ndarray) or data.size == 0:
#Is this what we want??? - what about just keeping meta data???
return None
else:
return cls(feature)
#%%
@property
def num_samples(self):
try:
return self._num_samples
except AttributeError:
self._num_samples = len(self.data)
return self._num_samples
def __repr__(self):
return utils.print_object(self)
@property
def description(self):
"""
A longer version of the name, suitable for use as the title
of a histogram plot.
"""
return (self.specs.long_field + ' ' +
', motion_type:' + self.motion_type +
', data_type: ' + self.data_type)
@property
def first_bin_midpoint(self):
return self.bin_midpoints[0]
@property
def last_bin_midpoint(self):
return self.bin_midpoints[-1]
@property
def num_bins(self):
"""
An integer; the number of bins.
"""
return len(self.bin_midpoints)
#%%
def compute_covering_bins(self):
"""
Compute histogram bin boundaries that will be enough to cover
the given data
Parameters
----------
None, but we will use member variables:
self.data: numpy array
This is the data for which we must have enough bins to cover
self.bin_width: float
The width of the bins
Returns
-------
None
However, self.bin_boundaries, a numpy array, is populated.
The bin_boundaries are the boundaries of the bins that will
accommodate the data given.
All bins are right half-open except the last, which is closed.
i.e. if the array edges = (a1, a2, ..., a(n+1) was returned,
there are n bins and
bin #1 = [a1, a2)
bin #2 = [a2, a3)
...
bin #n = [an, an+1]
Notes
-----
This version may have one more bin than the previous version, but
this one is MUCH simpler and merging should be much simpler as edges
should always align ...
- min -65.4
- max 20.01
- bin_width 1
- Old:
- boundaries -65.5 to 20.5
- New:
- boundaries -70 to 21
Formerly:
function [bins,edges] = h__computeBinInfo(data,bin_width)
"""
bin_width = self.specs.bin_width
# Compute the data range. We apply np.ravel because for some reason
# with posture.bends.head.mean the data was coming in like:
# >> self.data
# array([[-33.1726576 ], [-33.8501644 ],[-32.60058523], ...])
# Applying ravel removes any extraneous array structure so it becomes:
# array([-33.1726576, -33.8501644, -32.60058523, ...])
min_data = np.nanmin(np.ravel(self.data))
max_data = np.nanmax(np.ravel(self.data))
#Is this valid??????
#JAH 2018/09 - this is a bit of a patch ...
#No valid data gives min and max as NaNs
if np.isnan(min_data):
#Note: for arange, the exact stopping value is not respected ...
self.bin_boundaries = np.arange(0,1.01*bin_width,bin_width)
return
# Let's "snap the bins to a grid" if you will, so that they will
# line up when we try to merge multiple histograms later.
# so if the bin_width = 2 and the min_data = 11, we will
# start the first bin at 10, since that is a multiple of the
# bin width.
min_boundary = np.floor(min_data / bin_width) * bin_width
max_boundary = np.ceil(max_data / bin_width) * bin_width
# If we have a singular value, then we will get a singular edge,
# which isn't good for binning. We always need to make sure that
# our data is bounded by a high and low end. Given how hist works
# (it is inclusive on the low end, when we only have one edge we
# add a second edge by increasing the high end, NOT by decreasing
# the low end.
#
# i.e. In Matlab you can't bound 3 by having edges at 2 & 3, the
# edges would need to be at 3 & 4
if min_boundary == max_boundary:
max_boundary = min_boundary + bin_width
num_bins = (max_boundary - min_boundary) / bin_width
if num_bins > config.MAX_NUMBER_BINS:
raise Exception("Given the specified resolution of " +
str(bin_width) + ", the number of data " +
"bins exceeds the maximum, which has been " +
"set to MAX_NUMBER_BINS = " +
str(config.MAX_NUMBER_BINS))
self.bin_boundaries = np.arange(min_boundary,
max_boundary + bin_width,
step=bin_width)
# Because of the nature of floating point figures we can't guarantee
# that these asserts work without the extra buffer of + self.bin_width
# (though this bound could probably be greatly improved)
assert(min_data >= self.bin_boundaries[0] - bin_width)
assert(max_data <= self.bin_boundaries[-1] + bin_width)
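# Worked example of the snapping above (hypothetical values): with
# bin_width = 2 and data spanning [11.3, 16.9],
#   min_boundary = floor(11.3 / 2) * 2 = 10
#   max_boundary = ceil(16.9 / 2) * 2  = 18
# so bin_boundaries becomes [10, 12, 14, 16, 18] -- every edge lands on a
# multiple of the bin width, which is what lets histograms merge later.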
@property
def bin_width(self):
"""
float
"""
return self.specs.bin_width
@property
def bin_midpoints(self):
"""
Return a numpy array of the midpoints of all the bins.
"""
try:
return self._bin_midpoints
except AttributeError:
self._bin_midpoints = (self.bin_boundaries[:-1] +
self.specs.bin_width / 2)
return self._bin_midpoints
@property
def counts(self):
"""
The actual counts for the bins given the data.
Returns
----------------
numpy array of int
The values of the histogram
"""
try:
return self._counts
except AttributeError:
try:
self._counts, _ = np.histogram(self.data,
bins=self.bin_boundaries)
except ValueError:
self._counts, _ = np.histogram(self.data[~np.isnan(self.data)],
bins=self.bin_boundaries)
return self._counts
@property
def pdf(self):
"""
The probability distribution function (PDF).
A numpy array.
"""
try:
return self._pdf
except AttributeError:
if sum(self.counts) == 0:
# Handle the divide-by-zero case
self._pdf = None
else:
self._pdf = self.counts / sum(self.counts)
return self._pdf
#%%
@property
def mean(self):
"""
A float. The mean of the data.
"""
try:
return self._mean
except AttributeError:
self._mean = np.nanmean(self.data)
return self._mean
@property
def std(self):
"""
The standard deviation.
"""
try:
return self._std
except AttributeError:
num_samples = len(self.data)
if num_samples == 1:
self._std = 0
else:
self._std = np.nanstd(self.data)
#JAH 2018/09 => not handling nan properly, using nanstd instead
# We can optimize standard deviation computation
# since we've already calculated the mean above
#self._std = np.sqrt \
# (
# (1 / (num_samples - 1)) *
# sum((self.data - self.mean)**2)
# )
return self._std
@property
def num_videos(self):
"""
Return 1 since this is a simple Histogram, not one created from
the merging of multiple underlying Histograms.
"""
return 1
###############################################################################
#%%
class MergedHistogram(Histogram):
"""
A Histogram, plus some extra data about the individual histograms
that make it up.
Extra attributes:
--------------------
name
mean_per_video: numpy array of floats
The means of the original constituent histograms making up
this merged histogram.
std_per_video: numpy array of floats
Same as mean_per_video but for standard deviation.
num_samples_per_video: numpy array of ints
num_videos: int
num_valid_videos: int
all_videos_valid: bool
no_valid_videos: bool
p_normal: float
"""
def __init__(self, specs):
self.data = None
self.specs = specs
#super(MergedHistogram, self).__init__(data, specs)
#%%
@classmethod
def merged_histogram_factory(cls, histograms):
"""
Given a list of histograms, return a new Histogram instance
with all the bin counts merged.
This method assumes the specs and histogram_type, motion_type and
data_type for all the histograms in histograms are the same.
This method can merge histograms that are computed using different
bins. IMPORTANTLY, because of the way that we do the bin
definitions, bin edges will always match if they are close, i.e.
we'll NEVER have:
edges 1: 1,2,3,4,5
edges 2: 3.5,4.5,5.5,6.5
Instead we might have:
edges 2: 3,4,5,6,7,8 (i.e. edges align - in this case they share
data between 3-4 and 4-5)
This simplifies the merging process a bit. This is accomplished by
always setting bin edges at multiples of the bin_width. This was
not done in the original Schafer Lab code.
Parameters
------------------
histograms: a list of Histogram objects
Returns
------------------
A Histogram object or None
See Also
--------
HistogramManager.merge_histograms
"""
#Note: Some histograms may be none ...
histograms = [x for x in histograms if x is not None]
if len(histograms) == 0:
return None
# Create an output object with same meta properties
merged_hist = cls(specs=histograms[0].specs)
merged_hist.name = histograms[0].name
# Let's concatenate all the underlying data in case anyone downstream
# wants to see it. It's not needed for the bin and count calculation,
# since we do that efficiently by aligning the bins.
merged_hist.data = np.concatenate([x.data for x in histograms])
# Align all bins
# ---------------------------------------------------------------
num_bins = [x.num_bins for x in histograms]
first_bin_midpoints = np.array([x.first_bin_midpoint for x in histograms])
min_bin_midpoint = min(first_bin_midpoints)
max_bin_midpoint = max([x.last_bin_midpoint for x in histograms])
cur_bin_width = merged_hist.specs.bin_width
new_bin_midpoints = np.arange(min_bin_midpoint,
max_bin_midpoint + cur_bin_width,
step=cur_bin_width)
# Colon operator was giving warnings about non-integer indices :/
# - @JimHokanson
start_indices = ((first_bin_midpoints - min_bin_midpoint) /
cur_bin_width)
start_indices = start_indices.round()
end_indices = start_indices + num_bins
num_histograms = len(histograms)
new_counts = np.zeros((num_histograms, len(new_bin_midpoints)))
start_indices = start_indices.astype(np.int64)
end_indices = end_indices.astype(np.int64)
for i in range(num_histograms):
cur_start = start_indices[i]
if not np.isnan(cur_start):
cur_end = end_indices[i]
new_counts[i, cur_start:cur_end] = histograms[i].counts
num_samples_array = np.array([x.num_samples for x in histograms])
# Update final properties
# Note that each of these is now no longer a scalar as in the
# single-video case; it is now a numpy array
# ---------------------------------------------------------------
merged_hist._bin_midpoints = new_bin_midpoints
merged_hist._counts = new_counts
merged_hist._pdf = (sum(merged_hist._counts, 0) /
sum(num_samples_array))
merged_hist.mean_per_video = \
np.array([x.mean for x in histograms])
merged_hist.std_per_video = \
np.array([x.std for x in histograms])
merged_hist.num_samples_per_video = \
np.array([x.num_samples for x in histograms])
merged_hist._num_samples = sum(merged_hist.num_samples_per_video)
return merged_hist
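# Alignment example for the factory above (illustrative, hypothetical values):
# two histograms with bin_width = 1 and midpoints [3.5, 4.5, 5.5] and
# [5.5, 6.5] merge into midpoints [3.5, 4.5, 5.5, 6.5]; the second
# histogram's counts are written into new_counts starting at offset
# (5.5 - 3.5) / 1 = 2, so bins shared between videos line up column-wise.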
@property
def mean(self):
try:
return self._mean
except AttributeError:
self._mean = np.mean(self.mean_per_video)
return self._mean
@property
def std(self):
try:
return self._std
except AttributeError:
# TODO
# I'm pretty sure this is not very good; we should be calculating
# standard deviation from a concatenation of the underlying data,
# not from just the means of the videos. - @MichaelCurrie
self._std = np.std(self.mean_per_video)
return self._std
@property
def valid_videos_mask(self):
return ~np.isnan(self.mean_per_video)
@property
def valid_mean_per_video(self):
return self.mean_per_video[self.valid_videos_mask]
@property
def num_videos(self):
"""
The number of videos that this instance contains.
"""
return len(self.mean_per_video)
@property
def num_valid_videos(self):
"""
Returns
-----------
int
The number of non-NaN means across all stored video means.
Notes
-----------
Also known as num_samples, or n_samples.
"""
return sum(~np.isnan(self.mean_per_video))
@property
def all_videos_valid(self):
"""
Returns
------------------
boolean
True if there are no NaN means
False if there is even one NaN mean
"""
return all(~np.isnan(self.mean_per_video))
@property
def no_valid_videos(self):
"""
Returns
-----------
bool
True if no video had a valid (non-NaN) mean for this
Histogram.
"""
return self.num_valid_videos == 0
@property
def p_normal(self):
"""
Shapiro-Wilk normality test:
Estimate of the probability that the video means were drawn from
a normal distribution.
Returns
-----------
float
Notes
-----------
Formerly:
seg_worm.fex.swtest(data(i).dataMeans, 0.05, 0)
"""
# The try-except structure allows for lazy evaluation: we only
# compute the value the first time it's asked for, and then never
# again.
try:
return self._p_normal
except AttributeError:
if self.num_valid_videos < 3:
self._p_normal = np.NaN
else:
# Shapiro-Wilk parametric hypothesis test of composite normality.
# i.e. test the null hypothesis that the data was drawn from
# a normal distribution.
# Note: this is a two-sided test.
# The previous method was to use swtest(x, 0.05, 0) from
# Matlab Central: http://www.mathworks.com/matlabcentral/
# fileexchange/46548-hockey-stick-and-climate-change/
# content/Codes_data_publish/Codes/swtest.m
_, self._p_normal = sp.stats.shapiro(self.mean_per_video)
return self._p_normal
| 30.995633
| 82
| 0.563821
|
c3aa9597fa73edb1c24f7041d3d166875333b97b
| 18,773
|
py
|
Python
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/environment.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/environment.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/environment.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import test_case
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3beta1',
manifest={
'Environment',
'ListEnvironmentsRequest',
'ListEnvironmentsResponse',
'GetEnvironmentRequest',
'CreateEnvironmentRequest',
'UpdateEnvironmentRequest',
'DeleteEnvironmentRequest',
'LookupEnvironmentHistoryRequest',
'LookupEnvironmentHistoryResponse',
'ContinuousTestResult',
'RunContinuousTestRequest',
'RunContinuousTestResponse',
'RunContinuousTestMetadata',
'ListContinuousTestResultsRequest',
'ListContinuousTestResultsResponse',
'DeployFlowRequest',
'DeployFlowResponse',
'DeployFlowMetadata',
},
)
class Environment(proto.Message):
r"""Represents an environment for an agent. You can create
multiple versions of your agent and publish them to separate
environments. When you edit an agent, you are editing the draft
agent. At any point, you can save the draft agent as an agent
version, which is an immutable snapshot of your agent. When you
save the draft agent, it is published to the default
environment. When you create agent versions, you can publish
them to custom environments. You can create a variety of custom
environments for testing, development, production, etc.
Attributes:
name (str):
The name of the environment. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
display_name (str):
Required. The human-readable name of the
environment (unique in an agent). Limit of 64
characters.
description (str):
The human-readable description of the
environment. The maximum length is 500
characters. If exceeded, the request is
rejected.
version_configs (Sequence[google.cloud.dialogflowcx_v3beta1.types.Environment.VersionConfig]):
Required. A list of configurations for flow versions. You
should include version configs for all flows that are
reachable from [``Start Flow``][Agent.start_flow] in the
agent. Otherwise, an error will be returned.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Update time of this environment.
test_cases_config (google.cloud.dialogflowcx_v3beta1.types.Environment.TestCasesConfig):
The test cases config for continuous tests of
this environment.
"""
class VersionConfig(proto.Message):
r"""Configuration for the version.
Attributes:
version (str):
Required. Format: projects/<Project
ID>/locations/<Location ID>/agents/<Agent
ID>/flows/<Flow ID>/versions/<Version ID>.
"""
version = proto.Field(
proto.STRING,
number=1,
)
class TestCasesConfig(proto.Message):
r"""The configuration for continuous tests.
Attributes:
test_cases (Sequence[str]):
A list of test case names to run. They should be under the
same agent. Format of each test case name:
``projects/<Project ID>/locations/ <Location ID>/agents/<AgentID>/testCases/<TestCase ID>``
enable_continuous_run (bool):
Whether to run test cases in
[TestCasesConfig.test_cases][google.cloud.dialogflow.cx.v3beta1.Environment.TestCasesConfig.test_cases]
periodically. Default false. If set to true, run once a day.
enable_predeployment_run (bool):
Whether to run test cases in
[TestCasesConfig.test_cases][google.cloud.dialogflow.cx.v3beta1.Environment.TestCasesConfig.test_cases]
before deploying a flow version to the environment. Default
false.
"""
test_cases = proto.RepeatedField(
proto.STRING,
number=1,
)
enable_continuous_run = proto.Field(
proto.BOOL,
number=2,
)
enable_predeployment_run = proto.Field(
proto.BOOL,
number=3,
)
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
version_configs = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=VersionConfig,
)
update_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
test_cases_config = proto.Field(
proto.MESSAGE,
number=7,
message=TestCasesConfig,
)
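# Construction sketch (illustrative only; the resource names are placeholders):
#
#   env = Environment(
#       display_name="staging",
#       version_configs=[Environment.VersionConfig(
#           version="projects/p/locations/l/agents/a/flows/f/versions/1",
#       )],
#   )
#
# proto-plus messages accept keyword arguments matching the fields declared above.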
class ListEnvironmentsRequest(proto.Message):
r"""The request message for
[Environments.ListEnvironments][google.cloud.dialogflow.cx.v3beta1.Environments.ListEnvironments].
Attributes:
parent (str):
Required. The
[Agent][google.cloud.dialogflow.cx.v3beta1.Agent] to list
all environments for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListEnvironmentsResponse(proto.Message):
r"""The response message for
[Environments.ListEnvironments][google.cloud.dialogflow.cx.v3beta1.Environments.ListEnvironments].
Attributes:
environments (Sequence[google.cloud.dialogflowcx_v3beta1.types.Environment]):
The list of environments. There will be a maximum number of
items returned based on the page_size field in the request.
The list may in some cases be empty or contain fewer entries
than page_size even if this isn't the last page.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
environments = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Environment',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetEnvironmentRequest(proto.Message):
r"""The request message for
[Environments.GetEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.GetEnvironment].
Attributes:
name (str):
Required. The name of the
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateEnvironmentRequest(proto.Message):
r"""The request message for
[Environments.CreateEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.CreateEnvironment].
Attributes:
parent (str):
Required. The
[Agent][google.cloud.dialogflow.cx.v3beta1.Agent] to create
an
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
environment (google.cloud.dialogflowcx_v3beta1.types.Environment):
Required. The environment to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
environment = proto.Field(
proto.MESSAGE,
number=2,
message='Environment',
)
class UpdateEnvironmentRequest(proto.Message):
r"""The request message for
[Environments.UpdateEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.UpdateEnvironment].
Attributes:
environment (google.cloud.dialogflowcx_v3beta1.types.Environment):
Required. The environment to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields
get updated.
"""
environment = proto.Field(
proto.MESSAGE,
number=1,
message='Environment',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteEnvironmentRequest(proto.Message):
r"""The request message for
[Environments.DeleteEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.DeleteEnvironment].
Attributes:
name (str):
Required. The name of the
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LookupEnvironmentHistoryRequest(proto.Message):
r"""The request message for
[Environments.LookupEnvironmentHistory][google.cloud.dialogflow.cx.v3beta1.Environments.LookupEnvironmentHistory].
Attributes:
name (str):
Required. Resource name of the environment to look up the
history for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
name = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class LookupEnvironmentHistoryResponse(proto.Message):
r"""The response message for
[Environments.LookupEnvironmentHistory][google.cloud.dialogflow.cx.v3beta1.Environments.LookupEnvironmentHistory].
Attributes:
environments (Sequence[google.cloud.dialogflowcx_v3beta1.types.Environment]):
Represents a list of snapshots for an environment. Time of
the snapshots is stored in
[``update_time``][google.cloud.dialogflow.cx.v3beta1.Environment.update_time].
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
environments = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Environment',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ContinuousTestResult(proto.Message):
r"""Represents a result from running a test case in an agent
environment.
Attributes:
name (str):
The resource name for the continuous test result. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/continuousTestResults/<ContinuousTestResult ID>``.
result (google.cloud.dialogflowcx_v3beta1.types.ContinuousTestResult.AggregatedTestResult):
The result of this continuous test run, i.e.
whether all the tests in this continuous test
run pass or not.
test_case_results (Sequence[str]):
A list of individual test case results names
in this continuous test run.
run_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the continuous testing run starts.
"""
class AggregatedTestResult(proto.Enum):
r"""The overall result for a continuous test run in an agent
environment.
"""
AGGREGATED_TEST_RESULT_UNSPECIFIED = 0
PASSED = 1
FAILED = 2
name = proto.Field(
proto.STRING,
number=1,
)
result = proto.Field(
proto.ENUM,
number=2,
enum=AggregatedTestResult,
)
test_case_results = proto.RepeatedField(
proto.STRING,
number=3,
)
run_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
class RunContinuousTestRequest(proto.Message):
r"""The request message for
[Environments.RunContinuousTest][google.cloud.dialogflow.cx.v3beta1.Environments.RunContinuousTest].
Attributes:
environment (str):
Required. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
"""
environment = proto.Field(
proto.STRING,
number=1,
)
class RunContinuousTestResponse(proto.Message):
r"""The response message for
[Environments.RunContinuousTest][google.cloud.dialogflow.cx.v3beta1.Environments.RunContinuousTest].
Attributes:
continuous_test_result (google.cloud.dialogflowcx_v3beta1.types.ContinuousTestResult):
The result for a continuous test run.
"""
continuous_test_result = proto.Field(
proto.MESSAGE,
number=1,
message='ContinuousTestResult',
)
class RunContinuousTestMetadata(proto.Message):
r"""Metadata returned for the
[Environments.RunContinuousTest][google.cloud.dialogflow.cx.v3beta1.Environments.RunContinuousTest]
long running operation.
Attributes:
errors (Sequence[google.cloud.dialogflowcx_v3beta1.types.TestError]):
The test errors.
"""
errors = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=test_case.TestError,
)
class ListContinuousTestResultsRequest(proto.Message):
r"""The request message for
[Environments.ListContinuousTestResults][google.cloud.dialogflow.cx.v3beta1.Environments.ListContinuousTestResults].
Attributes:
parent (str):
Required. The environment to list results for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/ environments/<Environment ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListContinuousTestResultsResponse(proto.Message):
r"""The response message for [Environments.ListTestCaseResults][].
Attributes:
continuous_test_results (Sequence[google.cloud.dialogflowcx_v3beta1.types.ContinuousTestResult]):
The list of continuous test results.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
continuous_test_results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='ContinuousTestResult',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class DeployFlowRequest(proto.Message):
r"""The request message for
[Environments.DeployFlow][google.cloud.dialogflow.cx.v3beta1.Environments.DeployFlow].
Attributes:
environment (str):
Required. The environment to deploy the flow to. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/ environments/<Environment ID>``.
flow_version (str):
Required. The flow version to deploy. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/ flows/<Flow ID>/versions/<Version ID>``.
"""
environment = proto.Field(
proto.STRING,
number=1,
)
flow_version = proto.Field(
proto.STRING,
number=2,
)
class DeployFlowResponse(proto.Message):
r"""The response message for
[Environments.DeployFlow][google.cloud.dialogflow.cx.v3beta1.Environments.DeployFlow].
Attributes:
environment (google.cloud.dialogflowcx_v3beta1.types.Environment):
The updated environment where the flow is
deployed.
deployment (str):
The name of the flow version deployment. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/ environments/<Environment ID>/deployments/<Deployment ID>``.
"""
environment = proto.Field(
proto.MESSAGE,
number=1,
message='Environment',
)
deployment = proto.Field(
proto.STRING,
number=2,
)
class DeployFlowMetadata(proto.Message):
r"""Metadata returned for the
[Environments.DeployFlow][google.cloud.dialogflow.cx.v3beta1.Environments.DeployFlow]
long running operation.
Attributes:
test_errors (Sequence[google.cloud.dialogflowcx_v3beta1.types.TestError]):
Errors of running deployment tests.
"""
test_errors = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=test_case.TestError,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.926871
| 158
| 0.642891
|
9547e1617998802f3d5812e8a863227c7b5d1313
| 1,422
|
py
|
Python
|
config/settings/test.py
|
udrems/cyberexperiment
|
9ad9c9580cd9dd1c28c0d8e27975f5bca530e101
|
[
"MIT"
] | null | null | null |
config/settings/test.py
|
udrems/cyberexperiment
|
9ad9c9580cd9dd1c28c0d8e27975f5bca530e101
|
[
"MIT"
] | 4
|
2021-09-10T16:21:53.000Z
|
2021-09-17T18:04:22.000Z
|
config/settings/test.py
|
udrems/cyberexperiment
|
9ad9c9580cd9dd1c28c0d8e27975f5bca530e101
|
[
"MIT"
] | 2
|
2021-09-08T07:50:59.000Z
|
2021-09-08T16:15:26.000Z
|
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="Q6IK3vG2IT3tPPJ27SSG2P42HeI2qiZKcaUhlmmYGIGiAKUKvlKuzUeDctQK7xCm",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| 33.857143
| 80
| 0.510549
|
ce9b43f1ad0f8cc84fb01c56e69f9cfc71d0b87a
| 9,157
|
py
|
Python
|
tools/marvin/marvin/marvinInit.py
|
redbridge/cloudstack
|
2218053fb11d501950e4beb80e9bee4ae472b5b4
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2015-02-10T07:21:58.000Z
|
2021-05-07T08:52:17.000Z
|
tools/marvin/marvin/marvinInit.py
|
redbridge/cloudstack
|
2218053fb11d501950e4beb80e9bee4ae472b5b4
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2015-06-11T02:17:06.000Z
|
2015-06-22T20:46:42.000Z
|
tools/marvin/marvin/marvinInit.py
|
redbridge/cloudstack
|
2218053fb11d501950e4beb80e9bee4ae472b5b4
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2015-05-25T15:53:52.000Z
|
2018-05-23T14:08:07.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Initializes marvin and performs the prerequisites required
for starting it.
1. Parses the configuration file passed to marvin and creates a
parsed config.
2. Initializes the logging required for marvin. All logs are
now made available under a single timestamped folder.
3. Deploys the Data Center based upon input.
'''
from marvin.configGenerator import getSetupConfig
from marvin.marvinLog import MarvinLog
from marvin.deployDataCenter import DeployDataCenters
from marvin.cloudstackTestClient import CSTestClient
from marvin.cloudstackException import GetDetailExceptionInfo
from marvin.codes import(
XEN_SERVER,
SUCCESS,
FAILED
)
import os
class MarvinInit:
def __init__(self, config_file,
deploy_dc_flag=None,
test_mod_name="deploydc",
zone=None,
hypervisor_type=None,
user_logfolder_path=None):
self.__configFile = config_file
self.__deployFlag = deploy_dc_flag
self.__logFolderPath = None
self.__tcRunLogger = None
self.__testModName = test_mod_name
self.__testClient = None
self.__tcResultFile = None
self.__testDataFilePath = None
self.__zoneForTests = zone
self.__parsedConfig = None
self.__hypervisorType = hypervisor_type
self.__userLogFolderPath = user_logfolder_path
def __parseConfig(self):
'''
@Name: __parseConfig
@Desc : Parses the configuration file passed and assigns
the parsed configuration
@Output : SUCCESS or FAILED
'''
try:
if not os.path.isfile(self.__configFile):
print "\n=== Marvin Parse Config Init Failed ==="
return FAILED
self.__parsedConfig = getSetupConfig(self.__configFile)
print "\n=== Marvin Parse Config Successful ==="
return SUCCESS
except Exception as e:
print "\nException Occurred Under __parseConfig : " \
"%s" % GetDetailExceptionInfo(e)
return FAILED
def getParsedConfig(self):
return self.__parsedConfig
def getLogFolderPath(self):
return self.__logFolderPath
def getTestClient(self):
return self.__testClient
def getLogger(self):
return self.__tcRunLogger
def getResultFile(self):
'''
@Name : getResultFile
@Desc : Creates the result file at a given path.
@Output : Returns the Result file to be used for writing
test outputs
'''
if self.__logFolderPath is not None:
self.__tcResultFile = open(self.__logFolderPath +
"/results.txt", "w")
return self.__tcResultFile
def __setHypervisorAndZoneInfo(self):
'''
@Name : __setHypervisorAndZoneInfo
@Desc: Set the HyperVisor and Zone details;
default to XenServer
'''
try:
if not self.__hypervisorType:
self.__hypervisorType = XEN_SERVER
if not self.__zoneForTests:
if self.__parsedConfig:
for zone in self.__parsedConfig.zones:
self.__zoneForTests = zone.name
break
return SUCCESS
except Exception as e:
print "\n Exception Occurred Under init " \
"%s" % GetDetailExceptionInfo(e)
return FAILED
def init(self):
'''
@Name : init
@Desc : Initializes marvin by
1. Parsing the configuration and creating a parsed config
structure
2. Creates a timestamped log folder and provides all logs
to be dumped there
3. Creates the DataCenter based upon configuration provided
@Output : SUCCESS or FAILED
'''
try:
if ((self.__parseConfig() != FAILED) and
(self.__setHypervisorAndZoneInfo())and
(self.__setTestDataPath() != FAILED) and
(self.__initLogging() != FAILED) and
(self.__createTestClient() != FAILED) and
(self.__deployDC() != FAILED)):
return SUCCESS
return FAILED
except Exception as e:
print "\n Exception Occurred Under init " \
"%s" % GetDetailExceptionInfo(e)
return FAILED
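# Typical call pattern for this class (a sketch; the config path and flags
# are placeholders):
#
#   marvin = MarvinInit("setup.cfg", deploy_dc_flag=True)
#   if marvin.init() == SUCCESS:
#       test_client = marvin.getTestClient()
#       tc_logger = marvin.getLogger()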
def __initLogging(self):
'''
@Name : __initLogging
@Desc : 1. Initializes the logging for marvin and provides
various log features for the automation run.
2. Initializes all logs to be available under
given Folder Path,where all test run logs
are available for a given run.
3. All logging like exception log,results, run info etc
for a given test run are available under a given
timestamped folder
@Output : SUCCESS or FAILED
'''
try:
log_obj = MarvinLog("CSLog")
if log_obj:
ret = log_obj.\
createLogs(self.__testModName,
self.__parsedConfig.logger,
self.__userLogFolderPath)
if ret != FAILED:
self.__logFolderPath = log_obj.getLogFolderPath()
self.__tcRunLogger = log_obj.getLogger()
print "\n=== Marvin Init Logging Successful==="
return SUCCESS
return FAILED
except Exception as e:
print "\n Exception Occurred Under __initLogging " \
":%s" % GetDetailExceptionInfo(e)
return FAILED
def __createTestClient(self):
'''
@Name : __createTestClient
@Desc : Creates the TestClient during init
based upon the parameters provided
@Output: Returns SUCCESS or FAILED
'''
try:
mgt_details = self.__parsedConfig.mgtSvr[0]
dbsvr_details = self.__parsedConfig.dbSvr
self.__testClient = CSTestClient(
mgt_details,
dbsvr_details,
logger=self.__tcRunLogger,
test_data_filepath=self.__testDataFilePath,
zone=self.__zoneForTests,
hypervisor_type=self.__hypervisorType)
if self.__testClient:
return self.__testClient.createTestClient()
return FAILED
except Exception as e:
print "\n Exception Occurred Under __createTestClient : %s" % \
GetDetailExceptionInfo(e)
return FAILED
def __setTestDataPath(self):
'''
@Name : __setTestDataPath
@Desc : Sets the TestData Path for tests to run
@Output:Returns SUCCESS or FAILED
'''
try:
if ((self.__parsedConfig.TestData is not None) and
(self.__parsedConfig.TestData.Path is not None)):
self.__testDataFilePath = self.__parsedConfig.TestData.Path
print "\n=== Marvin Setting TestData Successful==="
return SUCCESS
except Exception as e:
print "\nException Occurred Under __setTestDataPath : %s" % \
GetDetailExceptionInfo(e)
return FAILED
def __deployDC(self):
'''
@Name : __deployDC
@Desc : Deploy the DataCenter and returns accordingly.
@Output: SUCCESS or FAILED
'''
try:
ret = SUCCESS
if self.__deployFlag:
deploy_obj = DeployDataCenters(self.__testClient,
self.__parsedConfig,
self.__tcRunLogger)
ret = deploy_obj.deploy()
if ret == SUCCESS:
print "Deploy DC Successful"
else:
print "Deploy DC Failed"
return ret
except Exception as e:
print "\n Exception Occurred Under __deployDC : %s" % \
GetDetailExceptionInfo(e)
return FAILED
| 37.528689
| 75
| 0.580539
|
058ba0c74677a204db27b5421df18c9b02c7e481
| 14,849
|
py
|
Python
|
geos/kml.py
|
nono303/geos
|
212a2e12f73a899f1034d6c3d31acdd5b52afe7f
|
[
"BSD-3-Clause"
] | null | null | null |
geos/kml.py
|
nono303/geos
|
212a2e12f73a899f1034d6c3d31acdd5b52afe7f
|
[
"BSD-3-Clause"
] | null | null | null |
geos/kml.py
|
nono303/geos
|
212a2e12f73a899f1034d6c3d31acdd5b52afe7f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module serves for generating Google Earth KML files
which create an overlay of tiled web maps.
This module comes with three major classes:
* KMLMaster:
Creates a KML that contains network links to
multiple KMLMapRoots (-> overview of available maps)
* KMLMapRoot:
Root document of a map containing the tiles of the first zoom level.
Can be used standalone to display one specific map.
* KMLRegion:
A region within a KML document containing multiple tiles and
four network links to the next zoom level.
The number of tiles per KML region can be specified with the `log_tiles_per_row`
parameter. The number of tiles per region impacts the number of http requests to the server.
Many tiles per region will reduce the amount of KML documents requested while increasing the
number of tiles loaded at once which may be bad for users with a weak internet connection.
Understanding the `log_tiles_per_row` is likely to require some explanation:
A KMLRegion consists of:
* always four network links to the next zoom level
* a certain number of ground overlays (the actual tile images)
The following constraints apply:
* A KML region is always a square (nrow = ncol)
* the number of ground overlays per row is always a power of two.
`log_tiles_per_row` is the log2(tiles per row per region).
Example: `log_tiles_per_row = 0` -> 2**0 = 1 tile per row -> 1 tile per region
Network Links Ground Overlay
--- --- -------
| 1 | 2 | | |
--- --- | 1 |
| 3 | 4 | | |
--- --- -------
Example: `log_tiles_per_row = 1` -> 2**1 = 2 tiles per row -> four tiles per region
Network Links Ground Overlays
--- --- -------
| 1 | 2 | | 1 | 2 |
--- --- --- ---
| 3 | 4 | | 3 | 4 |
--- --- -------
"""
from pykml_geos.factory import KML_ElementMaker as KML
from geos.geometry import *
from lxml import etree
import math
from abc import ABCMeta
from geos.mapsource import walk_mapsources, F_SEP
DEFAULT_MAX_LOD_PIXELS = 768
DEFAULT_MIN_LOD_PIXELS = 128
MIN_ZOOM_LIMIT = 5 # if minZoom is higher than that, create empty network links.
def kml_element_name(grid_coords, elem_id="KML"):
"""
Create a unique element name for KML
Args:
grid_coords (GridCoordinate):
elem_id (str):
>>> kml_element_name(GridCoordinate(zoom=5, x=42, y=60), elem_id="NL")
'NL_5_42_60'
"""
return "_".join(str(x) for x in [elem_id, grid_coords.zoom, grid_coords.x, grid_coords.y])
def kml_lat_lon_alt_box(geo_bb):
"""
Create the north/south/east/west tags for a <LatLonBox> or <LatLonAltBox> Bounding Box
Args:
geo_bb (GeographicBB):
Returns:
Tuple: (<north>, <south>, <east>, <west>) KMLElements
"""
return (
KML.north(geo_bb.max.lat),
KML.south(geo_bb.min.lat),
KML.east(geo_bb.max.lon),
KML.west(geo_bb.min.lon),
KML.minAltitude(0),
KML.maxAltitude(0)
)
def kml_lod(min_lod_pixels=DEFAULT_MIN_LOD_PIXELS, max_lod_pixels=DEFAULT_MAX_LOD_PIXELS):
"""
Create the KML LevelOfDetail (LOD) Tag.
In a Region, the <minLodPixels> and <maxLodPixels> elements allow you to specify
an area of the screen (in square pixels). When your data is projected onto the screen,
it must occupy an area of the screen that is greater than <minLodPixels> and less
than <maxLodPixels> in order to be visible. Once the projected size of the Region goes
outside of these limits, it is no longer visible, and the Region becomes inactive.
(from https://developers.google.com/kml/documentation/kml_21tutorial)
Args:
min_lod_pixels (int):
max_lod_pixels (int):
"""
return KML.Lod(
KML.minLodPixels(min_lod_pixels),
KML.maxLodPixels(max_lod_pixels),
KML.minFadeExtent(-1),
KML.maxFadeExtent(-1))
def kml_region(region_coords, min_lod_pixels=DEFAULT_MIN_LOD_PIXELS,
max_lod_pixels=DEFAULT_MAX_LOD_PIXELS):
"""
Create the KML <Region> tag with the appropriate geographic coordinates
Args:
region_coords (RegionCoordinate):
min_lod_pixels (int): see `kml_lod`
max_lod_pixels (int): see `kml_lod`
Returns:
KMLElement: the KML <Region>
"""
bbox = region_coords.geographic_bounds()
return KML.Region(
kml_lod(min_lod_pixels=min_lod_pixels, max_lod_pixels=max_lod_pixels),
KML.LatLonAltBox(
*kml_lat_lon_alt_box(bbox)
)
)
def kml_network_link(href, name=None, region_coords=None, visible=True):
"""
Create the KML <NetworkLink> Tag for a certain Region in the RegionGrid.
Args:
href (str): the href attribute of the NetworkLink
name (str): KML <name>
region_coords (RegionCoordinate): region used to build the KML <Region>
visible (bool): If true the network link will appear as 'visible'
(i.e. checked) in Google Earth.
Returns:
KMLElement: the KML <NetworkLink>
"""
nl = KML.NetworkLink()
if name is None and region_coords is not None:
name = kml_element_name(region_coords, "NL")
if name is not None:
nl.append(KML.name(name))
if region_coords is not None:
nl.append(kml_region(region_coords, DEFAULT_MIN_LOD_PIXELS,-1))
if not visible:
nl.append(KML.visibility(0))
nl.append(KML.Link(
KML.href(href), KML.viewRefreshMode("onRegion")))
return nl
def kml_ground_overlay(mapsource, tile_coords, tile_url):
"""
Create a KML <GroundOverlay> for a certain TileCoordinate.
Args:
mapsource (MapSource): the map source the tile belongs to
tile_coords (TileCoordinate): coordinates of the tile
tile_url (str): web-url to the actual tile image.
Returns:
KMLElement: the KML <GroundOverlay>
"""
maxlod = DEFAULT_MAX_LOD_PIXELS
if mapsource.max_zoom == tile_coords.zoom:
maxlod = -1
go = KML.GroundOverlay()
go.append(KML.name(kml_element_name(tile_coords, "GO")))
go.append(kml_region(tile_coords, DEFAULT_MIN_LOD_PIXELS , maxlod))
go.append(KML.drawOrder(tile_coords.zoom + 1))
go.append(KML.Icon(
KML.href(tile_url)
))
go.append(KML.LatLonAltBox(
*kml_lat_lon_alt_box(tile_coords.geographic_bounds())
))
return go
def kml_folder(name):
"""
Create a KML <Folder> tag.
Args:
name (str): folder name
Returns:
KMLElement: the KML <Folder>
"""
return KML.Folder(KML.name(name.strip("\\")))
class URLFormatter:
"""Create absolute URLs for map resources"""
def __init__(self, host, port, url_scheme="http"):
self.host = host
self.port = port
self.url_scheme = url_scheme
def get_abs_url(self, rel_url):
"""
Create an absolute url from a relative one.
>>> url_formatter = URLFormatter("example.com", 80)
>>> url_formatter.get_abs_url("kml_master.kml")
'http://example.com:80/kml_master.kml'
"""
rel_url = rel_url.lstrip("/")
return "{}://{}/{}".format(self.url_scheme, self.host, rel_url)
def get_map_root_url(self, mapsource):
return self.get_abs_url("/geos/{}.kml".format(mapsource.id))
def get_map_url(self, mapsource, grid_coords):
""" Get URL to a map region. """
return self.get_abs_url(
"/geos/{}/{}/{}/{}.kml".format(mapsource.id, grid_coords.zoom,
grid_coords.x, grid_coords.y))
class KMLMap(metaclass=ABCMeta):
"""Abstract base class representing a KML Document"""
MIME_TYPE = "application/vnd.google-earth.kml+xml"
def __init__(self, url_formatter):
"""
Args:
url_formatter (URLFormatter): URLFormatter object
"""
self.url_formatter = url_formatter
self.kml_doc = KML.Document()
self.kml_root = KML.kml(self.kml_doc)
def add_elem(self, kml_elem):
"""Add an element to the KMLDocument"""
self.kml_doc.append(kml_elem)
def add_elems(self, kml_elems):
"""
Add elements from an iterator.
Args:
kml_elems (iterable of KMLElements): any iterator containing KML elements.
Can also be a KMLMap instance
"""
for kml_elem in kml_elems:
self.add_elem(kml_elem)
def get_kml(self):
"""Return the KML Document as formatted kml/xml"""
return etree.tostring(self.kml_root, pretty_print=True, xml_declaration=True)
def __iter__(self):
yield from self.kml_doc.iterchildren()
class KMLMaster(KMLMap):
"""Represents a KML master document that contains NetworkLinks to all maps
in the mapsource directory."""
def __init__(self, url_formatter, mapsources):
"""
Create a KML master document.
Args:
mapsources (list of MapSource):
"""
super().__init__(url_formatter)
self.map_folders = {
root: {
"folders": folders,
"maps": maps
} for root, folders, maps in walk_mapsources(mapsources)
}
self.add_maps(parent=self.kml_doc)
def add_maps(self, parent, root_path=""):
"""
Recursively add maps in a folder hierarchy.
Args:
parent (KMLElement): KMLElement to which we want to append child folders or maps respectively
root_path (str): path of 'parent'
"""
for mapsource in self.map_folders[root_path]['maps']:
parent.append(self.get_network_link(mapsource))
for folder in self.map_folders[root_path]['folders']:
kml_folder_obj = kml_folder(folder)
parent.append(kml_folder_obj)
self.add_maps(parent=kml_folder_obj, root_path=F_SEP.join((root_path, folder)))
def get_network_link(self, mapsource):
"""Get KML <NetworkLink> for a given mapsource. """
return kml_network_link(self.url_formatter.get_map_root_url(mapsource),
name=mapsource.name, visible=False)
class KMLMapRoot(KMLMap):
"""Represents the root document of an individual map.
Can be used as standalone KML to display that map only."""
def __init__(self, url_formatter, mapsource, log_tiles_per_row):
"""
Create the root document of an individual map.
Args:
mapsource (MapSource):
log_tiles_per_row (int): see module description. Needs to be in range(0, 5).
Note:
The zoom level of the root document is determined as follows:
The min_zoom level is read from the mapsource. `log_tiles_per_row` defines
a lower bound for min_zoom. This is because e.g. on zoom level 0 we could not have
more than one tile per row per region as there is simply only one tile at that zoom
level.
However, we can run into severe performance issues, if either min_zoom
or log_tiles_per_row are too high. At a zoom level of only 8, a root
document spanning the whole world would already contain (2**8)**2 = 65536 tiles
which will break map display in Google Earth.
Therefore MIN_ZOOM_LIMIT is applied as an upper bound. If the determined min_zoom level
exceeds this limit, empty network links (without GroundOverlay) will be used to adaptively
load the next zoom level(s).
"""
super().__init__(url_formatter)
self.mapsource = mapsource
assert(log_tiles_per_row in range(0, 5))
self.log_tiles_per_row = log_tiles_per_row
# see docstring for explanation
zoom = min(max(mapsource.min_zoom, log_tiles_per_row), MIN_ZOOM_LIMIT)
n_tiles = 2 ** zoom # tiles per row of the whole document
tiles_per_row = 2 ** self.log_tiles_per_row # tiles per row per region
n_regions = n_tiles//tiles_per_row # regions per row
assert n_tiles % tiles_per_row == 0, "regions per row is not an integer."
if mapsource.bbox is None:
regions = griditer(0, 0, n_regions)
else:
tile_bounds = mapsource.bbox.to_mercator().to_tile(zoom)
regions = bboxiter(tile_bounds, tiles_per_row)
self.add_elem(KML.name("{} root".format(mapsource.name)))
for x, y in regions:
self.add_elems(KMLRegion(self.url_formatter, self.mapsource,
self.log_tiles_per_row, zoom, x, y))
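# Worked example of the zoom selection above (hypothetical values):
# with mapsource.min_zoom = 3, log_tiles_per_row = 2 and MIN_ZOOM_LIMIT = 5,
#   zoom = min(max(3, 2), 5) = 3
#   n_tiles = 2 ** 3 = 8 tiles per row, tiles_per_row = 2 ** 2 = 4,
# so the root document is split into 8 // 4 = 2 regions per row.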
class KMLRegion(KMLMap):
"""Represents a KML document that is loaded on demand.
It contains the actual tiles as GroundOverlays and contains NetworkLinks
to the next LevelOfDetail."""
def __init__(self, url_formatter, mapsource, log_tiles_per_row, zoom, x, y):
"""
Create KML document displaying a certain map region.
Args:
mapsource (MapSource):
log_tiles_per_row (int): see module description. Needs to be in range(0, 5).
zoom (int): zoom level
x (int): x coordinate identifying the region
y (int): y coordinate identifying the region
Note:
Will contain four network links to the next zoom level unless zoom = max_zoom.
Will contain ground overlays with tiles unless zoom < min_zoom
"""
super().__init__(url_formatter)
self.mapsource = mapsource
rc = RegionCoordinate(zoom, x, y, log_tiles_per_row)
self.add_elem(KML.name(kml_element_name(rc, "DOC")))
if zoom >= mapsource.min_zoom:
for tc in rc.get_tiles():
# add ground overlay for all active layers
for map_layer in mapsource.layers:
if map_layer.min_zoom <= zoom <= map_layer.max_zoom:
self.add_ground_overlay(tc, map_layer, mapsource)
if zoom < mapsource.max_zoom:
for rc_child in rc.zoom_in():
self.add_network_link(rc_child)
def add_ground_overlay(self, tile_coords, map_layer, mapsource):
tile_url = map_layer.get_tile_url(tile_coords.zoom, tile_coords.x, tile_coords.y)
self.add_elem(kml_ground_overlay(mapsource, tile_coords, tile_url))
def add_network_link(self, region_coords):
href = self.url_formatter.get_map_url(self.mapsource, region_coords)
self.add_elem(kml_network_link(href, region_coords=region_coords))
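# How the three document types fit together (a sketch; host, port and the
# mapsource objects are placeholders):
#
#   fmt = URLFormatter("example.com", 80)
#   master = KMLMaster(fmt, mapsources)               # links to every map root
#   root = KMLMapRoot(fmt, mapsource, 2)              # first zoom level of one map
#   region = KMLRegion(fmt, mapsource, 2, 6, 10, 20)  # loaded on demand via NetworkLink
#   kml_bytes = region.get_kml()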
| 35.694712
| 105
| 0.62819
|
24a89bb973af8bff4e93592c4dfaee4b8acaffd5
| 3,822
|
py
|
Python
|
contributions/api/models/capabilities/capability.py
|
rokmetro/rokwire-building-blocks-api-fork
|
0208388394478c31186ab8b9356bd00e13dc2433
|
[
"Apache-2.0"
] | 1
|
2022-01-13T12:21:39.000Z
|
2022-01-13T12:21:39.000Z
|
contributions/api/models/capabilities/capability.py
|
rokmetro/rokwire-building-blocks-api-fork
|
0208388394478c31186ab8b9356bd00e13dc2433
|
[
"Apache-2.0"
] | 313
|
2019-04-12T22:00:21.000Z
|
2022-03-31T20:47:11.000Z
|
contributions/api/models/capabilities/capability.py
|
rokmetro/rokwire-building-blocks-api-fork
|
0208388394478c31186ab8b9356bd00e13dc2433
|
[
"Apache-2.0"
] | 6
|
2019-04-04T13:19:07.000Z
|
2021-12-02T05:28:38.000Z
|
# Copyright 2020 Board of Trustees of the University of Illinois.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils.datasetutils as datasetutils
class Capability():
def __init__(self, injson):
self.id = None
self.name = None
self.description = None
self.isOpenSource = None
self.apiDocUrl = None
self.deploymentDetails = None
self.apiBaseUrl = None
self.version = None
self.versionUrl = None
self.healthCheckUrl = None
self.dataDeletionEndpointDetails = None
# self.creationDate = None
# self.lastModifiedDate = None
self, restjson = datasetutils.update_capability_dataset_from_json(self, injson)
def set_id(self, id):
self.id = id
def get_id(self):
return self.id
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_is_open_source(self, isOpenSource):
self.isOpenSource = isOpenSource
def get_is_open_source(self):
return self.isOpenSource
def set_api_doc_url(self, apiDocUrl):
self.apiDocUrl = apiDocUrl
def get_api_doc_url(self):
return self.apiDocUrl
def set_deployment_details(self, deploymentDetails):
self.deploymentDetails = deploymentDetails
def get_deployment_details(self):
return self.deploymentDetails
def set_docker_image_name(self, dockerImageName):
self.dockerImageName = dockerImageName
def get_docker_image_name(self):
return self.dockerImageName
def set_environment_variables(self, environmentVariables):
self.environmentVariables = environmentVariables
def get_environment_variables(self):
return self.environmentVariables
def set_database_details(self, databaseDetails):
self.databaseDetails = databaseDetails
def get_database_details(self):
return self.databaseDetails
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_version_url(self, versionUrl):
self.versionUrl = versionUrl
def get_version_url(self):
return self.versionUrl
def set_health_check_url(self, healthCheckUrl):
self.healthCheckUrl = healthCheckUrl
def get_health_check_url(self):
return self.healthCheckUrl
def set_auth_method(self, authMethod):
self.authMethod = authMethod
def get_auth_method(self):
return self.authMethod
def set_data_deletion_endpoint_details(self, dataDeletionEndpointDetails):
self.dataDeletionEndpointDetails = dataDeletionEndpointDetails
def get_data_deletion_endpoint_details(self):
return self.dataDeletionEndpointDetails
# def set_creation_date(self, creationDate):
# self.creationDate = creationDate
#
# def get_creation_date(self):
# return self.creationDate
#
# def set_last_modified_date(self, lastModifiedDate):
# self.lastModifiedDate = lastModifiedDate
#
# def get_last_modified_date(self):
# return self.lastModifiedDate
| 29.4
| 87
| 0.702773
|
1311a09ef936864cd63340f1d515337049b08eba
| 1,147
|
py
|
Python
|
simple_http_server_redis_session/__init__.py
|
keijack/python-simple-http-server-redis-session
|
84c16037370e621d093452ea2e4898dcc357871e
|
[
"MIT"
] | 2
|
2021-05-13T11:16:07.000Z
|
2021-05-27T22:40:33.000Z
|
simple_http_server_redis_session/__init__.py
|
keijack/python-simple-http-server-redis-session
|
84c16037370e621d093452ea2e4898dcc357871e
|
[
"MIT"
] | null | null | null |
simple_http_server_redis_session/__init__.py
|
keijack/python-simple-http-server-redis-session
|
84c16037370e621d093452ea2e4898dcc357871e
|
[
"MIT"
] | 1
|
2021-08-08T05:21:47.000Z
|
2021-08-08T05:21:47.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Keijack Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
name = "simple_http_server_redis_session"
version = "0.3.0"
| 44.115385
| 78
| 0.789015
|
882c849ad8ae0364f5e0e4971a74b543ea4cfd23
| 364
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/xmlrpc.client/xmlrpc_ServerProxy_use_datetime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/xmlrpc.client/xmlrpc_ServerProxy_use_datetime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/xmlrpc.client/xmlrpc_ServerProxy_use_datetime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
#
"""
"""
# end_pymotw_header
import xmlrpc.client
server = xmlrpc.client.ServerProxy("http://localhost:9000", use_datetime=True)
now = server.now()
print("With:", now, type(now), now.__class__.__name__)
server = xmlrpc.client.ServerProxy("http://localhost:9000", use_datetime=False)
now = server.now()
print("Without:", now, type(now), now.__class__.__name__)
| 24.266667
| 79
| 0.728022
|
ce35194245417bde2383de1adc5f52ce7ba0fdaa
| 6,190
|
py
|
Python
|
barython/screen.py
|
aruhier/barython
|
3761e2e163c52de407c7781dc9991a3eba464652
|
[
"BSD-3-Clause"
] | null | null | null |
barython/screen.py
|
aruhier/barython
|
3761e2e163c52de407c7781dc9991a3eba464652
|
[
"BSD-3-Clause"
] | 14
|
2016-01-19T23:32:38.000Z
|
2017-04-19T16:21:50.000Z
|
barython/screen.py
|
Anthony25/barython
|
3761e2e163c52de407c7781dc9991a3eba464652
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from collections import OrderedDict
import itertools
import logging
import threading
import xcffib
import xcffib.xproto
import xcffib.randr
from barython import _BarSpawner
logger = logging.getLogger("barython")
def get_randr_screens():
conn = xcffib.connect()
conn.randr = conn(xcffib.randr.key)
window = conn.get_setup().roots[0].root
resources = conn.randr.GetScreenResourcesCurrent(window).reply()
outputs = OrderedDict()
for rroutput in resources.outputs:
try:
cookie = conn.randr.GetOutputInfo(
rroutput, resources.config_timestamp
)
info = cookie.reply()
name = "".join(map(chr, info.name))
cookie = conn.randr.GetCrtcInfo(
info.crtc, resources.config_timestamp
)
info = cookie.reply()
if info:
outputs[name] = (info.width, info.height, info.x, info.y)
except Exception as e:
logger.debug("Error when trying to fetch screens infos")
logger.debug(e)
continue
return outputs
class Screen(_BarSpawner):
_bspwm_monitor_name = None
@property
def geometry(self):
"""
Return the screen geometry in a tuple
"""
if self._geometry:
return self._geometry
elif self.name:
try:
x, y, px, py = get_randr_screens().get(self.name, None)
self._geometry = (x, self.height, px, py)
except (ValueError, TypeError):
logger.error(
"Properties of screen {} could not be fetched. Please "
"specify the geometry manually.".format(self.name)
)
return self._geometry
@geometry.setter
def geometry(self, value):
self._geometry = value
@property
def bspwm_monitor_name(self):
return (self.name if self._bspwm_monitor_name is None
else self._bspwm_monitor_name)
@bspwm_monitor_name.setter
def bspwm_monitor_name(self, value):
self._bspwm_monitor_name = value
def add_widget(self, alignment, *widgets, index=None):
"""
Add a widget to a screen
        :param alignment: where to add the widget (left, center, right)
:param *widgets: widgets to add
:param index: if set, will insert the widgets before the specified
index (default: None)
"""
if alignment not in self._widgets.keys():
raise ValueError("'alignement' might be either 'l', 'c' or 'r'")
if index is None:
self._widgets[alignment].extend(widgets)
else:
list_widgets = self._widgets[alignment]
self._widgets[alignment] = (
list_widgets[:index] + list(widgets) + list_widgets[index:]
)
for w in self._widgets[alignment]:
w.screens.add(self)
self.hooks.merge(w.hooks)
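    # Usage sketch (not from the original source; clock_widget and friends are
    # hypothetical widget instances exposing the .screens and .hooks attributes
    # that add_widget() relies on):
    #
    #     screen = Screen(name="HDMI-0")
    #     screen.add_widget("l", clock_widget)             # append to the left block
    #     screen.add_widget("r", battery_widget, cpu_widget)
    #     screen.add_widget("r", volume_widget, index=0)   # insert before battery_widget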
def gather(self):
"""
        Gather all widgets' content
"""
return "".join(
"%{{{}}}{}".format(
alignment, "".join([
str(widget.content) if widget.content is not None
else "" for widget in widgets
])
) for alignment, widgets in self._widgets.items() if widgets
)
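    # Illustrative output of gather() for lemonbar (assuming one already-rendered
    # widget per alignment block):
    #
    #     "%{l}workspaces%{c}song title%{r}12:34"
    #
    # i.e. every non-empty block is prefixed with lemonbar's %{l}/%{c}/%{r}
    # alignment directive.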
def update(self, *args, **kwargs):
if self.panel.instance_per_screen:
return super().update(*args, **kwargs)
else:
return self.panel.update(*args, **kwargs)
def propage_hooks_changes(self):
"""
        Propagate a change in the hooks pool
"""
if getattr(self, "panel", None):
self.panel.hooks.merge(self.hooks)
def start(self):
"""
Start the screen panel
        If the global panel is configured with one lemonbar instance per screen,
        start a local lemonbar.
        Start all widgets in their own threads. They will call back a screen
        update whenever their content changes.
"""
super().start()
attached_widgets = list(itertools.chain(*self._widgets.values()))
if not self.panel.instance_per_screen and len(attached_widgets) == 0:
# No widget attached, no need to keep this thread opened
# TODO: Add a test for it
self.content = ""
self.stop()
return
self.update(no_wait=True)
for widget in attached_widgets:
threading.Thread(
target=widget.start
).start()
self._stop.wait()
def stop(self, *args, **kwargs):
super().stop(*args, **kwargs)
if self.hooks.listen:
try:
self.hooks.stop()
except:
pass
for widget in itertools.chain(*self._widgets.values()):
try:
widget.stop()
except:
logger.debug("Error when stopping widget")
continue
def __getattribute__(self, name):
attr = super().__getattribute__(name)
# attributes to inherit from panel
panel_attr = ("height", "fg", "bg", "fonts", "refresh", "clickable")
if name in panel_attr:
if (attr is None or attr == -1) and self.panel:
return getattr(self.panel, name, attr)
return attr
def __init__(self, name=None, refresh=-1, clickable=-1, geometry=None,
panel=None, bspwm_monitor_name=None, *args, **kwargs):
super().__init__(*args, **kwargs)
#: screen name
self.name = name
#: refresh rate
self.refresh = refresh
#: clickable items (for lemonbar)
self.clickable = clickable
self.panel = panel
#: bar geometry, in a tuple (x, y, position_x, position_y)
self.geometry = geometry
#: widgets to show on this screen
self._widgets = OrderedDict([("l", []), ("c", []), ("r", [])])
#: only useful with bspwm. Used by Bspwm*DesktopWidget
self.bspwm_monitor_name = bspwm_monitor_name
| 30.79602
| 77
| 0.564459
|
917f79b141e81bdfe0165b7ba5374d52422dd74a
| 51,046
|
py
|
Python
|
salt/salt/utils/schema.py
|
smallyear/linuxLearn
|
342e5020bf24b5fac732c4275a512087b47e578d
|
[
"Apache-2.0"
] | 1
|
2017-11-21T16:57:27.000Z
|
2017-11-21T16:57:27.000Z
|
salt/salt/utils/schema.py
|
smallyear/linuxLearn
|
342e5020bf24b5fac732c4275a512087b47e578d
|
[
"Apache-2.0"
] | null | null | null |
salt/salt/utils/schema.py
|
smallyear/linuxLearn
|
342e5020bf24b5fac732c4275a512087b47e578d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
salt.utils.schema
~~~~~~~~~~~~~~~~~
Object Oriented Configuration - JSON Schema compatible generator
This code was inspired by `jsl`__, "A Python DSL for describing JSON schemas".
.. __: http://jsl.readthedocs.org/
A configuration document or configuration document section is defined using the
py:class:`Schema`, the configuration items are defined by any of the subclasses
of py:class:`BaseSchemaItem` as attributes of a subclass of py:class:`Schema` class.
As an example:
.. code-block:: python
class HostConfig(Schema):
title = 'Host Configuration'
description = 'This is the host configuration'
host = StringItem(
'Host',
'The looong host description',
default=None,
minimum=1
)
port = NumberItem(
description='The port number',
default=80,
required=False,
minimum=0,
inclusiveMinimum=False,
maximum=65535
)
The serialized version of the above configuration definition is:
.. code-block:: python
>>> print(HostConfig.serialize())
OrderedDict([
('$schema', 'http://json-schema.org/draft-04/schema#'),
('title', 'Host Configuration'),
('description', 'This is the host configuration'),
('type', 'object'),
('properties', OrderedDict([
('host', {'minimum': 1,
'type': 'string',
'description': 'The looong host description',
'title': 'Host'}),
('port', {'description': 'The port number',
'default': 80,
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'})
])),
('required', ['host']),
('x-ordering', ['host', 'port']),
('additionalProperties', True)]
)
>>> print(json.dumps(HostConfig.serialize(), indent=2))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
}
The serialized version of the configuration block can be used to validate a configuration dictionary using
the `python jsonschema library`__.
.. __: https://pypi.python.org/pypi/jsonschema
.. code-block:: python
>>> import jsonschema
>>> jsonschema.validate({'host': 'localhost', 'port': 80}, HostConfig.serialize())
>>> jsonschema.validate({'host': 'localhost', 'port': -1}, HostConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['port']:
-1
>>>
A configuration document can even be split into configuration sections. Let's reuse the above
``HostConfig`` class and include it in a configuration block:
.. code-block:: python
class LoggingConfig(Schema):
title = 'Logging Configuration'
description = 'This is the logging configuration'
log_level = StringItem(
'Logging Level',
'The logging level',
default='debug',
minimum=1
)
class MyConfig(Schema):
title = 'My Config'
description = 'This my configuration'
hostconfig = HostConfig()
logconfig = LoggingConfig()
The JSON Schema string version of the above is:
.. code-block:: python
>>> print json.dumps(MyConfig.serialize(), indent=4)
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"hostconfig": {
"id": "https://non-existing.saltstack.com/schemas/hostconfig.json#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
},
"logconfig": {
"id": "https://non-existing.saltstack.com/schemas/logconfig.json#",
"title": "Logging Configuration",
"description": "This is the logging configuration",
"type": "object",
"properties": {
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"required": [
"log_level"
],
"x-ordering": [
"log_level"
],
"additionalProperties": false
}
},
"additionalProperties": false
}
>>> import jsonschema
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': 80},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': -1},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['hostconfig']['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['hostconfig']['port']:
-1
>>>
If, however, you just want to use the configuration blocks for readability and do not want the nested
dictionary serialization, you can pass ``flatten=True`` when defining a configuration section as a
configuration subclass attribute:
.. code-block:: python
class MyConfig(Schema):
title = 'My Config'
description = 'This my configuration'
hostconfig = HostConfig(flatten=True)
logconfig = LoggingConfig(flatten=True)
The JSON Schema string version of the above is:
.. code-block:: python
>>> print(json.dumps(MyConfig, indent=4))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
},
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"x-ordering": [
"host",
"port",
"log_level"
],
"additionalProperties": false
}
'''
# Import python libs
from __future__ import absolute_import, print_function
import sys
import inspect
import textwrap
import functools
# Import salt libs
from salt.utils.odict import OrderedDict
# Import 3rd-party libs
#import yaml
import salt.ext.six as six
BASE_SCHEMA_URL = 'https://non-existing.saltstack.com/schemas'
RENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80
class Prepareable(type):
'''
Preserve attributes order for python 2.x
'''
# This code was taken from
# https://github.com/aromanovich/jsl/blob/master/jsl/_compat/prepareable.py
# which in turn was taken from https://gist.github.com/DasIch/5562625 with minor fixes
if not six.PY3:
def __new__(mcs, name, bases, attributes):
try:
constructor = attributes["__new__"]
except KeyError:
return type.__new__(mcs, name, bases, attributes)
def preparing_constructor(mcs, name, bases, attributes):
try:
mcs.__prepare__
except AttributeError:
return constructor(mcs, name, bases, attributes)
namespace = mcs.__prepare__(name, bases)
defining_frame = sys._getframe(1)
for constant in reversed(defining_frame.f_code.co_consts):
if inspect.iscode(constant) and constant.co_name == name:
def get_index(attribute_name, _names=constant.co_names): # pylint: disable=cell-var-from-loop
try:
return _names.index(attribute_name)
except ValueError:
return 0
break
else:
return constructor(mcs, name, bases, attributes)
by_appearance = sorted(
attributes.items(), key=lambda item: get_index(item[0])
)
for key, value in by_appearance:
namespace[key] = value
return constructor(mcs, name, bases, namespace)
attributes["__new__"] = functools.wraps(constructor)(preparing_constructor)
return type.__new__(mcs, name, bases, attributes)
class NullSentinel(object):
'''
A class which instance represents a null value.
Allows specifying fields with a default value of null.
'''
def __bool__(self):
return False
__nonzero__ = __bool__
Null = NullSentinel()
'''
A special value that can be used to set the default value
of a field to null.
'''
# make sure nobody creates another Null value
def _failing_new(*args, **kwargs):
raise TypeError('Can\'t create another NullSentinel instance')
NullSentinel.__new__ = staticmethod(_failing_new)
del _failing_new
class SchemaMeta(six.with_metaclass(Prepareable, type)):
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Mark the instance as a configuration document/section
attrs['__config__'] = True
attrs['__flatten__'] = False
# Let's record the configuration items/sections
items = {}
sections = {}
order = []
# items from parent classes
for base in reversed(bases):
if hasattr(base, '_items'):
items.update(base._items)
if hasattr(base, '_sections'):
sections.update(base._sections)
if hasattr(base, '_order'):
order.extend(base._order)
# Iterate through attrs to discover items/config sections
for key, value in six.iteritems(attrs):
if not hasattr(value, '__item__') and not hasattr(value, '__config__'):
continue
if hasattr(value, '__item__'):
# the value is an item instance
if hasattr(value, 'title') and value.title is None:
# It's an item instance without a title, make the title
                    # its name
value.title = key
items[key] = value
if hasattr(value, '__config__'):
sections[key] = value
order.append(key)
attrs['_order'] = order
attrs['_items'] = items
attrs['_sections'] = sections
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, flatten=False, allow_additional_items=False, **kwargs):
instance = object.__new__(cls)
if flatten is True:
# This configuration block is to be treated as a part of the
# configuration for which it was defined as an attribute, not as
            # its own sub-configuration
instance.__flatten__ = True
if allow_additional_items is True:
            # The configuration block will also accept configuration items that
            # are not defined on the class; jsonschema validation will not fail
            # on such additional items
instance.__allow_additional_items__ = True
instance.__init__(**kwargs)
return instance
class BaseSchemaItemMeta(six.with_metaclass(Prepareable, type)):
'''
Config item metaclass to "tag" the class as a configuration item
'''
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Register the class as an item class
attrs['__item__'] = True
# Instantiate an empty list to store the config item attribute names
attributes = []
for base in reversed(bases):
try:
# Extend the attributes with the base argspec argument names
# but skip "self"
for argname in inspect.getargspec(base.__init__).args:
if argname == 'self' or argname in attributes:
continue
attributes.append(argname)
except TypeError:
# On the base object type, __init__ is just a wrapper which
                # triggers a TypeError when we're trying to find out its
# argspec
continue
attrs['_attributes'] = attributes
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
# Create the instance class
instance = object.__new__(cls)
if args:
raise RuntimeError(
'Please pass all arguments as named arguments. Un-named '
'arguments are not supported'
)
for key in kwargs.keys():
# Store the kwarg keys as the instance attributes for the
# serialization step
if key not in instance._attributes:
instance._attributes.append(key)
# Init the class
instance.__init__(*args, **kwargs)
# Validate the instance after initialization
for base in reversed(inspect.getmro(cls)):
validate_attributes = getattr(base, '__validate_attributes__', None)
if validate_attributes:
if instance.__validate_attributes__.__func__.__code__ is not validate_attributes.__code__:
# The method was overridden, run base.__validate_attributes__ function
base.__validate_attributes__(instance)
# Finally, run the instance __validate_attributes__ function
instance.__validate_attributes__()
# Return the initialized class
return instance
class Schema(six.with_metaclass(SchemaMeta, object)):
'''
Configuration definition class
'''
# Define some class level attributes to make PyLint happier
title = None
description = None
_items = _sections = None
__flatten__ = False
__allow_additional_items__ = False
@classmethod
def serialize(cls, id_=None):
# The order matters
serialized = OrderedDict()
if id_ is not None:
# This is meant as a configuration section, sub json schema
serialized['id'] = '{0}/{1}.json#'.format(BASE_SCHEMA_URL, id_)
else:
# Main configuration block, json schema
serialized['$schema'] = 'http://json-schema.org/draft-04/schema#'
if cls.title is not None:
serialized['title'] = cls.title
if cls.description is not None:
if cls.description == cls.__doc__:
serialized['description'] = textwrap.dedent(cls.description).strip()
else:
serialized['description'] = cls.description
required = []
ordering = []
serialized['type'] = 'object'
properties = OrderedDict()
cls.after_items_update = []
for name in cls._order:
skip_order = False
if name in cls._sections: # pylint: disable=E1135
section = cls._sections[name]
serialized_section = section.serialize(None if section.__flatten__ is True else name)
if section.__flatten__ is True:
# Flatten the configuration section into the parent
# configuration
properties.update(serialized_section['properties'])
if 'x-ordering' in serialized_section:
ordering.extend(serialized_section['x-ordering'])
if 'required' in serialized_section:
required.extend(serialized_section['required'])
if hasattr(section, 'after_items_update'):
cls.after_items_update.extend(section.after_items_update)
skip_order = True
else:
# Store it as a configuration section
properties[name] = serialized_section
if name in cls._items: # pylint: disable=E1135
config = cls._items[name]
# Handle the configuration items defined in the class instance
if config.__flatten__ is True:
serialized_config = config.serialize()
cls.after_items_update.append(serialized_config)
skip_order = True
else:
properties[name] = config.serialize()
if config.required:
# If it's a required item, add it to the required list
required.append(name)
if skip_order is False:
# Store the order of the item
if name not in ordering:
ordering.append(name)
if properties:
serialized['properties'] = properties
# Update the serialized object with any items to include after properties
if cls.after_items_update:
after_items_update = {}
for entry in cls.after_items_update:
name, data = next(six.iteritems(entry))
if name in after_items_update:
after_items_update[name].extend(data)
else:
after_items_update[name] = data
serialized.update(after_items_update)
if required:
# Only include required if not empty
serialized['required'] = required
if ordering:
# Only include ordering if not empty
serialized['x-ordering'] = ordering
serialized['additionalProperties'] = cls.__allow_additional_items__
return serialized
@classmethod
def defaults(cls):
serialized = cls.serialize()
defaults = {}
for name, details in serialized['properties'].items():
if 'default' in details:
defaults[name] = details['default']
continue
if 'properties' in details:
for sname, sdetails in details['properties'].items():
if 'default' in sdetails:
defaults.setdefault(name, {})[sname] = sdetails['default']
continue
return defaults
@classmethod
def as_requirements_item(cls):
serialized_schema = cls.serialize()
required = serialized_schema.get('required', [])
for name in serialized_schema['properties']:
if name not in required:
required.append(name)
return RequirementsItem(requirements=required)
#@classmethod
#def render_as_rst(cls):
# '''
# Render the configuration block as a restructured text string
# '''
# # TODO: Implement RST rendering
# raise NotImplementedError
#@classmethod
#def render_as_yaml(cls):
# '''
# Render the configuration block as a parseable YAML string including comments
# '''
# # TODO: Implement YAML rendering
# raise NotImplementedError
class SchemaItem(six.with_metaclass(BaseSchemaItemMeta, object)):
'''
Base configuration items class.
All configurations must subclass it
'''
# Define some class level attributes to make PyLint happier
__type__ = None
__format__ = None
_attributes = None
__flatten__ = False
__serialize_attr_aliases__ = None
required = False
def __init__(self, required=None, **extra):
'''
:param required: If the configuration item is required. Defaults to ``False``.
'''
if required is not None:
self.required = required
self.extra = extra
def __validate_attributes__(self):
'''
        Run any validation checks you need on the instance attributes.
        ATTENTION:
        Don't call the parent class when overriding this
        method because it would just duplicate the execution. This class's
        metaclass will take care of that.
'''
if self.required not in (True, False):
raise RuntimeError(
'\'required\' can only be True/False'
)
def _get_argname_value(self, argname):
'''
Return the argname value looking up on all possible attributes
'''
        # Let's see if there's a private function to get the value
argvalue = getattr(self, '__get_{0}__'.format(argname), None)
if argvalue is not None and callable(argvalue):
argvalue = argvalue()
if argvalue is None:
# Let's see if the value is defined as a public class variable
argvalue = getattr(self, argname, None)
if argvalue is None:
# Let's see if it's defined as a private class variable
argvalue = getattr(self, '__{0}__'.format(argname), None)
if argvalue is None:
# Let's look for it in the extra dictionary
argvalue = self.extra.get(argname, None)
return argvalue
def serialize(self):
'''
Return a serializable form of the config instance
'''
raise NotImplementedError
class BaseSchemaItem(SchemaItem):
'''
Base configuration items class.
All configurations must subclass it
'''
    # Let's define description as a class attribute; this allows a custom configuration
# item to do something like:
# class MyCustomConfig(StringItem):
# '''
# This is my custom config, blah, blah, blah
# '''
# description = __doc__
#
description = None
# The same for all other base arguments
title = None
default = None
enum = None
enumNames = None
def __init__(self, title=None, description=None, default=None, enum=None, enumNames=None, **kwargs):
'''
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list(list, tuple, set) of valid choices.
'''
if title is not None:
self.title = title
if description is not None:
self.description = description
if default is not None:
self.default = default
if enum is not None:
self.enum = enum
if enumNames is not None:
self.enumNames = enumNames
super(BaseSchemaItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if self.enum is not None:
if not isinstance(self.enum, (list, tuple, set)):
raise RuntimeError(
'Only the \'list\', \'tuple\' and \'set\' python types can be used '
'to define \'enum\''
)
if not isinstance(self.enum, list):
self.enum = list(self.enum)
if self.enumNames is not None:
if not isinstance(self.enumNames, (list, tuple, set)):
raise RuntimeError(
'Only the \'list\', \'tuple\' and \'set\' python types can be used '
'to define \'enumNames\''
)
if len(self.enum) != len(self.enumNames):
raise RuntimeError(
'The size of \'enumNames\' must match the size of \'enum\''
)
if not isinstance(self.enumNames, list):
self.enumNames = list(self.enumNames)
def serialize(self):
'''
Return a serializable form of the config instance
'''
serialized = {'type': self.__type__}
for argname in self._attributes:
if argname == 'required':
# This is handled elsewhere
continue
argvalue = self._get_argname_value(argname)
if argvalue is not None:
if argvalue is Null:
argvalue = None
# None values are not meant to be included in the
# serialization, since this is not None...
if self.__serialize_attr_aliases__ and argname in self.__serialize_attr_aliases__:
argname = self.__serialize_attr_aliases__[argname]
serialized[argname] = argvalue
return serialized
def __get_description__(self):
if self.description is not None:
if self.description == self.__doc__:
return textwrap.dedent(self.description).strip()
return self.description
#def render_as_rst(self, name):
# '''
# Render the configuration item as a restructured text string
# '''
    # # TODO: Implement RST rendering
# raise NotImplementedError
#def render_as_yaml(self, name):
# '''
# Render the configuration item as a parseable YAML string including comments
# '''
# # TODO: Include the item rules in the output, minimum, maximum, etc...
# output = '# ----- '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 2)
# output += '>\n'
# if self.description:
# output += '\n'.join(textwrap.wrap(self.description,
# width=RENDER_COMMENT_YAML_MAX_LINE_LENGTH,
# initial_indent='# '))
# output += '\n'
# yamled_default_value = yaml.dump(self.default, default_flow_style=False).split('\n...', 1)[0]
# output += '# Default: {0}\n'.format(yamled_default_value)
# output += '#{0}: {1}\n'.format(name, yamled_default_value)
# output += '# <---- '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 1)
# return output + '\n'
class BooleanItem(BaseSchemaItem):
__type__ = 'boolean'
class StringItem(BaseSchemaItem):
'''
A string configuration field
'''
__type__ = 'string'
__serialize_attr_aliases__ = {
'min_length': 'minLength',
'max_length': 'maxLength'
}
format = None
pattern = None
min_length = None
max_length = None
def __init__(self,
format=None, # pylint: disable=redefined-builtin
pattern=None,
min_length=None,
max_length=None,
**kwargs):
'''
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list(list, tuple, set) of valid choices.
:param format:
A semantic format of the string (for example, ``"date-time"``, ``"email"``, or ``"uri"``).
:param pattern:
A regular expression (ECMA 262) that a string value must match.
:param min_length:
The minimum length
:param max_length:
The maximum length
'''
if format is not None: # pylint: disable=redefined-builtin
self.format = format
if pattern is not None:
self.pattern = pattern
if min_length is not None:
self.min_length = min_length
if max_length is not None:
self.max_length = max_length
super(StringItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if self.format is None and self.__format__ is not None:
self.format = self.__format__
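# Illustrative usage (not part of the original module; key order in the serialized
# dict is not significant here):
#
#     host = StringItem(title='Host', description='The host name',
#                       pattern=r'^[a-z0-9.-]+$', min_length=1, max_length=255)
#     host.serialize()
#     # -> {'type': 'string', 'title': 'Host', 'description': 'The host name',
#     #     'pattern': '^[a-z0-9.-]+$', 'minLength': 1, 'maxLength': 255}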
class EMailItem(StringItem):
'''
An internet email address, see `RFC 5322, section 3.4.1`__.
.. __: http://tools.ietf.org/html/rfc5322
'''
__format__ = 'email'
class IPv4Item(StringItem):
'''
An IPv4 address configuration field, according to dotted-quad ABNF syntax as defined in
`RFC 2673, section 3.2`__.
.. __: http://tools.ietf.org/html/rfc2673
'''
__format__ = 'ipv4'
class IPv6Item(StringItem):
'''
An IPv6 address configuration field, as defined in `RFC 2373, section 2.2`__.
.. __: http://tools.ietf.org/html/rfc2373
'''
__format__ = 'ipv6'
class HostnameItem(StringItem):
'''
An Internet host name configuration field, see `RFC 1034, section 3.1`__.
.. __: http://tools.ietf.org/html/rfc1034
'''
__format__ = 'hostname'
class DateTimeItem(StringItem):
'''
An ISO 8601 formatted date-time configuration field, as defined by `RFC 3339, section 5.6`__.
.. __: http://tools.ietf.org/html/rfc3339
'''
__format__ = 'date-time'
class UriItem(StringItem):
'''
A universal resource identifier (URI) configuration field, according to `RFC3986`__.
.. __: http://tools.ietf.org/html/rfc3986
'''
__format__ = 'uri'
class SecretItem(StringItem):
'''
    A string configuration field containing a secret, for example, passwords, API keys, etc.
'''
__format__ = 'secret'
class NumberItem(BaseSchemaItem):
__type__ = 'number'
__serialize_attr_aliases__ = {
'multiple_of': 'multipleOf',
'exclusive_minimum': 'exclusiveMinimum',
'exclusive_maximum': 'exclusiveMaximum',
}
multiple_of = None
minimum = None
exclusive_minimum = None
maximum = None
exclusive_maximum = None
def __init__(self,
multiple_of=None,
minimum=None,
exclusive_minimum=None,
maximum=None,
exclusive_maximum=None,
**kwargs):
'''
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list(list, tuple, set) of valid choices.
:param multiple_of:
A value must be a multiple of this factor.
:param minimum:
The minimum allowed value
:param exclusive_minimum:
            Whether the minimum is exclusive, i.e. whether the value must be strictly greater than the minimum
:param maximum:
The maximum allowed value
:param exclusive_maximum:
            Whether the maximum is exclusive, i.e. whether the value must be strictly less than the maximum
'''
if multiple_of is not None:
self.multiple_of = multiple_of
if minimum is not None:
self.minimum = minimum
if exclusive_minimum is not None:
self.exclusive_minimum = exclusive_minimum
if maximum is not None:
self.maximum = maximum
if exclusive_maximum is not None:
self.exclusive_maximum = exclusive_maximum
super(NumberItem, self).__init__(**kwargs)
class IntegerItem(NumberItem):
__type__ = 'integer'
class ArrayItem(BaseSchemaItem):
__type__ = 'array'
__serialize_attr_aliases__ = {
'min_items': 'minItems',
'max_items': 'maxItems',
'unique_items': 'uniqueItems',
'additional_items': 'additionalItems'
}
items = None
min_items = None
max_items = None
unique_items = None
additional_items = None
def __init__(self,
items=None,
min_items=None,
max_items=None,
unique_items=None,
additional_items=None,
**kwargs):
'''
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list(list, tuple, set) of valid choices.
:param items:
Either of the following:
* :class:`BaseSchemaItem` -- all items of the array must match the field schema;
* a list or a tuple of :class:`fields <.BaseSchemaItem>` -- all items of the array must be
valid according to the field schema at the corresponding index (tuple typing);
:param min_items:
Minimum length of the array
:param max_items:
Maximum length of the array
:param unique_items:
Whether all the values in the array must be distinct.
:param additional_items:
If the value of ``items`` is a list or a tuple, and the array length is larger than
the number of fields in ``items``, then the additional items are described
            by the :class:`.BaseSchemaItem` passed using this argument.
:type additional_items: bool or :class:`.BaseSchemaItem`
'''
if items is not None:
self.items = items
if min_items is not None:
self.min_items = min_items
if max_items is not None:
self.max_items = max_items
if unique_items is not None:
self.unique_items = unique_items
if additional_items is not None:
self.additional_items = additional_items
super(ArrayItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if not self.items:
raise RuntimeError(
'The passed items must not be empty'
)
if isinstance(self.items, (list, tuple)):
for item in self.items:
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
'All items passed in the item argument tuple/list must be '
'a subclass of Schema, SchemaItem or BaseSchemaItem, '
'not {0}'.format(type(item))
)
elif not isinstance(self.items, (Schema, SchemaItem)):
raise RuntimeError(
'The items argument passed must be a subclass of '
'Schema, SchemaItem or BaseSchemaItem, not '
'{0}'.format(type(self.items))
)
def __get_items__(self):
if isinstance(self.items, (Schema, SchemaItem)):
            # This is either a Schema or a SchemaItem; return it in its
            # serialized form
return self.items.serialize()
if isinstance(self.items, (tuple, list)):
items = []
for item in self.items:
items.append(item.serialize())
return items
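# Illustrative usage (not part of the original module):
#
#     hosts = ArrayItem(title='Hosts', items=StringItem(title='Host'),
#                       min_items=1, unique_items=True)
#     hosts.serialize()
#     # -> {'type': 'array', 'title': 'Hosts', 'minItems': 1, 'uniqueItems': True,
#     #     'items': {'type': 'string', 'title': 'Host'}}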
class DictItem(BaseSchemaItem):
__type__ = 'object'
__serialize_attr_aliases__ = {
'min_properties': 'minProperties',
'max_properties': 'maxProperties',
'pattern_properties': 'patternProperties',
'additional_properties': 'additionalProperties'
}
properties = None
pattern_properties = None
additional_properties = None
min_properties = None
max_properties = None
def __init__(self,
properties=None,
pattern_properties=None,
additional_properties=None,
min_properties=None,
max_properties=None,
**kwargs):
'''
:param required:
If the configuration item is required. Defaults to ``False``.
:type required:
boolean
:param title:
A short explanation about the purpose of the data described by this item.
:type title:
str
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list(list, tuple, set) of valid choices.
:param properties:
A dictionary containing fields
:param pattern_properties:
A dictionary whose keys are regular expressions (ECMA 262).
Properties match against these regular expressions, and for any that match,
the property is described by the corresponding field schema.
:type pattern_properties: dict[str -> :class:`.Schema` or
:class:`.SchemaItem` or :class:`.BaseSchemaItem`]
:param additional_properties:
Describes properties that are not described by the ``properties`` or ``pattern_properties``.
:type additional_properties: bool or :class:`.Schema` or :class:`.SchemaItem`
or :class:`.BaseSchemaItem`
:param min_properties:
A minimum number of properties.
:type min_properties: int
:param max_properties:
A maximum number of properties
:type max_properties: int
'''
if properties is not None:
self.properties = properties
if pattern_properties is not None:
self.pattern_properties = pattern_properties
if additional_properties is not None:
self.additional_properties = additional_properties
if min_properties is not None:
self.min_properties = min_properties
if max_properties is not None:
self.max_properties = max_properties
super(DictItem, self).__init__(**kwargs)
def __validate_attributes__(self):
if not self.properties and not self.pattern_properties:
raise RuntimeError(
'One of properties or pattern properties must be passed'
)
if self.properties is not None:
if not isinstance(self.properties, (Schema, dict)):
raise RuntimeError(
'The passed properties must be passed as a dict or '
                    'a Schema, not \'{0}\''.format(type(self.properties))
)
if not isinstance(self.properties, Schema):
for key, prop in self.properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
                            'The passed property whose key is \'{0}\' must be of type '
'Schema, SchemaItem or BaseSchemaItem, not '
'\'{1}\''.format(key, type(prop))
)
if self.pattern_properties is not None:
if not isinstance(self.pattern_properties, dict):
raise RuntimeError(
'The passed pattern_properties must be passed as a dict '
'not \'{0}\''.format(type(self.pattern_properties))
)
for key, prop in self.pattern_properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
                        'The passed pattern_property whose key is \'{0}\' must '
'be of type Schema, SchemaItem or BaseSchemaItem, '
'not \'{1}\''.format(key, type(prop))
)
if self.additional_properties is not None:
if not isinstance(self.additional_properties, (bool, Schema, SchemaItem)):
raise RuntimeError(
'The passed additional_properties must be of type bool, '
'Schema, SchemaItem or BaseSchemaItem, not \'{0}\''.format(
                        type(self.additional_properties)
)
)
def __get_properties__(self):
if self.properties is None:
return
if isinstance(self.properties, Schema):
return self.properties.serialize()['properties']
properties = OrderedDict()
for key, prop in self.properties.items():
properties[key] = prop.serialize()
return properties
def __get_pattern_properties__(self):
if self.pattern_properties is None:
return
pattern_properties = OrderedDict()
for key, prop in self.pattern_properties.items():
pattern_properties[key] = prop.serialize()
return pattern_properties
def __get_additional_properties__(self):
if self.additional_properties is None:
return
if isinstance(self.additional_properties, bool):
return self.additional_properties
return self.additional_properties.serialize()
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
result = super(DictItem, self).serialize()
required = []
if self.properties is not None:
if isinstance(self.properties, Schema):
serialized = self.properties.serialize()
if 'required' in serialized:
required.extend(serialized['required'])
else:
for key, prop in self.properties.items():
if prop.required:
required.append(key)
if required:
result['required'] = required
return result
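# Illustrative usage (not part of the original module):
#
#     ssl_opts = DictItem(title='SSL options',
#                         properties={'cert': StringItem(title='Cert', required=True),
#                                     'key': StringItem(title='Key')})
#     ssl_opts.serialize()
#     # -> {'type': 'object', 'title': 'SSL options',
#     #     'properties': {'cert': {'type': 'string', 'title': 'Cert'},
#     #                    'key': {'type': 'string', 'title': 'Key'}},
#     #     'required': ['cert']}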
class RequirementsItem(SchemaItem):
__type__ = 'object'
requirements = None
def __init__(self, requirements=None):
if requirements is not None:
self.requirements = requirements
super(RequirementsItem, self).__init__()
def __validate_attributes__(self):
if self.requirements is None:
raise RuntimeError(
'The passed requirements must not be empty'
)
if not isinstance(self.requirements, (SchemaItem, list, tuple, set)):
raise RuntimeError(
'The passed requirements must be passed as a list, tuple, '
                'set, SchemaItem or BaseSchemaItem, not \'{0}\''.format(self.requirements)
)
if not isinstance(self.requirements, SchemaItem):
if not isinstance(self.requirements, list):
self.requirements = list(self.requirements)
for idx, item in enumerate(self.requirements):
if not isinstance(item, (six.string_types, SchemaItem)):
raise RuntimeError(
'The passed requirement at the {0} index must be of type '
'str or SchemaItem, not \'{1}\''.format(idx, type(item))
)
def serialize(self):
if isinstance(self.requirements, SchemaItem):
requirements = self.requirements.serialize()
else:
requirements = []
for requirement in self.requirements:
if isinstance(requirement, SchemaItem):
requirements.append(requirement.serialize())
continue
requirements.append(requirement)
return {'required': requirements}
class OneOfItem(SchemaItem):
__type__ = 'oneOf'
items = None
def __init__(self, items=None):
if items is not None:
self.items = items
super(OneOfItem, self).__init__()
def __validate_attributes__(self):
if not self.items:
raise RuntimeError(
'The passed items must not be empty'
)
if not isinstance(self.items, (list, tuple)):
raise RuntimeError(
'The passed items must be passed as a list/tuple not '
'\'{0}\''.format(type(self.items))
)
for idx, item in enumerate(self.items):
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
'The passed item at the {0} index must be of type '
'Schema, SchemaItem or BaseSchemaItem, not '
'\'{1}\''.format(idx, type(item))
)
if not isinstance(self.items, list):
self.items = list(self.items)
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
return {self.__type__: [i.serialize() for i in self.items]}
class AnyOfItem(OneOfItem):
__type__ = 'anyOf'
class AllOfItem(OneOfItem):
__type__ = 'allOf'
class NotItem(SchemaItem):
__type__ = 'not'
item = None
def __init__(self, item=None):
if item is not None:
self.item = item
super(NotItem, self).__init__()
def __validate_attributes__(self):
if not self.item:
raise RuntimeError(
'An item must be passed'
)
if not isinstance(self.item, (Schema, SchemaItem)):
raise RuntimeError(
                'The passed item must be of type Schema, SchemaItem or '
                'BaseSchemaItem, not \'{0}\''.format(type(self.item))
)
def serialize(self):
return {self.__type__: self.item.serialize()}
# ----- Custom Preconfigured Configs -------------------------------------------------------------------------------->
class PortItem(IntegerItem):
minimum = 0 # yes, 0 is a valid port number
maximum = 65535
# <---- Custom Preconfigured Configs ---------------------------------------------------------------------------------
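# Illustrative usage of the preconfigured item above (not part of the original module):
#
#     PortItem(title='Port').serialize()
#     # -> {'type': 'integer', 'title': 'Port', 'minimum': 0, 'maximum': 65535}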
| 36.100424
| 118
| 0.552991
|
ae127cbb6ecda2bce8e1f72a93417b0eb37c0344
| 195,719
|
py
|
Python
|
manila/tests/share/drivers/huawei/test_huawei_nas.py
|
openstack/manila
|
1ebae738c235c6f1874ac7b11307e0d5fb567dba
|
[
"Apache-2.0"
] | 159
|
2015-01-02T09:35:15.000Z
|
2022-01-04T11:51:34.000Z
|
manila/tests/share/drivers/huawei/test_huawei_nas.py
|
openstack/manila
|
1ebae738c235c6f1874ac7b11307e0d5fb567dba
|
[
"Apache-2.0"
] | 5
|
2015-07-24T09:28:21.000Z
|
2020-11-20T04:33:51.000Z
|
manila/tests/share/drivers/huawei/test_huawei_nas.py
|
openstack/manila
|
1ebae738c235c6f1874ac7b11307e0d5fb567dba
|
[
"Apache-2.0"
] | 128
|
2015-01-05T22:52:28.000Z
|
2021-12-29T14:00:58.000Z
|
# Copyright (c) 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Huawei nas driver module."""
import os
import requests
import shutil
import six
import tempfile
import time
from unittest import mock
import xml.dom.minidom
import ddt
from oslo_serialization import jsonutils
from xml.etree import ElementTree as ET
from manila.common import constants as common_constants
from manila import context
from manila.data import utils as data_utils
from manila import db
from manila import exception
from manila import rpc
from manila.share import configuration as conf
from manila.share.drivers.huawei import constants
from manila.share.drivers.huawei import huawei_nas
from manila.share.drivers.huawei.v3 import connection
from manila.share.drivers.huawei.v3 import helper
from manila.share.drivers.huawei.v3 import replication
from manila.share.drivers.huawei.v3 import rpcapi
from manila.share.drivers.huawei.v3 import smartx
from manila import test
from manila import utils
def fake_sleep(time):
pass
def data_session(url):
if url == "/xx/sessions":
data = """{"error":{"code":0},
"data":{"username":"admin",
"iBaseToken":"2001031430",
"deviceid":"210235G7J20000000000"}}"""
if url == "sessions":
data = '{"error":{"code":0},"data":{"ID":11}}'
return data
def filesystem(method, data, fs_status_flag):
extend_share_flag = False
shrink_share_flag = False
if method == "PUT":
if data == """{"CAPACITY": 10485760}""":
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
extend_share_flag = True
elif data == """{"CAPACITY": 2097152}""":
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"2097152"}}"""
shrink_share_flag = True
elif data == """{"NAME": "share_fake_manage_uuid"}""":
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": True,
"ENABLECOMPRESSION": True}):
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": False,
"ENABLECOMPRESSION": False}):
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif data == """{"IOPRIORITY": "3"}""":
data = """{"error":{"code":0}}"""
elif method == "DELETE":
data = """{"error":{"code":0}}"""
elif method == "GET":
if fs_status_flag:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"1",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":"",
"IOCLASSID":"11"}}"""
else:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":"",
"IOCLASSID":"11"}}"""
else:
data = '{"error":{"code":31755596}}'
return (data, extend_share_flag, shrink_share_flag)
def filesystem_thick(method, data, fs_status_flag):
extend_share_flag = False
shrink_share_flag = False
if method == "PUT":
if data == """{"CAPACITY": 10485760}""":
data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
extend_share_flag = True
elif data == """{"CAPACITY": 2097152}""":
data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"2097152"}}"""
shrink_share_flag = True
elif data == """{"NAME": "share_fake_uuid_thickfs"}""":
data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": False,
"ENABLECOMPRESSION": False}):
data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
elif method == "DELETE":
data = """{"error":{"code":0}}"""
elif method == "GET":
if fs_status_flag:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool_Thick",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":"",
"IOCLASSID":"11"}}"""
else:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool_Thick",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":"",
"IOCLASSID":"11"}}"""
else:
data = '{"error":{"code":31755596}}'
return (data, extend_share_flag, shrink_share_flag)
def filesystem_inpartition(method, data, fs_status_flag):
extend_share_flag = False
shrink_share_flag = False
if method == "PUT":
if data == """{"CAPACITY": 10485760}""":
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
extend_share_flag = True
elif data == """{"CAPACITY": 2097152}""":
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"2097152"}}"""
shrink_share_flag = True
elif data == """{"NAME": "share_fake_manage_uuid"}""":
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
elif data == """{"NAME": "share_fake_uuid_inpartition"}""":
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": True,
"ENABLECOMPRESSION": True}):
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": False,
"ENABLECOMPRESSION": False}):
data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
elif method == "DELETE":
data = """{"error":{"code":0}}"""
elif method == "GET":
if fs_status_flag:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"1",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"1",
"SMARTCACHEPARTITIONID":"1",
"IOCLASSID":"11"}}"""
else:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"1",
"SMARTCACHEPARTITIONID":"1",
"IOCLASSID":"11"}}"""
else:
data = '{"error":{"code":31755596}}'
return (data, extend_share_flag, shrink_share_flag)
def allow_access(type, method, data):
allow_ro_flag = False
allow_rw_flag = False
request_data = jsonutils.loads(data)
success_data = """{"error":{"code":0}}"""
fail_data = """{"error":{"code":1077939723}}"""
ret = None
if type == "NFS":
if request_data['ACCESSVAL'] == '0':
allow_ro_flag = True
ret = success_data
elif request_data['ACCESSVAL'] == '1':
allow_rw_flag = True
ret = success_data
elif type == "CIFS":
if request_data['PERMISSION'] == '0':
allow_ro_flag = True
ret = success_data
elif request_data['PERMISSION'] == '1':
allow_rw_flag = True
ret = success_data
# Group name should start with '@'.
if ('group' in request_data['NAME']
and not request_data['NAME'].startswith('@')):
ret = fail_data
if ret is None:
ret = fail_data
return (ret, allow_ro_flag, allow_rw_flag)
def dec_driver_handles_share_servers(func):
def wrapper(*args, **kw):
self = args[0]
self.configuration.driver_handles_share_servers = True
self.recreate_fake_conf_file(logical_port='CTE0.A.H0')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
return func(*args, **kw)
return wrapper
def QoS_response(method):
if method == "GET":
data = """{"error":{"code":0},
"data":{"NAME": "OpenStack_Fake_QoS", "MAXIOPS": "100",
"FSLIST": "4", "LUNLIST": "", "RUNNINGSTATUS": "2"}}"""
elif method == "PUT":
data = """{"error":{"code":0}}"""
else:
data = """{"error":{"code":0},
"data":{"ID": "11"}}"""
return data
class FakeHuaweiNasHelper(helper.RestHelper):
def __init__(self, *args, **kwargs):
helper.RestHelper.__init__(self, *args, **kwargs)
self.test_normal = True
self.deviceid = None
self.delete_flag = False
self.allow_flag = False
self.deny_flag = False
self.create_snapflag = False
self.setupserver_flag = False
self.fs_status_flag = True
self.create_share_flag = False
self.snapshot_flag = True
self.service_status_flag = True
self.share_exist = True
self.service_nfs_status_flag = True
self.create_share_data_flag = False
self.allow_ro_flag = False
self.allow_rw_flag = False
self.extend_share_flag = False
self.shrink_share_flag = False
self.add_fs_to_partition_flag = False
self.add_fs_to_cache_flag = False
self.test_multi_url_flag = 0
self.cache_exist = True
self.partition_exist = True
self.alloc_type = None
self.custom_results = {}
def _change_file_mode(self, filepath):
pass
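
    # Central fake for RestHelper.do_call: strips the device prefix from the
    # URL, honours any per-URL override registered in self.custom_results,
    # then falls through to the canned responses below.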
def do_call(self, url, data, method, calltimeout=4):
url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
url = url.replace('/210235G7J20000000000/', '')
if self.custom_results and self.custom_results.get(url):
result = self.custom_results[url]
if isinstance(result, six.string_types):
return jsonutils.loads(result)
if isinstance(result, dict) and result.get(method):
return jsonutils.loads(result[method])
if self.test_normal:
if self.test_multi_url_flag == 1:
data = '{"error":{"code":-403}}'
res_json = jsonutils.loads(data)
return res_json
elif self.test_multi_url_flag == 2:
if ('http://100.115.10.70:8082/deviceManager/rest/xx/'
'sessions' == url):
self.url = url
data = data_session("/xx/sessions")
res_json = jsonutils.loads(data)
return res_json
elif (('/xx/sessions' == url) or (self.url is not None
and 'http://100.115.10.69:8082/deviceManager/rest'
in self.url)):
data = '{"error":{"code":-403}}'
res_json = jsonutils.loads(data)
return res_json
if url == "/xx/sessions" or url == "/sessions":
data = data_session(url)
if url == "/storagepool":
data = """{"error":{"code":0},
"data":[{"USERFREECAPACITY":"2097152",
"ID":"1",
"NAME":"OpenStack_Pool",
"USERTOTALCAPACITY":"4194304",
"USAGETYPE":"2",
"USERCONSUMEDCAPACITY":"2097152",
"TIER0CAPACITY":"100",
"TIER1CAPACITY":"0",
"TIER2CAPACITY":"0"},
{"USERFREECAPACITY":"2097152",
"ID":"2",
"NAME":"OpenStack_Pool_Thick",
"USERTOTALCAPACITY":"4194304",
"USAGETYPE":"2",
"USERCONSUMEDCAPACITY":"2097152",
"TIER0CAPACITY":"100",
"TIER1CAPACITY":"0",
"TIER2CAPACITY":"0"}]}"""
if url == "/filesystem":
request_data = jsonutils.loads(data)
self.alloc_type = request_data.get('ALLOCTYPE')
data = """{"error":{"code":0},"data":{
"ID":"4"}}"""
if url == "/system/":
data = """{"error":{"code":0},
"data":{"PRODUCTVERSION": "V300R003C10",
"wwn": "fake_wwn"}}"""
if url == "/remote_device":
data = """{"error":{"code":0},
"data":[{"ID": "0",
"NAME": "fake_name",
"WWN": "fake_wwn"}]}"""
if url == "/ioclass" or url == "/ioclass/11":
data = QoS_response(method)
if url == "/ioclass/active/11":
data = """{"error":{"code":0},
"data":[{"ID": "11", "MAXIOPS": "100",
"FSLIST": ""}]}"""
if url == "/NFSHARE" or url == "/CIFSHARE":
if self.create_share_flag:
data = '{"error":{"code":31755596}}'
elif self.create_share_data_flag:
data = '{"error":{"code":0}}'
else:
data = """{"error":{"code":0},"data":{
"ID":"10"}}"""
if url == "/NFSHARE?range=[100-200]":
if self.share_exist:
data = """{"error":{"code":0},
"data":[{"ID":"1",
"FSID":"4",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid/"},
{"ID":"2",
"FSID":"5",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid_thickfs/"},
{"ID":"3",
"FSID":"6",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid_inpartition/"}]}"""
else:
data = """{"error":{"code":0},
"data":[{"ID":"1",
"FSID":"4",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid_fail/"}]}"""
if url == "/CIFSHARE?range=[100-200]":
data = """{"error":{"code":0},
"data":[{"ID":"2",
"FSID":"4",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid/"}]}"""
if url == "/NFSHARE?range=[0-100]":
data = """{"error":{"code":0},
"data":[{"ID":"1",
"FSID":"4",
"NAME":"test_fail",
"SHAREPATH":"/share_fake_uuid_fail/"}]}"""
if url == "/CIFSHARE?range=[0-100]":
data = """{"error":{"code":0},
"data":[{"ID":"2",
"FSID":"4",
"NAME":"test_fail",
"SHAREPATH":"/share_fake_uuid_fail/"}]}"""
if url == "/NFSHARE/1" or url == "/CIFSHARE/2":
data = """{"error":{"code":0}}"""
self.delete_flag = True
if url == "/FSSNAPSHOT":
data = """{"error":{"code":0},"data":{
"ID":"3"}}"""
self.create_snapflag = True
if url == "/FSSNAPSHOT/4@share_snapshot_fake_snapshot_uuid":
if self.snapshot_flag:
data = """{"error":{"code":0},
"data":{"ID":"4@share_snapshot_fake_snapshot_uuid"}}"""
else:
data = '{"error":{"code":1073754118}}'
self.delete_flag = True
if url == "/FSSNAPSHOT/4@fake_storage_snapshot_name":
if self.snapshot_flag:
data = """{"error":{"code":0},
"data":{"ID":"4@share_snapshot_fake_snapshot_uuid",
"NAME":"share_snapshot_fake_snapshot_uuid",
"HEALTHSTATUS":"1"}}"""
else:
data = '{"error":{"code":1073754118}}'
if url == "/FSSNAPSHOT/3":
data = """{"error":{"code":0}}"""
self.delete_flag = True
if url == "/NFS_SHARE_AUTH_CLIENT":
data, self.allow_ro_flag, self.allow_rw_flag = (
allow_access('NFS', method, data))
self.allow_flag = True
if url == "/CIFS_SHARE_AUTH_CLIENT":
data, self.allow_ro_flag, self.allow_rw_flag = (
allow_access('CIFS', method, data))
self.allow_flag = True
if url == ("/FSSNAPSHOT?TYPE=48&PARENTID=4"
"&&sortby=TIMESTAMP,d&range=[0-2000]"):
data = """{"error":{"code":0},
"data":[{"ID":"3",
"NAME":"share_snapshot_fake_snapshot_uuid"}]}"""
self.delete_flag = True
if url == ("/NFS_SHARE_AUTH_CLIENT?"
"filter=PARENTID::1&range=[0-100]"):
data = """{"error":{"code":0},
"data":[{"ID":"0",
"NAME":"100.112.0.1_fail"}]}"""
if url == ("/CIFS_SHARE_AUTH_CLIENT?"
"filter=PARENTID::2&range=[0-100]"):
data = """{"error":{"code":0},
"data":[{"ID":"0",
"NAME":"user_name_fail"}]}"""
if url == ("/NFS_SHARE_AUTH_CLIENT?"
"filter=PARENTID::1&range=[100-200]"):
data = """{"error":{"code":0},
"data":[{"ID":"5",
"NAME":"100.112.0.2"}]}"""
if url == ("/CIFS_SHARE_AUTH_CLIENT?"
"filter=PARENTID::2&range=[100-200]"):
data = """{"error":{"code":0},
"data":[{"ID":"6",
"NAME":"user_exist"}]}"""
if url in ("/NFS_SHARE_AUTH_CLIENT/0",
"/NFS_SHARE_AUTH_CLIENT/5",
"/CIFS_SHARE_AUTH_CLIENT/0",
"/CIFS_SHARE_AUTH_CLIENT/6"):
if method == "DELETE":
data = """{"error":{"code":0}}"""
self.deny_flag = True
elif method == "GET":
if 'CIFS' in url:
data = """{"error":{"code":0},
"data":{"'PERMISSION'":"0"}}"""
else:
data = """{"error":{"code":0},
"data":{"ACCESSVAL":"0"}}"""
else:
data = """{"error":{"code":0}}"""
                    self.allow_rw_flag = True
if url == "/NFSHARE/count" or url == "/CIFSHARE/count":
data = """{"error":{"code":0},"data":{
"COUNT":"196"}}"""
if (url == "/NFS_SHARE_AUTH_CLIENT/count?filter=PARENTID::1"
or url == ("/CIFS_SHARE_AUTH_CLIENT/count?filter="
"PARENTID::2")):
data = """{"error":{"code":0},"data":{
"COUNT":"196"}}"""
if url == "/CIFSSERVICE":
if self.service_status_flag:
data = """{"error":{"code":0},"data":{
"RUNNINGSTATUS":"2"}}"""
else:
data = """{"error":{"code":0},"data":{
"RUNNINGSTATUS":"1"}}"""
if url == "/NFSSERVICE":
if self.service_nfs_status_flag:
data = """{"error":{"code":0},
"data":{"RUNNINGSTATUS":"2",
"SUPPORTV3":"true",
"SUPPORTV4":"true"}}"""
else:
data = """{"error":{"code":0},
"data":{"RUNNINGSTATUS":"1",
"SUPPORTV3":"true",
"SUPPORTV4":"true"}}"""
self.setupserver_flag = True
if "/FILESYSTEM?filter=NAME::" in url:
data = """{"error":{"code":0},
"data":[{"ID":"4",
"NAME":"share_fake_uuid"},
{"ID":"8",
"NAME":"share_fake_new_uuid"}]}"""
if url == "/filesystem/4":
data, self.extend_share_flag, self.shrink_share_flag = (
filesystem(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/filesystem/5":
data, self.extend_share_flag, self.shrink_share_flag = (
filesystem_thick(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/filesystem/6":
data, self.extend_share_flag, self.shrink_share_flag = (
filesystem_inpartition(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/cachepartition":
if self.partition_exist:
data = """{"error":{"code":0},
"data":[{"ID":"7",
"NAME":"test_partition_name"}]}"""
else:
data = """{"error":{"code":0},
"data":[{"ID":"7",
"NAME":"test_partition_name_fail"}]}"""
if url == "/cachepartition/1":
if self.partition_exist:
data = """{"error":{"code":0},
"data":{"ID":"7",
"NAME":"test_partition_name"}}"""
else:
data = """{"error":{"code":0},
"data":{"ID":"7",
"NAME":"test_partition_name_fail"}}"""
if url == "/SMARTCACHEPARTITION":
if self.cache_exist:
data = """{"error":{"code":0},
"data":[{"ID":"8",
"NAME":"test_cache_name"}]}"""
else:
data = """{"error":{"code":0},
"data":[{"ID":"8",
"NAME":"test_cache_name_fail"}]}"""
if url == "/SMARTCACHEPARTITION/1":
if self.cache_exist:
data = """{"error":{"code":0},
"data":{"ID":"8",
"NAME":"test_cache_name"}}"""
else:
data = """{"error":{"code":0},
"data":{"ID":"8",
"NAME":"test_cache_name_fail"}}"""
if url == "/filesystem/associate/cachepartition":
data = """{"error":{"code":0}}"""
self.add_fs_to_partition_flag = True
if url == "/SMARTCACHEPARTITION/CREATE_ASSOCIATE":
data = """{"error":{"code":0}}"""
self.add_fs_to_cache_flag = True
if url == "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE":
data = """{"error":{"code":0}}"""
if url == "/smartPartition/removeFs":
data = """{"error":{"code":0}}"""
if url == "/ETH_PORT":
data = """{"error":{"code":0},
"data":[{"ID": "4",
"LOCATION":"CTE0.A.H0",
"IPV4ADDR":"",
"BONDNAME":"",
"BONDID":"",
"RUNNINGSTATUS":"10"},
{"ID": "6",
"LOCATION":"CTE0.A.H1",
"IPV4ADDR":"",
"BONDNAME":"fake_bond",
"BONDID":"5",
"RUNNINGSTATUS":"10"}]}"""
if url == "/ETH_PORT/6":
data = """{"error":{"code":0},
"data":{"ID": "6",
"LOCATION":"CTE0.A.H1",
"IPV4ADDR":"",
"BONDNAME":"fake_bond",
"BONDID":"5",
"RUNNINGSTATUS":"10"}}"""
if url == "/BOND_PORT":
data = "{\"error\":{\"code\":0},\
\"data\":[{\"ID\": \"5\",\
\"NAME\":\"fake_bond\",\
\"PORTIDLIST\": \"[\\\"6\\\"]\",\
\"RUNNINGSTATUS\":\"10\"}]}"
if url == "/vlan":
if method == "GET":
data = """{"error":{"code":0}}"""
else:
data = """{"error":{"code":0},"data":{
"ID":"4"}}"""
if url == "/LIF":
if method == "GET":
data = """{"error":{"code":0}}"""
else:
data = """{"error":{"code":0},"data":{
"ID":"4"}}"""
if url == "/DNS_Server":
if method == "GET":
data = "{\"error\":{\"code\":0},\"data\":{\
\"ADDRESS\":\"[\\\"\\\"]\"}}"
else:
data = """{"error":{"code":0}}"""
if url == "/AD_CONFIG":
if method == "GET":
data = """{"error":{"code":0},"data":{
"DOMAINSTATUS":"1",
"FULLDOMAINNAME":"huawei.com"}}"""
else:
data = """{"error":{"code":0}}"""
if url == "/LDAP_CONFIG":
if method == "GET":
data = """{"error":{"code":0},"data":{
"BASEDN":"dc=huawei,dc=com",
"LDAPSERVER": "100.97.5.87"}}"""
else:
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR":
data = """{"error":{"code":0},"data":{
"ID":"fake_pair_id"}}"""
if url == "/REPLICATIONPAIR/sync":
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR/switch":
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR/split":
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK":
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK":
data = """{"error":{"code":0}}"""
if url == "/REPLICATIONPAIR/fake_pair_id":
data = """{"error":{"code":0},"data":{
"ID": "fake_pair_id",
"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "1",
"ISPRIMARY": "false",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "1"}}"""
else:
data = '{"error":{"code":31755596}}'
res_json = jsonutils.loads(data)
return res_json
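
# RPC client stub that routes create_replica_pair straight to a local
# ReplicaPairManager instead of issuing a real RPC call.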
class FakeRpcClient(rpcapi.HuaweiV3API):
def __init__(self, helper):
super(FakeRpcClient, self).__init__()
self.replica_mgr = replication.ReplicaPairManager(helper)
class fake_call_context(object):
def __init__(self, replica_mgr):
self.replica_mgr = replica_mgr
def call(self, context, func_name, **kwargs):
if func_name == 'create_replica_pair':
return self.replica_mgr.create_replica_pair(
context, **kwargs)
def create_replica_pair(self, context, host, local_share_info,
remote_device_wwn, remote_fs_id):
self.client.prepare = mock.Mock(
return_value=self.fake_call_context(self.replica_mgr))
return super(FakeRpcClient, self).create_replica_pair(
context, host, local_share_info,
remote_device_wwn, remote_fs_id)
class FakeRpcServer(object):
def start(self):
pass
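
# Minimal in-memory stand-in for the driver's private storage API, e.g.
#   ps = FakePrivateStorage()
#   ps.update('entity', {'k': 'v'}); ps.get('entity', 'k')  # -> 'v'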
class FakePrivateStorage(object):
def __init__(self):
self.map = {}
def get(self, entity_id, key=None, default=None):
if self.map.get(entity_id):
return self.map[entity_id].get(key, default)
return default
def update(self, entity_id, details, delete_existing=False):
self.map[entity_id] = details
def delete(self, entity_id, key=None):
self.map.pop(entity_id)
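
# Driver under test, wired to the fake REST helper, replica manager, RPC
# client and private storage defined above.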
class FakeHuaweiNasDriver(huawei_nas.HuaweiNasDriver):
"""Fake HuaweiNasDriver."""
def __init__(self, *args, **kwargs):
huawei_nas.HuaweiNasDriver.__init__(self, *args, **kwargs)
self.plugin = connection.V3StorageConnection(self.configuration)
self.plugin.helper = FakeHuaweiNasHelper(self.configuration)
self.plugin.replica_mgr = replication.ReplicaPairManager(
self.plugin.helper)
self.plugin.rpc_client = FakeRpcClient(self.plugin.helper)
self.plugin.private_storage = FakePrivateStorage()
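
# Lightweight ElementTree replacement: findtext()/find() look nodes up in a
# dict built from the path->value mapping given to the constructor, and
# write() is a no-op so no file is ever touched.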
class FakeConfigParseTree(object):
class FakeNode(object):
def __init__(self, text):
self._text = text
@property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
class FakeRoot(object):
def __init__(self):
self._node_map = {}
def findtext(self, path, default=None):
if path in self._node_map:
return self._node_map[path].text
return default
def find(self, path):
if path in self._node_map:
return self._node_map[path]
return None
def __init__(self, path_value):
self.root = self.FakeRoot()
for k in path_value:
self.root._node_map[k] = self.FakeNode(path_value[k])
def getroot(self):
return self.root
def write(self, filename, format):
pass
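
# End-to-end tests for FakeHuaweiNasDriver against the canned REST responses
# above; setUp() builds a temporary XML conf file and the share, snapshot,
# access-rule and share-type fixtures used throughout.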
@ddt.ddt
class HuaweiShareDriverTestCase(test.TestCase):
"""Tests GenericShareDriver."""
def setUp(self):
super(HuaweiShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
def _safe_get(opt):
return getattr(self.configuration, opt)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.safe_get = mock.Mock(side_effect=_safe_get)
self.configuration.network_config_group = 'fake_network_config_group'
self.configuration.admin_network_config_group = (
'fake_admin_network_config_group')
self.configuration.config_group = 'fake_share_backend_name'
self.configuration.share_backend_name = 'fake_share_backend_name'
self.configuration.huawei_share_backend = 'V3'
self.configuration.max_over_subscription_ratio = 1
self.configuration.driver_handles_share_servers = False
self.configuration.replication_domain = None
self.configuration.filter_function = None
self.configuration.goodness_function = None
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file(self.fake_conf_file)
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration.manila_huawei_conf_file = self.fake_conf_file
self._helper_fake = mock.Mock()
self.mock_object(huawei_nas.importutils, 'import_object',
mock.Mock(return_value=self._helper_fake))
self.mock_object(time, 'sleep', fake_sleep)
self.driver = FakeHuaweiNasDriver(configuration=self.configuration)
self.driver.plugin.helper.test_normal = True
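        # Share, snapshot and access-rule fixtures reused by the tests below.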
self.share_nfs = {
'id': 'fake_uuid',
'share_id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid'},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id',
}
self.share_nfs_thick = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool_Thick',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid'},
],
'share_type_id': 'fake_id',
}
self.share_nfs_thickfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-thickfs',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_thickfs'},
],
'share_type_id': 'fake_id',
}
self.share_nfs_thick_thickfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-thickfs',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool_Thick',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_thickfs'},
],
'share_type_id': 'fake_id',
}
self.share_nfs_inpartition = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-inpartition',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_inpartition'},
],
'share_type_id': 'fake_id',
}
self.share_manage_nfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-manage-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid'},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id',
}
self.share_pool_name_not_match = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-manage-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid'},
],
'host': 'fake_host@fake_backend#OpenStack_Pool_not_match',
'share_type_id': 'fake_id',
}
self.share_proto_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'proto_fail',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
}
self.share_cifs = {
'id': 'fake_uuid',
'share_id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'CIFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': 'share_fake_uuid'},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id',
}
self.share_manage_cifs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-manage-uuid',
'size': 1,
'share_proto': 'CIFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': '\\\\100.115.10.68\\share_fake_uuid'},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id',
}
self.nfs_snapshot = {
'id': 'fake_snapshot_uuid',
'snapshot_id': 'fake_snapshot_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'size': 1,
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share': {
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share_size': 1,
'share_proto': 'NFS',
},
}
self.cifs_snapshot = {
'id': 'fake_snapshot_uuid',
'snapshot_id': 'fake_snapshot_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'size': 1,
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share': {
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share_size': 1,
'share_proto': 'CIFS',
},
}
self.storage_nfs_snapshot = {
'id': 'fake_snapshot_uuid',
'snapshot_id': 'fake_snapshot_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'provider_location': 'fake_storage_snapshot_name',
'size': 1,
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share': {
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share_size': 1,
'share_proto': 'NFS',
},
}
self.storage_cifs_snapshot = {
'id': 'fake_snapshot_uuid',
'snapshot_id': 'fake_snapshot_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'provider_location': 'fake_storage_snapshot_name',
'size': 1,
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share': {
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'share_size': 1,
'share_proto': 'CIFS',
},
}
self.security_service = {
'id': 'fake_id',
'domain': 'FAKE',
'server': 'fake_server',
'user': 'fake_user',
'password': 'fake_password',
}
self.access_ip = {
'access_type': 'ip',
'access_to': '100.112.0.1',
'access_level': 'rw',
}
self.access_ip_exist = {
'access_type': 'ip',
'access_to': '100.112.0.2',
'access_level': 'rw',
}
self.access_user = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'rw',
}
self.access_user_exist = {
'access_type': 'user',
'access_to': 'user_exist',
'access_level': 'rw',
}
self.access_group = {
'access_type': 'user',
'access_to': 'group_name',
'access_level': 'rw',
}
self.access_cert = {
'access_type': 'cert',
'access_to': 'fake_cert',
'access_level': 'rw',
}
self.driver_options = {
'volume_id': 'fake',
}
self.share_server = None
self.driver._licenses = ['fake']
self.fake_network_allocations = [{
'id': 'fake_network_allocation_id',
'ip_address': '111.111.111.109',
}]
self.fake_network_info = {
'server_id': '0',
'segmentation_id': '2',
'cidr': '111.111.111.0/24',
'neutron_net_id': 'fake_neutron_net_id',
'neutron_subnet_id': 'fake_neutron_subnet_id',
'security_services': '',
'network_allocations': self.fake_network_allocations,
'network_type': 'vlan',
}
self.fake_active_directory = {
'type': 'active_directory',
'dns_ip': '100.97.5.5',
'user': 'ad_user',
'password': 'ad_password',
'domain': 'huawei.com'
}
self.fake_ldap = {
'type': 'ldap',
'server': '100.97.5.87',
'domain': 'dc=huawei,dc=com'
}
fake_share_type_id_not_extra = 'fake_id'
self.fake_type_not_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': {},
'required_extra_specs': {},
'id': fake_share_type_id_not_extra,
'name': 'test_with_extra',
'updated_at': None
}
}
fake_extra_specs = {
'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:huawei_smartcache': '<is> True',
'huawei_smartcache:cachename': 'test_cache_name',
'capabilities:huawei_smartpartition': '<is> True',
'huawei_smartpartition:partitionname': 'test_partition_name',
'capabilities:thin_provisioning': '<is> True',
'test:test:test': 'test',
}
fake_share_type_id = 'fooid-2'
self.fake_type_w_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
fake_extra_specs = {
'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:huawei_smartcache': '<is> False',
'huawei_smartcache:cachename': None,
'capabilities:huawei_smartpartition': '<is> False',
'huawei_smartpartition:partitionname': None,
'capabilities:thin_provisioning': '<is> True',
'test:test:test': 'test',
}
fake_share_type_id = 'fooid-3'
self.fake_type_fake_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
fake_extra_specs = {
'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:huawei_smartcache': '<is> False',
'huawei_smartcache:cachename': None,
'capabilities:huawei_smartpartition': '<is> False',
'huawei_smartpartition:partitionname': None,
'capabilities:thin_provisioning': '<is> False',
'test:test:test': 'test',
}
fake_share_type_id = 'fooid-4'
self.fake_type_thin_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
self.share_nfs_host_not_exist = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#',
}
self.share_nfs_storagepool_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool2',
}
fake_extra_specs = {
'driver_handles_share_servers': 'False',
}
fake_share_type_id = 'fake_id'
self.fake_type_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
self.active_replica = {
'id': 'fake_active_replica_id',
'share_id': 'fake_share_id',
'name': 'share_fake_uuid',
'host': 'hostname1@backend_name1#OpenStack_Pool',
'size': 5,
'share_proto': 'NFS',
'replica_state': common_constants.REPLICA_STATE_ACTIVE,
}
self.new_replica = {
'id': 'fake_new_replica_id',
'share_id': 'fake_share_id',
'name': 'share_fake_new_uuid',
'host': 'hostname2@backend_name2#OpenStack_Pool',
'size': 5,
'share_proto': 'NFS',
'replica_state': common_constants.REPLICA_STATE_OUT_OF_SYNC,
'share_type_id': 'fake_id',
}
def _get_share_by_proto(self, share_proto):
if share_proto == "NFS":
share = self.share_nfs
elif share_proto == "CIFS":
share = self.share_cifs
else:
share = None
return share
def mock_share_type(self, share_type):
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
def test_no_configuration(self):
self.mock_object(huawei_nas.HuaweiNasDriver,
'driver_handles_share_servers',
True)
self.assertRaises(exception.InvalidInput,
huawei_nas.HuaweiNasDriver)
def test_conf_product_fail(self):
self.recreate_fake_conf_file(product_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_pool_node_fail(self):
self.recreate_fake_conf_file(pool_node_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_username_fail(self):
self.recreate_fake_conf_file(username_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_timeout_fail(self):
self.recreate_fake_conf_file(timeout_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
timeout = self.driver.plugin._get_timeout()
self.assertEqual(60, timeout)
def test_conf_wait_interval_fail(self):
self.recreate_fake_conf_file(wait_interval_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
wait_interval = self.driver.plugin._get_wait_interval()
self.assertEqual(3, wait_interval)
def test_conf_logical_ip_fail(self):
self.configuration.driver_handles_share_servers = True
self.recreate_fake_conf_file(logical_port="fake_port")
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.configuration.driver_handles_share_servers = False
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_snapshot_replication_conflict(self):
self.recreate_fake_conf_file(snapshot_support=True,
replication_support=True)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin._setup_conf()
self.assertRaises(exception.BadConfigurationException,
self.driver.plugin.check_conf_file)
def test_get_backend_driver_fail(self):
test_fake_conf_file = None
self.driver.plugin.configuration.manila_huawei_conf_file = (
test_fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.get_backend_driver)
def test_get_backend_driver_fail_driver_none(self):
self.recreate_fake_conf_file(product_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.get_backend_driver)
def test_create_share_storagepool_not_exist(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidHost,
self.driver.create_share,
self._context,
self.share_nfs_host_not_exist,
self.share_server)
def test_create_share_nfs_storagepool_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidHost,
self.driver.create_share,
self._context,
self.share_nfs_storagepool_fail,
self.share_server)
def test_create_share_nfs_no_data_fail(self):
self.driver.plugin.helper.create_share_data_flag = True
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_read_xml_fail(self):
test_fake_conf_file = None
self.driver.plugin.configuration.manila_huawei_conf_file = (
test_fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.helper._read_xml)
def test_connect_success(self):
FakeRpcServer.start = mock.Mock()
rpc.get_server = mock.Mock(return_value=FakeRpcServer())
self.driver.plugin.connect()
FakeRpcServer.start.assert_called_once()
def test_connect_fail(self):
self.driver.plugin.helper.test_multi_url_flag = 1
self.assertRaises(exception.InvalidShare,
self.driver.plugin.connect)
def test_login_success(self):
deviceid = self.driver.plugin.helper.login()
self.assertEqual("210235G7J20000000000", deviceid)
def test_check_for_setup_success(self):
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_service_down(self):
self.driver.plugin.helper.service_status_flag = False
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_nfs_down(self):
self.driver.plugin.helper.service_nfs_status_flag = False
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_service_false(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.check_for_setup_error)
def test_create_share_no_extra(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG,
self.driver.plugin.helper.alloc_type)
def test_create_share_with_extra_thin(self):
share_type = {
'extra_specs': {
'capabilities:thin_provisioning': '<is> True'
},
}
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG,
self.driver.plugin.helper.alloc_type)
def test_create_share_with_extra_thick(self):
share_type = {
'extra_specs': {
'capabilities:thin_provisioning': '<is> False'
},
}
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertEqual(constants.ALLOC_TYPE_THICK_FLAG,
self.driver.plugin.helper.alloc_type)
@ddt.data(*constants.VALID_SECTOR_SIZES)
def test_create_share_with_sectorsize_in_type(self, sectorsize):
share_type = {
'extra_specs': {
'capabilities:huawei_sectorsize': "<is> true",
'huawei_sectorsize:sectorsize': sectorsize,
},
}
self.mock_share_type(share_type)
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertTrue(db.share_type_get.called)
@ddt.data('128', 'xx', 'None', ' ')
def test_create_share_with_illegal_sectorsize_in_type(self, sectorsize):
share_type = {
'extra_specs': {
'capabilities:huawei_sectorsize': "<is> true",
'huawei_sectorsize:sectorsize': sectorsize,
},
}
self.mock_share_type(share_type)
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
@ddt.data({'extra_specs': {'capabilities:huawei_sectorsize': "<is> false",
'huawei_sectorsize:sectorsize': '0'}, 'xmlvalue': '4'},
{'extra_specs': {'capabilities:huawei_sectorsize': "<is> False",
'huawei_sectorsize:sectorsize': '128'}, 'xmlvalue': '8'},
{'extra_specs': {'capabilities:huawei_sectorsize': "false",
'huawei_sectorsize:sectorsize': 'a'}, 'xmlvalue': '16'},
{'extra_specs': {'capabilities:huawei_sectorsize': "False",
'huawei_sectorsize:sectorsize': 'xx'}, 'xmlvalue': '32'},
{'extra_specs': {'capabilities:huawei_sectorsize': "true",
'huawei_sectorsize:sectorsize': 'None'}, 'xmlvalue': '64'},
{'extra_specs': {'capabilities:huawei_sectorsize': "True",
'huawei_sectorsize:sectorsize': ' '}, 'xmlvalue': ' '},
{'extra_specs': {'capabilities:huawei_sectorsize': "True",
'huawei_sectorsize:sectorsize': ''}, 'xmlvalue': ''})
@ddt.unpack
def test_create_share_with_invalid_type_valid_xml(self, extra_specs,
xmlvalue):
fake_share_type = {}
fake_share_type['extra_specs'] = extra_specs
self.mock_share_type(fake_share_type)
self.recreate_fake_conf_file(sectorsize_value=xmlvalue)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertTrue(db.share_type_get.called)
@ddt.data({'extra_specs': {'capabilities:huawei_sectorsize': "<is> false",
'huawei_sectorsize:sectorsize': '4'}, 'xmlvalue': '0'},
{'extra_specs': {'capabilities:huawei_sectorsize': "<is> False",
'huawei_sectorsize:sectorsize': '8'}, 'xmlvalue': '128'},
{'extra_specs': {'capabilities:huawei_sectorsize': "false",
'huawei_sectorsize:sectorsize': '16'}, 'xmlvalue': 'a'},
{'extra_specs': {'capabilities:huawei_sectorsize': "False",
'huawei_sectorsize:sectorsize': '32'}, 'xmlvalue': 'xx'},
{'extra_specs': {'capabilities:huawei_sectorsize': "true",
'huawei_sectorsize:sectorsize': '64'}, 'xmlvalue': 'None'})
@ddt.unpack
def test_create_share_with_invalid_type_illegal_xml(self, extra_specs,
xmlvalue):
fake_share_type = {}
fake_share_type['extra_specs'] = extra_specs
self.mock_share_type(fake_share_type)
self.recreate_fake_conf_file(sectorsize_value=xmlvalue)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_shrink_share_success(self):
self.driver.plugin.helper.shrink_share_flag = False
self.driver.plugin.helper.login()
self.driver.shrink_share(self.share_nfs, 1,
self.share_server)
self.assertTrue(self.driver.plugin.helper.shrink_share_flag)
def test_shrink_share_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.shrink_share,
self.share_nfs,
1,
self.share_server)
def test_shrink_share_size_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.shrink_share,
self.share_nfs,
5,
self.share_server)
def test_shrink_share_alloctype_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.fs_status_flag = False
self.assertRaises(exception.InvalidShare,
self.driver.shrink_share,
self.share_nfs,
1,
self.share_server)
def test_shrink_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidShare,
self.driver.shrink_share,
self.share_nfs,
1,
self.share_server)
def test_extend_share_success(self):
self.driver.plugin.helper.extend_share_flag = False
self.driver.plugin.helper.login()
self.driver.extend_share(self.share_nfs, 5,
self.share_server)
self.assertTrue(self.driver.plugin.helper.extend_share_flag)
def test_extend_share_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidInput,
self.driver.extend_share,
self.share_nfs,
3,
self.share_server)
def test_extend_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidShareAccess,
self.driver.extend_share,
self.share_nfs,
4,
self.share_server)
def test_create_share_nfs_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_create_share_cifs_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_cifs,
self.share_server)
self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location)
def test_create_share_with_extra(self):
self.driver.plugin.helper.add_fs_to_partition_flag = False
self.driver.plugin.helper.add_fs_to_cache_flag = False
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
self.assertTrue(self.driver.plugin.helper.add_fs_to_partition_flag)
self.assertTrue(self.driver.plugin.helper.add_fs_to_cache_flag)
@ddt.data({'capabilities:dedupe': '<is> True',
'capabilities:thin_provisioning': '<is> False'},
{'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:thin_provisioning': '<is> False'},
{'capabilities:huawei_smartcache': '<is> True',
'huawei_smartcache:cachename': None},
{'capabilities:huawei_smartpartition': '<is> True',
'huawei_smartpartition:partitionname': None},
{'capabilities:huawei_smartcache': '<is> True'},
{'capabilities:huawei_smartpartition': '<is> True'})
def test_create_share_with_extra_error(self, fake_extra_specs):
fake_share_type_id = 'fooid-2'
fake_type_error_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
share_type = fake_type_error_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs_thick,
self.share_server)
@ddt.data({"fake_extra_specs_qos": {"qos:maxIOPS": "100",
"qos:maxBandWidth": "50",
"qos:IOType": "0"},
"fake_qos_info": {"MAXIOPS": "100",
"MAXBANDWIDTH": "50",
"IOTYPE": "0",
"LATENCY": "0",
"NAME": "OpenStack_fake_qos"}},
{"fake_extra_specs_qos": {"qos:maxIOPS": "100",
"qos:IOType": "1"},
"fake_qos_info": {"NAME": "fake_qos",
"MAXIOPS": "100",
"IOTYPE": "1",
"LATENCY": "0"}},
{"fake_extra_specs_qos": {"qos:minIOPS": "100",
"qos:minBandWidth": "50",
'qos:latency': "50",
"qos:IOType": "0"},
"fake_qos_info": {"MINIOPS": "100",
"MINBANDWIDTH": "50",
"IOTYPE": "0",
"LATENCY": "50",
"NAME": "OpenStack_fake_qos"}})
@ddt.unpack
def test_create_share_with_qos(self, fake_extra_specs_qos, fake_qos_info):
fake_share_type_id = 'fooid-2'
fake_extra_specs = {"capabilities:qos": "<is> True"}
fake_extra_specs.update(fake_extra_specs_qos)
fake_type_error_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
fake_qos_info_respons = {
"error": {
"code": 0
},
"data": [{
"ID": "11",
"FSLIST": u'["1", "2", "3", "4"]',
"LUNLIST": '[""]',
"RUNNINGSTATUS": "2",
}]
}
fake_qos_info_respons["data"][0].update(fake_qos_info)
share_type = fake_type_error_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(helper.RestHelper,
'get_qos',
mock.Mock(return_value=fake_qos_info_respons))
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
@ddt.data({'capabilities:qos': '<is> True',
'qos:maxIOPS': -1},
{'capabilities:qos': '<is> True',
'qos:IOTYPE': 4},
{'capabilities:qos': '<is> True',
'qos:IOTYPE': 100},
{'capabilities:qos': '<is> True',
'qos:maxIOPS': 0},
{'capabilities:qos': '<is> True',
'qos:minIOPS': 0},
{'capabilities:qos': '<is> True',
'qos:minBandWidth': 0},
{'capabilities:qos': '<is> True',
'qos:maxBandWidth': 0},
{'capabilities:qos': '<is> True',
'qos:latency': 0},
{'capabilities:qos': '<is> True',
'qos:maxIOPS': 100},
{'capabilities:qos': '<is> True',
'qos:maxIOPS': 100,
'qos:minBandWidth': 100,
'qos:IOType': '0'})
def test_create_share_with_invalid_qos(self, fake_extra_specs):
fake_share_type_id = 'fooid-2'
fake_type_error_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
share_type = fake_type_error_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_cache_not_exist(self):
self.driver.plugin.helper.cache_exist = False
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_add_share_to_cache_fail(self):
opts = dict(
huawei_smartcache='true',
cachename=None,
)
fsid = 4
smartcache = smartx.SmartCache(self.driver.plugin.helper)
self.assertRaises(exception.InvalidInput, smartcache.add,
opts, fsid)
def test_create_share_partition_not_exist(self):
self.driver.plugin.helper.partition_exist = False
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_add_share_to_partition_fail(self):
opts = dict(
huawei_smartpartition='true',
partitionname=None,
)
fsid = 4
smartpartition = smartx.SmartPartition(self.driver.plugin.helper)
self.assertRaises(exception.InvalidInput, smartpartition.add,
opts, fsid)
def test_login_fail(self):
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.plugin.helper.login)
def test_create_share_nfs_fs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_nfs_status_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.fs_status_flag = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_cifs_fs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_cifs,
self.share_server)
def test_create_share_cifs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_share_flag = True
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_cifs,
self.share_server)
def test_create_share_nfs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_share_flag = True
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
@ddt.data({"share_proto": "NFS",
"fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100",
"IOType": "2",
"FSLIST": u'["0", "1", "4"]'}},
{"share_proto": "CIFS",
"fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100",
"IOType": "2", "FSLIST": u'["4"]',
"RUNNINGSTATUS": "2"}})
@ddt.unpack
def test_delete_share_success(self, share_proto, fake_qos_info_respons):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
if share_proto == 'NFS':
share = self.share_nfs
else:
share = self.share_cifs
with mock.patch.object(helper.RestHelper, 'get_qos_info',
return_value=fake_qos_info_respons):
self.driver.delete_share(self._context,
share, self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_delete_share_withoutqos_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.qos_support = True
self.driver.delete_share(self._context,
self.share_nfs, self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_check_snapshot_id_exist_fail(self):
snapshot_id = "4@share_snapshot_not_exist"
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
snapshot_info = self.driver.plugin.helper._get_snapshot_by_id(
snapshot_id)
self.assertRaises(exception.InvalidShareSnapshot,
self.driver.plugin.helper._check_snapshot_id_exist,
snapshot_info)
def test_delete_share_nfs_fail_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.share_exist = False
self.driver.delete_share(self._context,
self.share_nfs, self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_delete_share_cifs_success(self):
self.driver.plugin.helper.delete_flag = False
fake_qos_info_respons = {
"ID": "11",
"FSLIST": u'["1", "2", "3", "4"]',
"LUNLIST": '[""]',
"RUNNINGSTATUS": "2",
}
self.mock_object(helper.RestHelper,
'get_qos_info',
mock.Mock(return_value=fake_qos_info_respons))
self.driver.plugin.helper.login()
self.driver.delete_share(self._context, self.share_cifs,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_get_network_allocations_number_dhss_true(self):
self.configuration.driver_handles_share_servers = True
number = self.driver.get_network_allocations_number()
self.assertEqual(1, number)
def test_get_network_allocations_number_dhss_false(self):
self.configuration.driver_handles_share_servers = False
number = self.driver.get_network_allocations_number()
self.assertEqual(0, number)
def test_create_nfsshare_from_nfssnapshot_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'copy_snapshot_data',
mock.Mock(return_value=True))
self.mock_object(self.driver.plugin,
'umount_share_from_host',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
location = self.driver.create_share_from_snapshot(self._context,
self.share_nfs,
self.nfs_snapshot,
self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual(2, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(self.driver.plugin.
copy_snapshot_data.called)
self.assertEqual(2, self.driver.plugin.
umount_share_from_host.call_count)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_create_cifsshare_from_cifssnapshot_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'copy_snapshot_data',
mock.Mock(return_value=True))
self.mock_object(self.driver.plugin,
'umount_share_from_host',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
location = self.driver.create_share_from_snapshot(self._context,
self.share_cifs,
self.cifs_snapshot,
self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual(2, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(self.driver.plugin.
copy_snapshot_data.called)
self.assertEqual(2, self.driver.plugin.
umount_share_from_host.call_count)
self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location)
def test_create_nfsshare_from_cifssnapshot_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'_get_access_id',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'copy_snapshot_data',
mock.Mock(return_value=True))
self.mock_object(self.driver.plugin,
'umount_share_from_host',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.access_id = None
self.driver.plugin.helper.snapshot_flag = True
location = self.driver.create_share_from_snapshot(self._context,
self.share_nfs,
self.cifs_snapshot,
self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertTrue(self.driver.plugin.
_get_access_id.called)
self.assertEqual(2, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(self.driver.plugin.
copy_snapshot_data.called)
self.assertEqual(2, self.driver.plugin.
umount_share_from_host.call_count)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_create_cifsshare_from_nfssnapshot_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'_get_access_id',
mock.Mock(return_value={}))
self.mock_object(utils,
'execute',
mock.Mock(return_value=("", "")))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
location = self.driver.create_share_from_snapshot(self._context,
self.share_cifs,
self.nfs_snapshot,
self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertTrue(self.driver.plugin.
_get_access_id.called)
self.assertEqual(7, utils.execute.call_count)
self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location)
def test_create_share_from_snapshot_nonefs(self):
self.driver.plugin.helper.login()
self.mock_object(self.driver.plugin.helper,
'get_fsid_by_name',
mock.Mock(return_value={}))
self.assertRaises(exception.StorageResourceNotFound,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(self.driver.plugin.helper.
get_fsid_by_name.called)
def test_create_share_from_notexistingsnapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = False
self.assertRaises(exception.ShareSnapshotNotFound,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
def test_create_share_from_share_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.mock_object(self.driver.plugin,
'check_fs_status',
mock.Mock(return_value={}))
self.assertRaises(exception.StorageResourceException,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(self.driver.plugin.check_fs_status.called)
def test_create_share_from_snapshot_share_error(self):
self.mock_object(self.driver.plugin,
'_get_share_proto',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.assertRaises(exception.ShareResourceNotFound,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(self.driver.plugin.
_get_share_proto.called)
def test_create_share_from_snapshot_allow_oldaccess_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'_get_share_proto',
mock.Mock(return_value='NFS'))
self.mock_object(self.driver.plugin,
'_get_access_id',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin.helper,
'_get_share_by_name',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.assertRaises(exception.ShareResourceNotFound,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertTrue(self.driver.plugin._get_share_proto.called)
self.assertTrue(self.driver.plugin._get_access_id.called)
self.assertTrue(self.driver.plugin.helper._get_share_by_name.called)
def test_create_share_from_snapshot_mountshare_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(side_effect=exception.
ShareMountException('err')))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.assertRaises(exception.ShareMountException,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual(1, self.driver.plugin.
mount_share_to_host.call_count)
def test_create_share_from_snapshot_allow_newaccess_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'_get_share_proto',
mock.Mock(return_value='NFS'))
self.mock_object(self.driver.plugin,
'_get_access_id',
mock.Mock(return_value='5'))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin.helper,
'_get_share_by_name',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'umount_share_from_host',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.assertRaises(exception.ShareResourceNotFound,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertTrue(self.driver.plugin._get_share_proto.called)
self.assertTrue(self.driver.plugin._get_access_id.called)
self.assertEqual(1, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(self.driver.plugin.helper.
_get_share_by_name.called)
self.assertEqual(1, self.driver.plugin.
umount_share_from_host.call_count)
def test_create_nfsshare_from_nfssnapshot_copydata_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(data_utils,
'Copy',
mock.Mock(side_effect=Exception('err')))
self.mock_object(utils,
'execute',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
self.assertRaises(exception.ShareCopyDataException,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs,
self.nfs_snapshot, self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual(2, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(data_utils.Copy.called)
self.assertEqual(2, utils.execute.call_count)
def test_create_nfsshare_from_nfssnapshot_umountshare_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.mock_object(self.driver.plugin,
'mount_share_to_host',
mock.Mock(return_value={}))
self.mock_object(self.driver.plugin,
'copy_snapshot_data',
mock.Mock(return_value=True))
self.mock_object(self.driver.plugin,
'umount_share_from_host',
mock.Mock(side_effect=exception.
ShareUmountException('err')))
self.mock_object(os, 'rmdir',
mock.Mock(side_effect=Exception('err')))
self.driver.plugin.helper.login()
self.driver.plugin.helper.snapshot_flag = True
location = self.driver.create_share_from_snapshot(self._context,
self.share_nfs,
self.cifs_snapshot,
self.share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual(2, self.driver.plugin.
mount_share_to_host.call_count)
self.assertTrue(self.driver.plugin.copy_snapshot_data.called)
self.assertEqual(2, self.driver.plugin.
umount_share_from_host.call_count)
self.assertTrue(os.rmdir.called)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_get_share_stats_refresh_pool_not_exist(self):
self.recreate_fake_conf_file(pool_node_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver._update_share_stats)
@ddt.data({"snapshot_support": True,
"replication_support": False},
{"snapshot_support": False,
"replication_support": True})
@ddt.unpack
def test_get_share_stats_refresh(self, snapshot_support,
replication_support):
self.recreate_fake_conf_file(snapshot_support=snapshot_support,
replication_support=replication_support)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin._setup_conf()
self.driver._update_share_stats()
expected = {
"share_backend_name": "fake_share_backend_name",
"driver_handles_share_servers": False,
"vendor_name": "Huawei",
"driver_version": "1.3",
"storage_protocol": "NFS_CIFS",
"reserved_percentage": 0,
'reserved_snapshot_percentage': 0,
"total_capacity_gb": 0.0,
"free_capacity_gb": 0.0,
"qos": True,
"snapshot_support": snapshot_support,
"create_share_from_snapshot_support": snapshot_support,
"revert_to_snapshot_support": snapshot_support,
"mount_snapshot_support": False,
"replication_domain": None,
"filter_function": None,
"goodness_function": None,
"pools": [],
"share_group_stats": {"consistent_snapshot_support": None},
"ipv4_support": True,
"ipv6_support": False,
"security_service_update_support": False,
}
if replication_support:
expected['replication_type'] = 'dr'
pool = dict(
pool_name='OpenStack_Pool',
total_capacity_gb=2.0,
free_capacity_gb=1.0,
allocated_capacity_gb=1.0,
qos=True,
reserved_percentage=0,
reserved_snapshot_percentage=0,
compression=[True, False],
dedupe=[True, False],
max_over_subscription_ratio=1,
provisioned_capacity_gb=1.0,
thin_provisioning=[True, False],
huawei_smartcache=[True, False],
huawei_smartpartition=[True, False],
huawei_sectorsize=[True, False],
huawei_disk_type='ssd',
)
expected["pools"].append(pool)
self.assertEqual(expected, self.driver._stats)
@ddt.data({'TIER0CAPACITY': '100',
'TIER1CAPACITY': '0',
'TIER2CAPACITY': '0',
'disktype': 'ssd'},
{'TIER0CAPACITY': '0',
'TIER1CAPACITY': '100',
'TIER2CAPACITY': '0',
'disktype': 'sas'},
{'TIER0CAPACITY': '0',
'TIER1CAPACITY': '0',
'TIER2CAPACITY': '100',
'disktype': 'nl_sas'},
{'TIER0CAPACITY': '100',
'TIER1CAPACITY': '100',
'TIER2CAPACITY': '100',
'disktype': 'mix'},
{'TIER0CAPACITY': '0',
'TIER1CAPACITY': '0',
'TIER2CAPACITY': '0',
'disktype': ''})
def test_get_share_stats_disk_type(self, disk_type_value):
self.driver.plugin.helper.login()
storage_pool_info = {"error": {"code": 0},
"data": [{"USERFREECAPACITY": "2097152",
"ID": "1",
"NAME": "OpenStack_Pool",
"USERTOTALCAPACITY": "4194304",
"USAGETYPE": "2",
"USERCONSUMEDCAPACITY": "2097152"}]}
storage_pool_info['data'][0]['TIER0CAPACITY'] = (
disk_type_value['TIER0CAPACITY'])
storage_pool_info['data'][0]['TIER1CAPACITY'] = (
disk_type_value['TIER1CAPACITY'])
storage_pool_info['data'][0]['TIER2CAPACITY'] = (
disk_type_value['TIER2CAPACITY'])
self.mock_object(self.driver.plugin.helper, '_find_all_pool_info',
mock.Mock(return_value=storage_pool_info))
self.driver._update_share_stats()
if disk_type_value['disktype']:
self.assertEqual(
disk_type_value['disktype'],
self.driver._stats['pools'][0]['huawei_disk_type'])
else:
self.assertIsNone(
self.driver._stats['pools'][0].get('huawei_disk_type'))
def test_get_disk_type_pool_info_none(self):
self.driver.plugin.helper.login()
self.mock_object(self.driver.plugin.helper, '_find_pool_info',
mock.Mock(return_value=None))
self.assertRaises(exception.InvalidInput,
self.driver._update_share_stats)
def test_allow_access_proto_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidInput,
self.driver.allow_access,
self._context,
self.share_proto_fail,
self.access_ip,
self.share_server)
def test_allow_access_ip_rw_success(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.driver.allow_access(self._context,
self.share_nfs,
self.access_ip,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_allow_access_ip_ro_success(self):
access_ro = {
'access_type': 'ip',
'access_to': '1.2.3.4',
'access_level': 'ro',
}
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_ro_flag = False
self.driver.allow_access(self._context,
self.share_nfs,
access_ro,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_ro_flag)
def test_allow_access_nfs_user_success(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.driver.allow_access(self._context,
self.share_nfs,
self.access_user,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
@ddt.data(
{
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'rw',
},
{
'access_type': 'user',
'access_to': 'group_name',
'access_level': 'rw',
},
{
'access_type': 'user',
'access_to': 'domain\\user_name',
'access_level': 'rw',
},
{
'access_type': 'user',
'access_to': 'domain\\group_name',
'access_level': 'rw',
},
)
def test_allow_access_cifs_rw_success(self, access_user):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.driver.allow_access(self._context, self.share_cifs,
access_user, self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_allow_access_cifs_user_ro_success(self):
access_ro = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'ro',
}
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_ro_flag = False
self.driver.allow_access(self._context, self.share_cifs,
access_ro, self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_ro_flag)
def test_allow_access_level_fail(self):
access_fail = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'fail',
}
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access,
self._context, self.share_cifs,
access_fail, self.share_server)
def test_update_access_add_delete(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.deny_flag = False
add_rules = [self.access_ip]
delete_rules = [self.access_ip_exist]
self.driver.update_access(self._context,
self.share_nfs,
None,
add_rules,
delete_rules,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
self.assertTrue(self.driver.plugin.helper.deny_flag)
def test_update_access_nfs(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
rules = [self.access_ip, self.access_ip_exist]
self.driver.update_access(self._context,
self.share_nfs,
rules,
None,
None,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_update_access_cifs(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
rules = [self.access_user, self.access_user_exist]
self.driver.update_access(self._context,
self.share_cifs,
rules,
None,
None,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_update_access_rules_share_not_exist(self):
self.driver.plugin.helper.login()
rules = [self.access_ip]
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.ShareResourceNotFound,
self.driver.update_access, self._context,
self.share_nfs, rules, None, None, self.share_server)
@ddt.data(True, False)
def test_nfs_access_for_all_ip_addresses(self, is_allow):
access_all = {
'access_type': 'ip',
'access_to': '0.0.0.0/0',
'access_level': 'rw',
}
self.driver.plugin.helper.login()
method = (self.driver.allow_access if is_allow
else self.driver.deny_access)
with mock.patch.object(self.driver.plugin.helper,
'_get_access_from_share') as mock_call:
mock_call.return_value = None
method(self._context, self.share_nfs,
access_all, self.share_server)
mock_call.assert_called_with('1', '*', 'NFS')
def test_get_share_client_type_fail(self):
share_proto = 'fake_proto'
self.assertRaises(exception.InvalidInput,
self.driver.plugin.helper._get_share_client_type,
share_proto)
@ddt.data("NFS", "CIFS")
def test_get_share_url_type(self, share_proto):
share_url_type = self.driver.plugin.helper._get_share_url_type(
share_proto)
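        # The backend's share URL type is the protocol name plus 'HARE',
        # i.e. 'NFSHARE' or 'CIFSHARE'.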
self.assertEqual(share_proto + 'HARE', share_url_type)
def test_get_location_path_fail(self):
share_name = 'share-fake-uuid'
share_proto = 'fake_proto'
self.assertRaises(exception.InvalidShareAccess,
self.driver.plugin._get_location_path, share_name,
share_proto)
def test_allow_access_nfs_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access, self._context,
self.share_nfs, self.access_cert, self.share_server)
def test_allow_access_cifs_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access, self._context,
self.share_cifs, self.access_ip, self.share_server)
def test_deny_access_nfs_fail(self):
self.driver.plugin.helper.login()
result = self.driver.deny_access(self._context, self.share_nfs,
self.access_cert, self.share_server)
self.assertIsNone(result)
def test_deny_access_not_exist_fail(self):
self.driver.plugin.helper.login()
access_ip_not_exist = {
'access_type': 'ip',
'access_to': '100.112.0.99',
'access_level': 'rw',
}
result = self.driver.deny_access(self._context, self.share_nfs,
access_ip_not_exist,
self.share_server)
self.assertIsNone(result)
def test_deny_access_cifs_fail(self):
self.driver.plugin.helper.login()
result = self.driver.deny_access(self._context, self.share_cifs,
self.access_ip, self.share_server)
self.assertIsNone(result)
def test_allow_access_ip_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.ShareResourceNotFound,
self.driver.allow_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_ip_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.driver.deny_access(self._context, self.share_nfs,
self.access_ip, self.share_server)
def test_allow_access_ip_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.allow_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_allow_access_user_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.allow_access, self._context,
self.share_cifs, self.access_user, self.share_server)
def test_deny_access_ip_success(self):
self.driver.plugin.helper.login()
self.deny_flag = False
self.driver.deny_access(self._context, self.share_nfs,
self.access_ip_exist, self.share_server)
self.assertTrue(self.driver.plugin.helper.deny_flag)
def test_deny_access_user_success(self):
self.driver.plugin.helper.login()
self.deny_flag = False
self.driver.deny_access(self._context, self.share_cifs,
self.access_user_exist, self.share_server)
self.assertTrue(self.driver.plugin.helper.deny_flag)
def test_deny_access_ip_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.deny_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_user_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.deny_access, self._context,
self.share_cifs, self.access_user, self.share_server)
def test_create_nfs_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_snapflag = False
self.driver.create_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.create_snapflag)
def test_create_nfs_snapshot_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidInput,
self.driver.create_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_snapflag = False
self.driver.create_snapshot(self._context, self.cifs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.create_snapflag)
def test_delete_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.snapshot_flag = True
self.driver.delete_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_delete_snapshot_not_exist_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.snapshot_flag = False
self.driver.delete_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_create_nfs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_snapshot, self._context,
self.cifs_snapshot, self.share_server)
def test_delete_nfs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.delete_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_delete_cifs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.delete_snapshot, self._context,
self.cifs_snapshot, self.share_server)
@ddt.data({"share_proto": "NFS",
"path": ["100.115.10.68:/share_fake_manage_uuid"]},
{"share_proto": "CIFS",
"path": ["\\\\100.115.10.68\\share_fake_manage_uuid"]})
@ddt.unpack
def test_manage_share_nfs_success(self, share_proto, path):
if share_proto == "NFS":
share = self.share_manage_nfs
elif share_proto == "CIFS":
share = self.share_manage_cifs
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
share_info = self.driver.manage_existing(share,
self.driver_options)
self.assertEqual(4, share_info["size"])
self.assertEqual(path, share_info["export_locations"])
@ddt.data({"fs_alloctype": "THIN",
"path": ["100.115.10.68:/share_fake_manage_uuid"]},
{"fs_alloctype": "THICK",
"path": ["100.115.10.68:/share_fake_uuid_thickfs"]})
@ddt.unpack
def test_manage_share_with_default_type(self, fs_alloctype, path):
if fs_alloctype == "THIN":
share = self.share_manage_nfs
elif fs_alloctype == "THICK":
share = self.share_nfs_thick_thickfs
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
share_info = self.driver.manage_existing(share,
self.driver_options)
self.assertEqual(4, share_info["size"])
self.assertEqual(path, share_info["export_locations"])
@ddt.data({"path": ["100.115.10.68:/share_fake_uuid_inpartition"]})
@ddt.unpack
def test_manage_share_remove_from_partition(self, path):
share = self.share_nfs_inpartition
share_type = self.fake_type_fake_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
share_info = self.driver.manage_existing(share,
self.driver_options)
self.assertEqual(4, share_info["size"])
self.assertEqual(path,
share_info["export_locations"])
@ddt.data({"flag": "share_not_exist", "exc": exception.InvalidShare},
{"flag": "fs_status_error", "exc": exception.InvalidShare},
{"flag": "poolname_not_match", "exc": exception.InvalidHost})
@ddt.unpack
def test_manage_share_fail(self, flag, exc):
share = None
if flag == "share_not_exist":
self.driver.plugin.helper.share_exist = False
share = self.share_nfs
elif flag == "fs_status_error":
self.driver.plugin.helper.fs_status_flag = False
share = self.share_nfs
elif flag == "poolname_not_match":
share = self.share_pool_name_not_match
self.driver.plugin.helper.login()
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exc,
self.driver.manage_existing,
share,
self.driver_options)
def test_manage_share_thickfs_set_dedupe_fail(self):
share = self.share_nfs_thick_thickfs
self.driver.plugin.helper.login()
share_type = self.fake_type_thin_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
share,
self.driver_options)
def test_manage_share_thickfs_not_match_thinpool_fail(self):
share = self.share_nfs_thickfs
self.driver.plugin.helper.login()
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exception.InvalidHost,
self.driver.manage_existing,
share,
self.driver_options)
@ddt.data({"flag": "old_cache_id", "exc": exception.InvalidInput},
{"flag": "not_old_cache_id", "exc": exception.InvalidInput})
@ddt.unpack
def test_manage_share_cache_not_exist(self, flag, exc):
share = None
if flag == "old_cache_id":
share = self.share_nfs_inpartition
elif flag == "not_old_cache_id":
share = self.share_nfs
self.driver.plugin.helper.cache_exist = False
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exc,
self.driver.manage_existing,
share,
self.share_server)
def test_manage_add_share_to_cache_fail(self):
opts = dict(
huawei_smartcache='true',
huawei_smartpartition='true',
cachename='test_cache_name_fake',
partitionname='test_partition_name_fake',
)
fs = dict(
SMARTCACHEID='6',
SMARTPARTITIONID=None,
)
poolinfo = dict(
type='Thin',
)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_retype_change_opts,
opts, poolinfo, fs)
def test_manage_notsetcache_fail(self):
opts = dict(
huawei_smartcache='true',
huawei_smartpartition='true',
cachename=None,
partitionname='test_partition_name_fake',
)
fs = dict(
SMARTCACHEID='6',
SMARTPARTITIONID='6',
)
poolinfo = dict(
type='Thin',
)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_retype_change_opts,
opts, poolinfo, fs)
@ddt.data({"flag": "old_partition_id", "exc": exception.InvalidInput},
{"flag": "not_old_partition_id", "exc": exception.InvalidInput})
@ddt.unpack
def test_manage_share_partition_not_exist(self, flag, exc):
share = None
if flag == "old_partition_id":
share = self.share_nfs_inpartition
elif flag == "not_old_partition_id":
share = self.share_nfs
self.driver.plugin.helper.partition_exist = False
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exc,
self.driver.manage_existing,
share,
self.share_server)
def test_manage_add_share_to_partition_fail(self):
opts = dict(
huawei_smartcache='true',
huawei_smartpartition='true',
cachename='test_cache_name_fake',
partitionname='test_partition_name_fake',
)
fs = dict(
SMARTCACHEID=None,
SMARTPARTITIONID='6',
)
poolinfo = dict(
type='Thin',
)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_retype_change_opts,
opts, poolinfo, fs)
def test_manage_notset_partition_fail(self):
opts = dict(
huawei_smartcache='true',
huawei_smartpartition='true',
cachename='test_cache_name_fake',
partitionname=None,
)
fs = dict(
SMARTCACHEID=None,
SMARTPARTITIONID='6',
)
poolinfo = dict(
type='Thin',
)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_retype_change_opts,
opts, poolinfo, fs)
@ddt.data({"share_proto": "NFS",
"export_path": "fake_ip:/share_fake_uuid"},
{"share_proto": "NFS", "export_path": "fake_ip:/"},
{"share_proto": "NFS",
"export_path": "100.112.0.1://share_fake_uuid"},
{"share_proto": "NFS", "export_path": None},
{"share_proto": "NFS", "export_path": "\\share_fake_uuid"},
{"share_proto": "CIFS",
"export_path": "\\\\fake_ip\\share_fake_uuid"},
{"share_proto": "CIFS",
"export_path": "\\dd\\100.115.10.68\\share_fake_uuid"})
@ddt.unpack
def test_manage_export_path_fail(self, share_proto, export_path):
share_manage_nfs_export_path_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-manage-uuid',
'size': 1,
'share_proto': share_proto,
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': export_path},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id'
}
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
share_manage_nfs_export_path_fail,
self.driver_options)
def test_manage_logical_port_ip_fail(self):
self.recreate_fake_conf_file(logical_port="")
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
self.share_nfs,
self.driver_options)
@ddt.data({"share_proto": "NFS",
"provider_location": "share_snapshot_fake_snapshot_uuid"},
{"share_proto": "CIFS",
"provider_location": "share_snapshot_fake_snapshot_uuid"})
@ddt.unpack
def test_manage_existing_snapshot_success(self, share_proto,
provider_location):
if share_proto == "NFS":
snapshot = self.storage_nfs_snapshot
elif share_proto == "CIFS":
snapshot = self.storage_cifs_snapshot
self.driver.plugin.helper.login()
snapshot_info = self.driver.manage_existing_snapshot(
snapshot, self.driver_options)
self.assertEqual(provider_location, snapshot_info['provider_location'])
def test_manage_existing_snapshot_share_not_exist(self):
self.driver.plugin.helper.login()
self.mock_object(self.driver.plugin.helper,
'_get_share_by_name',
mock.Mock(return_value={}))
self.assertRaises(exception.InvalidShare,
self.driver.manage_existing_snapshot,
self.storage_nfs_snapshot,
self.driver_options)
def test_manage_existing_snapshot_sharesnapshot_not_exist(self):
self.driver.plugin.helper.login()
self.mock_object(self.driver.plugin.helper,
'_check_snapshot_id_exist',
mock.Mock(return_value={}))
self.assertRaises(exception.ManageInvalidShareSnapshot,
self.driver.manage_existing_snapshot,
self.storage_nfs_snapshot,
self.driver_options)
def test_manage_existing_snapshot_sharesnapshot_not_normal(self):
snapshot_info = {"error": {"code": 0},
"data": {"ID": "4@share_snapshot_fake_snapshot_uuid",
"NAME": "share_snapshot_fake_snapshot_uuid",
"HEALTHSTATUS": "2"}}
self.driver.plugin.helper.login()
self.mock_object(self.driver.plugin.helper,
'_get_snapshot_by_id',
mock.Mock(return_value=snapshot_info))
self.assertRaises(exception.ManageInvalidShareSnapshot,
self.driver.manage_existing_snapshot,
self.storage_nfs_snapshot,
self.driver_options)
def test_get_pool_success(self):
self.driver.plugin.helper.login()
pool_name = self.driver.get_pool(self.share_nfs_host_not_exist)
self.assertEqual('OpenStack_Pool', pool_name)
def test_get_pool_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
pool_name = self.driver.get_pool(self.share_nfs_host_not_exist)
self.assertIsNone(pool_name)
def test_multi_resturls_success(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.recreate_fake_conf_file(multi_url=True)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.test_multi_url_flag = 2
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_multi_resturls_fail(self):
self.recreate_fake_conf_file(multi_url=True)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.test_multi_url_flag = 1
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
@dec_driver_handles_share_servers
def test_setup_server_success(self):
backend_details = self.driver.setup_server(self.fake_network_info)
fake_share_server = {
'backend_details': backend_details
}
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
location = self.driver.create_share(self._context, self.share_nfs,
fake_share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual((self.fake_network_allocations[0]['ip_address']
+ ":/share_fake_uuid"), location)
@dec_driver_handles_share_servers
def test_setup_server_with_bond_port_success(self):
self.recreate_fake_conf_file(logical_port='fake_bond')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
backend_details = self.driver.setup_server(self.fake_network_info)
fake_share_server = {
'backend_details': backend_details
}
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
location = self.driver.create_share(self._context, self.share_nfs,
fake_share_server)
self.assertTrue(db.share_type_get.called)
self.assertEqual((self.fake_network_allocations[0]['ip_address']
+ ":/share_fake_uuid"), location)
@dec_driver_handles_share_servers
def test_setup_server_logical_port_exist(self):
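        # Fake the /LIF REST responses so an existing logical port is found
        # and the driver skips creating a new one.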
def call_logical_port_exist(*args, **kwargs):
url = args[0]
method = args[2]
if url == "/LIF" and method == "GET":
data = """{"error":{"code":0},"data":[{
"ID":"4",
"HOMEPORTID":"4",
"IPV4ADDR":"111.111.111.109",
"IPV4MASK":"255.255.255.0",
"OPERATIONALSTATUS":"false"}]}"""
elif url == "/LIF/4" and method == "PUT":
data = """{"error":{"code":0}}"""
else:
return self.driver.plugin.helper.do_call(*args, **kwargs)
res_json = jsonutils.loads(data)
return res_json
self.mock_object(self.driver.plugin.helper, "create_logical_port")
with mock.patch.object(self.driver.plugin.helper,
'call') as mock_call:
mock_call.side_effect = call_logical_port_exist
backend_details = self.driver.setup_server(self.fake_network_info)
self.assertEqual(backend_details['ip'],
self.fake_network_allocations[0]['ip_address'])
self.assertEqual(
0, self.driver.plugin.helper.create_logical_port.call_count)
@dec_driver_handles_share_servers
def test_setup_server_vlan_exist(self):
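        # Fake the /vlan REST response so an existing VLAN is found and the
        # driver skips creating a new one.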
def call_vlan_exist(*args, **kwargs):
url = args[0]
method = args[2]
if url == "/vlan" and method == "GET":
data = """{"error":{"code":0},"data":[{
"ID":"4",
"NAME":"fake_vlan",
"PORTID":"4",
"TAG":"2"}]}"""
else:
return self.driver.plugin.helper.do_call(*args, **kwargs)
res_json = jsonutils.loads(data)
return res_json
self.mock_object(self.driver.plugin.helper, "create_vlan")
with mock.patch.object(self.driver.plugin.helper,
'call') as mock_call:
mock_call.side_effect = call_vlan_exist
backend_details = self.driver.setup_server(self.fake_network_info)
self.assertEqual(backend_details['ip'],
self.fake_network_allocations[0]['ip_address'])
self.assertEqual(
0, self.driver.plugin.helper.create_vlan.call_count)
def test_setup_server_invalid_ipv4(self):
        network_info_invalid_ipv4 = self.fake_network_info
        network_info_invalid_ipv4['network_allocations'][0]['ip_address'] = (
            "::1/128")
        self.assertRaises(exception.InvalidInput,
                          self.driver._setup_server,
                          network_info_invalid_ipv4)
@dec_driver_handles_share_servers
def test_setup_server_network_type_error(self):
        vxlan_network_info = self.fake_network_info
        vxlan_network_info['network_type'] = 'vxlan'
        self.assertRaises(exception.NetworkBadConfigurationException,
                          self.driver.setup_server,
                          vxlan_network_info)
@dec_driver_handles_share_servers
def test_setup_server_port_conf_miss(self):
self.recreate_fake_conf_file(logical_port='')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
backend_details = self.driver.setup_server(self.fake_network_info)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
@dec_driver_handles_share_servers
def test_setup_server_port_offline_error(self):
self.mock_object(self.driver.plugin,
'_get_online_port',
mock.Mock(return_value=(None, None)))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
self.fake_network_info)
self.assertTrue(self.driver.plugin._get_online_port.called)
@dec_driver_handles_share_servers
def test_setup_server_port_not_exist(self):
self.mock_object(self.driver.plugin.helper,
'get_port_id',
mock.Mock(return_value=None))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
self.fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_port_id.called)
@dec_driver_handles_share_servers
def test_setup_server_port_type_not_exist(self):
self.mock_object(self.driver.plugin,
'_get_optimal_port',
mock.Mock(return_value=('CTE0.A.H2', '8')))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
self.fake_network_info)
self.assertTrue(self.driver.plugin._get_optimal_port.called)
@dec_driver_handles_share_servers
def test_setup_server_choose_eth_port(self):
self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.mock_object(self.driver.plugin.helper,
'get_all_vlan',
mock.Mock(return_value=[{'NAME': 'fake_bond.10'}]))
fake_network_info = self.fake_network_info
backend_details = self.driver.setup_server(fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_all_vlan.called)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
@dec_driver_handles_share_servers
def test_setup_server_choose_bond_port(self):
self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.mock_object(self.driver.plugin.helper,
'get_all_vlan',
mock.Mock(return_value=[{'NAME': 'CTE0.A.H0.10'}]))
fake_network_info = self.fake_network_info
backend_details = self.driver.setup_server(fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_all_vlan.called)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
@dec_driver_handles_share_servers
def test_setup_server_choose_least_logic_port(self):
self.recreate_fake_conf_file(
logical_port='CTE0.A.H0;CTE0.A.H2;CTE0.B.H0;BOND0')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
fake_network_info = {
'server_id': '0',
'segmentation_id': None,
'cidr': '111.111.111.0/24',
'network_allocations': self.fake_network_allocations,
'network_type': None,
}
self.mock_object(self.driver.plugin, '_get_online_port',
mock.Mock(return_value=(['CTE0.A.H0', 'CTE0.A.H2',
'CTE0.B.H0'], ['BOND0'])))
self.mock_object(self.driver.plugin.helper, 'get_all_logical_port',
mock.Mock(return_value=[
{'HOMEPORTTYPE': constants.PORT_TYPE_ETH,
'HOMEPORTNAME': 'CTE0.A.H0'},
{'HOMEPORTTYPE': constants.PORT_TYPE_VLAN,
'HOMEPORTNAME': 'CTE0.B.H0.10'},
{'HOMEPORTTYPE': constants.PORT_TYPE_BOND,
'HOMEPORTNAME': 'BOND0'}]))
self.mock_object(self.driver.plugin.helper,
'get_port_id',
mock.Mock(return_value=4))
backend_details = self.driver.setup_server(fake_network_info)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
self.driver.plugin._get_online_port.assert_called_once_with(
['CTE0.A.H0', 'CTE0.A.H2', 'CTE0.B.H0', 'BOND0'])
self.assertTrue(self.driver.plugin.helper.get_all_logical_port.called)
self.driver.plugin.helper.get_port_id.assert_called_once_with(
'CTE0.A.H2', constants.PORT_TYPE_ETH)
@dec_driver_handles_share_servers
def test_setup_server_create_vlan_fail(self):
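        # Make the /vlan POST return an error so setup_server fails while
        # creating the VLAN.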
def call_create_vlan_fail(*args, **kwargs):
url = args[0]
method = args[2]
if url == "/vlan" and method == "POST":
data = """{"error":{"code":1}}"""
res_json = jsonutils.loads(data)
return res_json
else:
return self.driver.plugin.helper.do_call(*args, **kwargs)
with mock.patch.object(self.driver.plugin.helper,
'call') as mock_call:
mock_call.side_effect = call_create_vlan_fail
self.assertRaises(exception.InvalidShare,
self.driver.setup_server,
self.fake_network_info)
@dec_driver_handles_share_servers
def test_setup_server_create_logical_port_fail(self):
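        # Make the /LIF POST fail so setup_server has to roll back the VLAN,
        # AD and LDAP configuration it created.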
def call_create_logical_port_fail(*args, **kwargs):
url = args[0]
method = args[2]
if url == "/LIF" and method == "POST":
data = """{"error":{"code":1}}"""
res_json = jsonutils.loads(data)
return res_json
else:
return self.driver.plugin.helper.do_call(*args, **kwargs)
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [
self.fake_active_directory, self.fake_ldap]
self.mock_object(self.driver.plugin.helper, "delete_vlan")
self.mock_object(self.driver.plugin.helper, "delete_AD_config")
self.mock_object(self.driver.plugin.helper, "delete_LDAP_config")
self.mock_object(self.driver.plugin.helper,
"get_AD_config",
mock.Mock(side_effect=[None,
{'DOMAINSTATUS': '1'},
{'DOMAINSTATUS': '0'}]))
self.mock_object(
self.driver.plugin.helper,
"get_LDAP_config",
mock.Mock(
side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}]))
with mock.patch.object(self.driver.plugin.helper,
'call') as mock_call:
mock_call.side_effect = call_create_logical_port_fail
self.assertRaises(exception.InvalidShare,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_AD_config.called)
self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called)
self.assertEqual(
1, self.driver.plugin.helper.delete_vlan.call_count)
self.assertEqual(
1, self.driver.plugin.helper.delete_AD_config.call_count)
self.assertEqual(
1, self.driver.plugin.helper.delete_LDAP_config.call_count)
@dec_driver_handles_share_servers
def test_setup_server_with_ad_domain_success(self):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_active_directory]
self.mock_object(self.driver.plugin.helper,
"get_AD_config",
mock.Mock(
side_effect=[None,
{'DOMAINSTATUS': '0',
'FULLDOMAINNAME': 'huawei.com'},
{'DOMAINSTATUS': '1',
'FULLDOMAINNAME': 'huawei.com'}]))
backend_details = self.driver.setup_server(fake_network_info)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
self.assertTrue(self.driver.plugin.helper.get_AD_config.called)
@ddt.data(
"100.97.5.87",
"100.97.5.87,100.97.5.88",
"100.97.5.87,100.97.5.88,100.97.5.89"
)
@dec_driver_handles_share_servers
def test_setup_server_with_ldap_domain_success(self, server_ips):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_ldap]
fake_network_info['security_services'][0]['server'] = server_ips
self.mock_object(
self.driver.plugin.helper,
"get_LDAP_config",
mock.Mock(
side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}]))
backend_details = self.driver.setup_server(fake_network_info)
self.assertEqual(self.fake_network_allocations[0]['ip_address'],
backend_details['ip'])
self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called)
@dec_driver_handles_share_servers
def test_setup_server_with_ldap_domain_fail(self):
server_ips = "100.97.5.87,100.97.5.88,100.97.5.89,100.97.5.86"
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_ldap]
fake_network_info['security_services'][0]['server'] = server_ips
self.mock_object(
self.driver.plugin.helper,
"get_LDAP_config",
mock.Mock(
side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}]))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called)
@ddt.data(
{'type': 'fake_unsupport'},
{'type': 'active_directory',
'dns_ip': '',
'user': '',
'password': '',
'domain': ''},
{'type': 'ldap',
'server': '',
'domain': ''},
)
@dec_driver_handles_share_servers
def test_setup_server_with_security_service_invalid(self, data):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [data]
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
@dec_driver_handles_share_servers
def test_setup_server_with_security_service_number_invalid(self):
fake_network_info = self.fake_network_info
ss = [
{'type': 'fake_unsupport'},
{'type': 'active_directory',
'dns_ip': '',
'user': '',
'password': '',
'domain': ''},
{'type': 'ldap',
'server': '',
'domain': ''},
]
fake_network_info['security_services'] = ss
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
@dec_driver_handles_share_servers
def test_setup_server_dns_exist_error(self):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_active_directory]
self.mock_object(self.driver.plugin.helper,
"get_DNS_ip_address",
mock.Mock(return_value=['100.97.5.85']))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_DNS_ip_address.called)
@dec_driver_handles_share_servers
def test_setup_server_ad_exist_error(self):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_active_directory]
self.mock_object(self.driver.plugin.helper,
"get_AD_config",
mock.Mock(
return_value={'DOMAINSTATUS': '1',
'FULLDOMAINNAME': 'huawei.com'}))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_AD_config.called)
@dec_driver_handles_share_servers
def test_setup_server_ldap_exist_error(self):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_ldap]
self.mock_object(self.driver.plugin.helper,
"get_LDAP_config",
mock.Mock(
return_value={'LDAPSERVER': '100.97.5.87'}))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called)
@dec_driver_handles_share_servers
def test_setup_server_with_dns_fail(self):
fake_network_info = self.fake_network_info
fake_active_directory = self.fake_active_directory
ip_list = "100.97.5.5,100.97.5.6,100.97.5.7,100.97.5.8"
fake_active_directory['dns_ip'] = ip_list
fake_network_info['security_services'] = [fake_active_directory]
self.mock_object(
self.driver.plugin.helper,
"get_AD_config",
mock.Mock(side_effect=[None, {'DOMAINSTATUS': '1'}]))
self.assertRaises(exception.InvalidInput,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_AD_config.called)
@dec_driver_handles_share_servers
def test_setup_server_with_ad_domain_fail(self):
fake_network_info = self.fake_network_info
fake_network_info['security_services'] = [self.fake_active_directory]
self.mock_object(self.driver.plugin,
'_get_wait_interval',
mock.Mock(return_value=1))
self.mock_object(self.driver.plugin,
'_get_timeout',
mock.Mock(return_value=1))
self.mock_object(
self.driver.plugin.helper,
"get_AD_config",
mock.Mock(side_effect=[None,
{'DOMAINSTATUS': '0',
'FULLDOMAINNAME': 'huawei.com'}]))
self.mock_object(self.driver.plugin.helper, "set_DNS_ip_address")
self.assertRaises(exception.InvalidShare,
self.driver.setup_server,
fake_network_info)
self.assertTrue(self.driver.plugin.helper.get_AD_config.called)
self.assertTrue(self.driver.plugin._get_wait_interval.called)
self.assertTrue(self.driver.plugin._get_timeout.called)
self.assertEqual(
2, self.driver.plugin.helper.set_DNS_ip_address.call_count)
def test_teardown_server_success(self):
server_details = {
"logical_port_id": "1",
"vlan_id": "2",
"ad_created": "1",
"ldap_created": "1",
}
security_services = [
self.fake_ldap,
self.fake_active_directory
]
self.logical_port_deleted = False
self.vlan_deleted = False
self.ad_deleted = False
self.ldap_deleted = False
self.dns_deleted = False
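        # Fake the array's REST API: report existing LIF/VLAN/AD/LDAP/DNS
        # resources and flip the flags above as each one is torn down.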
def fake_teardown_call(*args, **kwargs):
url = args[0]
method = args[2]
if url.startswith("/LIF"):
if method == "GET":
data = """{"error":{"code":0},"data":[{
"ID":"1"}]}"""
elif method == "DELETE":
data = """{"error":{"code":0}}"""
self.logical_port_deleted = True
elif url.startswith("/vlan"):
if method == "GET":
data = """{"error":{"code":0},"data":[{
"ID":"2"}]}"""
elif method == "DELETE":
data = """{"error":{"code":1073813505}}"""
self.vlan_deleted = True
elif url == "/AD_CONFIG":
if method == "PUT":
data = """{"error":{"code":0}}"""
self.ad_deleted = True
elif method == "GET":
if self.ad_deleted:
data = """{"error":{"code":0},"data":{
"DOMAINSTATUS":"0"}}"""
else:
data = """{"error":{"code":0},"data":{
"DOMAINSTATUS":"1",
"FULLDOMAINNAME":"huawei.com"}}"""
else:
data = """{"error":{"code":0}}"""
elif url == "/LDAP_CONFIG":
if method == "DELETE":
data = """{"error":{"code":0}}"""
self.ldap_deleted = True
elif method == "GET":
if self.ldap_deleted:
data = """{"error":{"code":0}}"""
else:
data = """{"error":{"code":0},"data":{
"LDAPSERVER":"100.97.5.87",
"BASEDN":"dc=huawei,dc=com"}}"""
else:
data = """{"error":{"code":0}}"""
elif url == "/DNS_Server":
if method == "GET":
data = "{\"error\":{\"code\":0},\"data\":{\
\"ADDRESS\":\"[\\\"100.97.5.5\\\",\\\"\\\"]\"}}"
elif method == "PUT":
data = """{"error":{"code":0}}"""
self.dns_deleted = True
else:
data = """{"error":{"code":0}}"""
else:
return self.driver.plugin.helper.do_call(*args, **kwargs)
res_json = jsonutils.loads(data)
return res_json
with mock.patch.object(self.driver.plugin.helper,
'call') as mock_call:
mock_call.side_effect = fake_teardown_call
self.driver._teardown_server(server_details, security_services)
self.assertTrue(self.logical_port_deleted)
self.assertTrue(self.vlan_deleted)
self.assertTrue(self.ad_deleted)
self.assertTrue(self.ldap_deleted)
self.assertTrue(self.dns_deleted)
def test_teardown_server_with_already_deleted(self):
server_details = {
"logical_port_id": "1",
"vlan_id": "2",
"ad_created": "1",
"ldap_created": "1",
}
security_services = [
self.fake_ldap,
self.fake_active_directory
]
self.mock_object(self.driver.plugin.helper,
"check_logical_port_exists_by_id",
mock.Mock(return_value=False))
self.mock_object(self.driver.plugin.helper,
"check_vlan_exists_by_id",
mock.Mock(return_value=False))
self.mock_object(self.driver.plugin.helper,
"get_DNS_ip_address",
mock.Mock(return_value=None))
self.mock_object(self.driver.plugin.helper,
"get_AD_domain_name",
mock.Mock(return_value=(False, None)))
self.mock_object(self.driver.plugin.helper,
"get_LDAP_domain_server",
mock.Mock(return_value=(False, None)))
self.driver._teardown_server(server_details, security_services)
self.assertEqual(1, (self.driver.plugin.helper.
check_logical_port_exists_by_id.call_count))
self.assertEqual(1, (self.driver.plugin.helper.
check_vlan_exists_by_id.call_count))
self.assertEqual(1, (self.driver.plugin.helper.
get_DNS_ip_address.call_count))
self.assertEqual(1, (self.driver.plugin.helper.
get_AD_domain_name.call_count))
self.assertEqual(1, (self.driver.plugin.helper.
get_LDAP_domain_server.call_count))
def test_teardown_server_with_vlan_logical_port_deleted(self):
server_details = {
"logical_port_id": "1",
"vlan_id": "2",
}
self.mock_object(self.driver.plugin.helper,
'get_all_logical_port',
mock.Mock(return_value=[{'ID': '4'}]))
self.mock_object(self.driver.plugin.helper,
'get_all_vlan',
mock.Mock(return_value=[{'ID': '4'}]))
self.driver._teardown_server(server_details, None)
self.assertEqual(1, (self.driver.plugin.helper.
get_all_logical_port.call_count))
self.assertEqual(1, (self.driver.plugin.helper.
get_all_vlan.call_count))
def test_teardown_server_with_empty_detail(self):
server_details = {}
with mock.patch.object(connection.LOG, 'debug') as mock_debug:
self.driver._teardown_server(server_details, None)
mock_debug.assert_called_with('Server details are empty.')
@ddt.data({"share_proto": "NFS",
"path": ["100.115.10.68:/share_fake_uuid"]},
{"share_proto": "CIFS",
"path": ["\\\\100.115.10.68\\share_fake_uuid"]})
@ddt.unpack
    def test_ensure_share_success(self, share_proto, path):
share = self._get_share_by_proto(share_proto)
self.driver.plugin.helper.login()
location = self.driver.ensure_share(self._context,
share,
self.share_server)
self.assertEqual(path, location)
@ddt.data({"share_proto": "NFS",
"path": ["111.111.111.109:/share_fake_uuid"]},
{"share_proto": "CIFS",
"path": ["\\\\111.111.111.109\\share_fake_uuid"]})
@ddt.unpack
@dec_driver_handles_share_servers
    def test_ensure_share_with_share_server_success(self, share_proto, path):
share = self._get_share_by_proto(share_proto)
backend_details = self.driver.setup_server(self.fake_network_info)
fake_share_server = {'backend_details': backend_details}
location = self.driver.ensure_share(self._context,
share,
fake_share_server)
self.assertEqual(path, location)
@ddt.data({"share_proto": "NFS"},
{"share_proto": "CIFS"})
@ddt.unpack
def test_ensure_share_get_share_fail(self, share_proto):
share = self._get_share_by_proto(share_proto)
self.mock_object(self.driver.plugin.helper,
'_get_share_by_name',
mock.Mock(return_value={}))
self.driver.plugin.helper.login()
self.assertRaises(exception.ShareResourceNotFound,
self.driver.ensure_share,
self._context,
share,
self.share_server)
def test_ensure_share_get_filesystem_status_fail(self):
self.driver.plugin.helper.fs_status_flag = False
share = self.share_nfs_thickfs
self.driver.plugin.helper.login()
self.assertRaises(exception.StorageResourceException,
self.driver.ensure_share,
self._context,
share,
self.share_server)
def _add_conf_file_element(self, doc, parent_element, name, value=None):
new_element = doc.createElement(name)
if value:
new_text = doc.createTextNode(value)
new_element.appendChild(new_text)
parent_element.appendChild(new_element)
def create_fake_conf_file(self, fake_conf_file,
product_flag=True, username_flag=True,
pool_node_flag=True, timeout_flag=True,
wait_interval_flag=True,
sectorsize_value='4',
multi_url=False,
logical_port='100.115.10.68',
snapshot_support=True,
replication_support=False):
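        # Build a Huawei driver XML configuration file; the flag arguments
        # control which elements are present or left empty.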
doc = xml.dom.minidom.Document()
config = doc.createElement('Config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
if self.configuration.driver_handles_share_servers:
port0 = doc.createElement('Port')
port0_text = doc.createTextNode(logical_port)
port0.appendChild(port0_text)
storage.appendChild(port0)
else:
controllerip0 = doc.createElement('LogicalPortIP')
controllerip0_text = doc.createTextNode(logical_port)
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
if product_flag:
product_text = doc.createTextNode('V3')
else:
product_text = doc.createTextNode('V3_fail')
product = doc.createElement('Product')
product.appendChild(product_text)
storage.appendChild(product)
if username_flag:
username_text = doc.createTextNode('admin')
else:
username_text = doc.createTextNode('')
username = doc.createElement('UserName')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
if multi_url:
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/;'
'http://100.115.10.70:8082/'
'deviceManager/rest/')
else:
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
if snapshot_support:
self._add_conf_file_element(
doc, storage, 'SnapshotSupport', 'True')
if replication_support:
self._add_conf_file_element(
doc, storage, 'ReplicationSupport', 'True')
lun = doc.createElement('Filesystem')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
if pool_node_flag:
pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2; ;')
else:
pool_text = doc.createTextNode('')
storagepool.appendChild(pool_text)
timeout = doc.createElement('Timeout')
if timeout_flag:
timeout_text = doc.createTextNode('60')
else:
timeout_text = doc.createTextNode('')
timeout.appendChild(timeout_text)
waitinterval = doc.createElement('WaitInterval')
if wait_interval_flag:
waitinterval_text = doc.createTextNode('3')
else:
waitinterval_text = doc.createTextNode('')
waitinterval.appendChild(waitinterval_text)
NFSClient = doc.createElement('NFSClient')
virtualip = doc.createElement('IP')
virtualip_text = doc.createTextNode('100.112.0.1')
virtualip.appendChild(virtualip_text)
NFSClient.appendChild(virtualip)
CIFSClient = doc.createElement('CIFSClient')
username = doc.createElement('UserName')
username_text = doc.createTextNode('user_name')
username.appendChild(username_text)
CIFSClient.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('user_password')
userpassword.appendChild(userpassword_text)
CIFSClient.appendChild(userpassword)
lun.appendChild(NFSClient)
lun.appendChild(CIFSClient)
lun.appendChild(timeout)
lun.appendChild(waitinterval)
lun.appendChild(storagepool)
if sectorsize_value:
sectorsize = doc.createElement('SectorSize')
sectorsize_text = doc.createTextNode(sectorsize_value)
sectorsize.appendChild(sectorsize_text)
lun.appendChild(sectorsize)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '0')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
fakefile = open(fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
def recreate_fake_conf_file(self, product_flag=True, username_flag=True,
pool_node_flag=True, timeout_flag=True,
wait_interval_flag=True,
sectorsize_value='4',
multi_url=False,
logical_port='100.115.10.68',
snapshot_support=True,
replication_support=False):
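        # Regenerate the fake configuration file in a fresh temporary
        # directory and register cleanup handlers for it.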
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file(self.fake_conf_file, product_flag,
username_flag, pool_node_flag,
timeout_flag, wait_interval_flag,
sectorsize_value,
multi_url, logical_port,
snapshot_support, replication_support)
self.addCleanup(os.remove, self.fake_conf_file)
@ddt.data(common_constants.STATUS_ERROR,
common_constants.REPLICA_STATE_IN_SYNC,
common_constants.REPLICA_STATE_OUT_OF_SYNC)
def test_create_replica_success(self, replica_state):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
if replica_state == common_constants.STATUS_ERROR:
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":{"HEALTHSTATUS": "2"}}"""}
elif replica_state == common_constants.REPLICA_STATE_OUT_OF_SYNC:
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":{"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "1",
"SECRESDATASTATUS": "5"}}"""}
result = self.driver.create_replica(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
expected = {
'export_locations': ['100.115.10.68:/share_fake_new_uuid'],
'replica_state': replica_state,
'access_rules_status': common_constants.STATUS_ACTIVE,
}
self.assertEqual(expected, result)
self.assertEqual('fake_pair_id',
self.driver.plugin.private_storage.get(
'fake_share_id', 'replica_pair_id'))
@ddt.data({'url': '/FILESYSTEM?filter=NAME::share_fake_uuid'
'&range=[0-8191]',
'url_result': '{"error":{"code":0}}',
'expected_exception': exception.ReplicationException},
{'url': '/NFSHARE',
'url_result': '{"error":{"code":-403}}',
'expected_exception': exception.InvalidShare},
{'url': '/REPLICATIONPAIR',
'url_result': '{"error":{"code":-403}}',
'expected_exception': exception.InvalidShare},)
@ddt.unpack
def test_create_replica_fail(self, url, url_result, expected_exception):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.custom_results[url] = url_result
self.assertRaises(expected_exception,
self.driver.create_replica,
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
self.assertIsNone(self.driver.plugin.private_storage.get(
'fake_share_id', 'replica_pair_id'))
def test_create_replica_with_get_state_fail(self):
share_type = self.fake_type_not_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":-403}}"""}
result = self.driver.create_replica(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
expected = {
'export_locations': ['100.115.10.68:/share_fake_new_uuid'],
'replica_state': common_constants.STATUS_ERROR,
'access_rules_status': common_constants.STATUS_ACTIVE,
}
self.assertEqual(expected, result)
self.assertEqual('fake_pair_id',
self.driver.plugin.private_storage.get(
'fake_share_id', 'replica_pair_id'))
def test_create_replica_with_already_exists(self):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.assertRaises(exception.ReplicationException,
self.driver.create_replica,
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
@ddt.data({'pair_info': """{"HEALTHSTATUS": "2",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "1"}""",
'assert_method': 'get_replication_pair_by_id'},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "true",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "1"}""",
'assert_method': 'switch_replication_pair'},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false",
"SECRESACCESS": "3",
"RUNNINGSTATUS": "1"}""",
'assert_method': 'set_pair_secondary_write_lock'},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "33"}""",
'assert_method': 'sync_replication_pair'},)
@ddt.unpack
def test_update_replica_state_success(self, pair_info, assert_method):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
helper_method = getattr(self.driver.plugin.helper, assert_method)
mocker = self.mock_object(self.driver.plugin.helper,
assert_method,
mock.Mock(wraps=helper_method))
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":%s}""" % pair_info}
self.driver.update_replica_state(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
mocker.assert_called_with('fake_pair_id')
@ddt.data({'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "true",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "1"}""",
'assert_method': 'switch_replication_pair',
'error_url': '/REPLICATIONPAIR/switch'},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false",
"SECRESACCESS": "3",
"RUNNINGSTATUS": "1"}""",
'assert_method': 'set_pair_secondary_write_lock',
'error_url': '/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK'},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false",
"SECRESACCESS": "1",
"RUNNINGSTATUS": "26"}""",
'assert_method': 'sync_replication_pair',
'error_url': '/REPLICATIONPAIR/sync'},)
@ddt.unpack
def test_update_replica_state_with_exception_ignore(
self, pair_info, assert_method, error_url):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
helper_method = getattr(self.driver.plugin.helper, assert_method)
mocker = self.mock_object(self.driver.plugin.helper,
assert_method,
mock.Mock(wraps=helper_method))
self.driver.plugin.helper.custom_results[
error_url] = """{"error":{"code":-403}}"""
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":%s}""" % pair_info}
self.driver.update_replica_state(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
mocker.assert_called_once_with('fake_pair_id')
def test_update_replica_state_with_replication_abnormal(self):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":{"HEALTHSTATUS": "2"}}"""}
result = self.driver.update_replica_state(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
self.assertEqual(common_constants.STATUS_ERROR, result)
def test_update_replica_state_with_no_pair_id(self):
result = self.driver.update_replica_state(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], [], None)
self.assertEqual(common_constants.STATUS_ERROR, result)
@ddt.data('true', 'false')
def test_promote_replica_success(self, is_primary):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error": {"code": 0},
"data": {"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "%s"}}""" % is_primary}
result = self.driver.promote_replica(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], None)
expected = [
{'id': self.new_replica['id'],
'replica_state': common_constants.REPLICA_STATE_ACTIVE,
'access_rules_status': common_constants.STATUS_ACTIVE},
{'id': self.active_replica['id'],
'replica_state': common_constants.REPLICA_STATE_IN_SYNC,
'access_rules_status':
common_constants.SHARE_INSTANCE_RULES_SYNCING},
]
self.assertEqual(expected, result)
@ddt.data({'mock_method': 'update_access',
'new_access_status':
common_constants.SHARE_INSTANCE_RULES_SYNCING,
'old_access_status':
common_constants.SHARE_INSTANCE_RULES_SYNCING},
{'mock_method': 'clear_access',
'new_access_status':
common_constants.SHARE_INSTANCE_RULES_SYNCING,
'old_access_status': common_constants.STATUS_ACTIVE},)
@ddt.unpack
def test_promote_replica_with_access_update_error(
self, mock_method, new_access_status, old_access_status):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error": {"code": 0},
"data": {"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "1",
"SECRESDATASTATUS": "2",
"ISPRIMARY": "false"}}"""}
mocker = self.mock_object(self.driver.plugin,
mock_method,
mock.Mock(side_effect=Exception('err')))
result = self.driver.promote_replica(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], None)
expected = [
{'id': self.new_replica['id'],
'replica_state': common_constants.REPLICA_STATE_ACTIVE,
'access_rules_status': new_access_status},
{'id': self.active_replica['id'],
'replica_state': common_constants.REPLICA_STATE_IN_SYNC,
'access_rules_status': old_access_status},
]
self.assertEqual(expected, result)
mocker.assert_called()
@ddt.data({'error_url': '/REPLICATIONPAIR/split',
'assert_method': 'split_replication_pair'},
{'error_url': '/REPLICATIONPAIR/switch',
'assert_method': 'switch_replication_pair'},
{'error_url': '/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK',
'assert_method': 'set_pair_secondary_write_lock'},
{'error_url': '/REPLICATIONPAIR/sync',
'assert_method': 'sync_replication_pair'},)
@ddt.unpack
def test_promote_replica_with_error_ignore(self, error_url, assert_method):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
helper_method = getattr(self.driver.plugin.helper, assert_method)
mocker = self.mock_object(self.driver.plugin.helper,
assert_method,
mock.Mock(wraps=helper_method))
self.driver.plugin.helper.custom_results[
error_url] = '{"error":{"code":-403}}'
fake_pair_infos = [{'ISPRIMARY': 'False',
'HEALTHSTATUS': '1',
'RUNNINGSTATUS': '1',
'SECRESDATASTATUS': '1'},
{'HEALTHSTATUS': '2'}]
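        # The first (healthy) pair info drives promotion of the new replica; the second
        # (unhealthy) one makes the old active replica report an error state.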
self.mock_object(self.driver.plugin.replica_mgr,
'_get_replication_pair_info',
mock.Mock(side_effect=fake_pair_infos))
result = self.driver.promote_replica(
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], None)
expected = [
{'id': self.new_replica['id'],
'replica_state': common_constants.REPLICA_STATE_ACTIVE,
'access_rules_status': common_constants.STATUS_ACTIVE},
{'id': self.active_replica['id'],
'replica_state': common_constants.STATUS_ERROR,
'access_rules_status':
common_constants.SHARE_INSTANCE_RULES_SYNCING},
]
self.assertEqual(expected, result)
mocker.assert_called_once_with('fake_pair_id')
@ddt.data({'error_url': '/REPLICATIONPAIR/fake_pair_id',
'url_result': """{"error":{"code":0},
"data":{"HEALTHSTATUS": "1",
"ISPRIMARY": "false",
"RUNNINGSTATUS": "1",
"SECRESDATASTATUS": "5"}}""",
'expected_exception': exception.ReplicationException},
{'error_url': '/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK',
'url_result': """{"error":{"code":-403}}""",
'expected_exception': exception.InvalidShare},)
@ddt.unpack
def test_promote_replica_fail(self, error_url, url_result,
expected_exception):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results[error_url] = url_result
self.assertRaises(expected_exception,
self.driver.promote_replica,
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], None)
def test_promote_replica_with_no_pair_id(self):
self.assertRaises(exception.ReplicationException,
self.driver.promote_replica,
self._context,
[self.active_replica, self.new_replica],
self.new_replica,
[], None)
@ddt.data({'url': '/REPLICATIONPAIR/split',
'url_result': '{"error":{"code":-403}}'},
{'url': '/REPLICATIONPAIR/fake_pair_id',
'url_result': '{"error":{"code":1077937923}}'},
{'url': '/REPLICATIONPAIR/fake_pair_id',
'url_result': '{"error":{"code":0}}'},)
@ddt.unpack
def test_delete_replica_success(self, url, url_result):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results['/filesystem/8'] = {
"DELETE": '{"error":{"code":0}}'}
self.driver.plugin.helper.custom_results[url] = url_result
self.driver.delete_replica(self._context,
[self.active_replica, self.new_replica],
[], self.new_replica, None)
self.assertIsNone(self.driver.plugin.private_storage.get(
'fake_share_id', 'replica_pair_id'))
@ddt.data({'url': '/REPLICATIONPAIR/fake_pair_id',
'expected': 'fake_pair_id'},
{'url': '/filesystem/8',
'expected': None},)
@ddt.unpack
def test_delete_replica_fail(self, url, expected):
self.driver.plugin.private_storage.update(
'fake_share_id',
{'replica_pair_id': 'fake_pair_id'})
self.driver.plugin.helper.custom_results[url] = {
"DELETE": '{"error":{"code":-403}}'}
self.assertRaises(exception.InvalidShare,
self.driver.delete_replica,
self._context,
[self.active_replica, self.new_replica],
[], self.new_replica, None)
self.assertEqual(expected,
self.driver.plugin.private_storage.get(
'fake_share_id', 'replica_pair_id'))
def test_delete_replica_with_no_pair_id(self):
self.driver.plugin.helper.custom_results['/filesystem/8'] = {
"DELETE": '{"error":{"code":0}}'}
self.driver.delete_replica(self._context,
[self.active_replica, self.new_replica],
[], self.new_replica, None)
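    # Each pair info below maps a HEALTHSTATUS/RUNNINGSTATUS/SECRESDATASTATUS
    # combination to the replica state the driver is expected to report.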
@ddt.data({'pair_info': """{"HEALTHSTATUS": "2"}""",
'expected_state': common_constants.STATUS_ERROR},
{'pair_info': """{"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "26"}""",
'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC},
{'pair_info': """{"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "33"}""",
'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC},
{'pair_info': """{"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "34"}""",
'expected_state': common_constants.STATUS_ERROR},
{'pair_info': """{"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "35"}""",
'expected_state': common_constants.STATUS_ERROR},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "1",
"RUNNINGSTATUS": "1"}""",
'expected_state': common_constants.REPLICA_STATE_IN_SYNC},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "2",
"RUNNINGSTATUS": "1"}""",
'expected_state': common_constants.REPLICA_STATE_IN_SYNC},
{'pair_info': """{"HEALTHSTATUS": "1",
"SECRESDATASTATUS": "5",
"RUNNINGSTATUS": "1"}""",
'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC})
@ddt.unpack
def test_get_replica_state(self, pair_info, expected_state):
self.driver.plugin.helper.custom_results[
'/REPLICATIONPAIR/fake_pair_id'] = {
"GET": """{"error":{"code":0},
"data":%s}""" % pair_info}
result_state = self.driver.plugin.replica_mgr.get_replica_state(
'fake_pair_id')
self.assertEqual(expected_state, result_state)
@ddt.data(*constants.QOS_STATUSES)
def test_delete_qos(self, qos_status):
self.driver.plugin.helper.custom_results['/ioclass/11'] = {
"GET": """{"error":{"code":0}, "data":{"RUNNINGSTATUS": "%s"}}""" %
qos_status
}
activate_deactivate_qos_mock = self.mock_object(
self.driver.plugin.helper,
'activate_deactivate_qos')
delete_qos_mock = self.mock_object(
self.driver.plugin.helper,
'delete_qos_policy')
qos = smartx.SmartQos(self.driver.plugin.helper)
qos.delete_qos('11')
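        # An already-inactive QoS policy is deleted directly; an active one must be deactivated first.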
if qos_status == constants.STATUS_QOS_INACTIVATED:
activate_deactivate_qos_mock.assert_not_called()
else:
activate_deactivate_qos_mock.assert_called_once_with('11', False)
delete_qos_mock.assert_called_once_with('11')
def test_username_password_encode_decode(self):
for i in (1, 2):
# First loop will encode the username/password and
# write back to configuration.
# Second loop will get the encoded username/password and
# decode them.
logininfo = self.driver.plugin.helper._get_login_info()
self.assertEqual('admin', logininfo['UserName'])
self.assertEqual('Admin@storage', logininfo['UserPassword'])
@ddt.data({
'username': 'abc',
'password': '123456',
'expect_username': 'abc',
'expect_password': '123456',
}, {
'username': '!$$$YWJj',
'password': '!$$$MTIzNDU2',
'expect_username': 'abc',
'expect_password': '123456',
}, {
'username': 'ab!$$$c',
'password': '123!$$$456',
'expect_username': 'ab!$$$c',
'expect_password': '123!$$$456',
})
@ddt.unpack
def test__get_login_info(self, username, password, expect_username,
expect_password):
configs = {
'Storage/RestURL': 'https://123456',
'Storage/UserName': username,
'Storage/UserPassword': password,
}
self.mock_object(
ET, 'parse',
mock.Mock(return_value=FakeConfigParseTree(configs)))
result = self.driver.plugin.helper._get_login_info()
self.assertEqual(expect_username, result['UserName'])
self.assertEqual(expect_password, result['UserPassword'])
ET.parse.assert_called_once_with(self.fake_conf_file)
def test_revert_to_snapshot_success(self):
snapshot = {'id': 'fake-fs-id',
'share_name': 'share_fake_uuid'}
with mock.patch.object(
self.driver.plugin.helper, 'call') as mock_call:
mock_call.return_value = {
"error": {"code": 0},
"data": [{"ID": "4", "NAME": "share_fake_uuid"}]
}
self.driver.revert_to_snapshot(None, snapshot, None, None)
expect_snapshot_id = "4@share_snapshot_fake_fs_id"
mock_call.assert_called_with(
"/FSSNAPSHOT/ROLLBACK_FSSNAPSHOT",
jsonutils.dumps({"ID": expect_snapshot_id}), 'PUT')
def test_revert_to_snapshot_exception(self):
snapshot = {'id': 'fake-snap-id',
'share_name': 'not_exist_share_name',
'share_id': 'fake_share_id'}
self.assertRaises(exception.ShareResourceNotFound,
self.driver.revert_to_snapshot,
None, snapshot, None, None)
@ddt.data({'name': 'fake_name',
'share_proto': 'NFS',
'mount_path': 'fake_nfs_mount_path',
'mount_src': '/mnt/test'},
{'name': 'fake_name',
'share_proto': 'CIFS',
'mount_path': 'fake_cifs_mount_path',
'mount_src': '/mnt/test'},
)
def test_mount_share_to_host(self, share):
access = {'access_to': 'cifs_user',
'access_password': 'cifs_password'}
mocker = self.mock_object(utils, 'execute')
self.driver.plugin.mount_share_to_host(share, access)
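        # NFS shares are mounted directly, while CIFS shares also pass the user credentials as mount options.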
if share['share_proto'] == 'NFS':
mocker.assert_called_once_with(
'mount', '-t', 'nfs', 'fake_nfs_mount_path', '/mnt/test',
run_as_root=True)
else:
mocker.assert_called_once_with(
'mount', '-t', 'cifs', 'fake_cifs_mount_path', '/mnt/test',
'-o', 'username=cifs_user,password=cifs_password',
run_as_root=True)
@ddt.ddt
class HuaweiDriverHelperTestCase(test.TestCase):
def setUp(self):
super(HuaweiDriverHelperTestCase, self).setUp()
self.helper = helper.RestHelper(None)
def test_init_http_head(self):
self.helper.init_http_head()
self.assertIsNone(self.helper.url)
self.assertFalse(self.helper.session.verify)
self.assertEqual("keep-alive",
self.helper.session.headers["Connection"])
self.assertEqual("application/json",
self.helper.session.headers["Content-Type"])
@ddt.data(('fake_data', 'POST'),
(None, 'POST'),
(None, 'PUT'),
(None, 'GET'),
('fake_data', 'PUT'),
(None, 'DELETE'),
)
@ddt.unpack
def test_do_call_with_valid_method(self, data, method):
self.helper.init_http_head()
mocker = self.mock_object(self.helper.session, method.lower())
self.helper.do_call("fake-rest-url", data, method)
kwargs = {'timeout': constants.SOCKET_TIMEOUT}
if data:
kwargs['data'] = data
mocker.assert_called_once_with("fake-rest-url", **kwargs)
def test_do_call_with_invalid_method(self):
self.assertRaises(exception.ShareBackendException,
self.helper.do_call,
"fake-rest-url", None, 'fake-method')
def test_do_call_with_http_error(self):
self.helper.init_http_head()
fake_res = requests.Response()
fake_res.reason = 'something wrong'
fake_res.status_code = 500
fake_res.url = "fake-rest-url"
self.mock_object(self.helper.session, 'post',
mock.Mock(return_value=fake_res))
res = self.helper.do_call("fake-rest-url", None, 'POST')
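        # A failed HTTP response is converted into the same {"error": ...} structure the storage REST API returns.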
expected = {
"error": {
"code": 500,
"description": '500 Server Error: something wrong for '
'url: fake-rest-url'}
}
self.assertDictEqual(expected, res)
| 41.74003
| 79
| 0.538558
|
8a83b977601f60094fa000a41455789dc9c8a1f2
| 274
|
py
|
Python
|
django_structlog/__init__.py
|
kashewnuts/django-structlog
|
1b4849039ee92688b53c502853a152c78a2dfe51
|
[
"MIT"
] | null | null | null |
django_structlog/__init__.py
|
kashewnuts/django-structlog
|
1b4849039ee92688b53c502853a152c78a2dfe51
|
[
"MIT"
] | null | null | null |
django_structlog/__init__.py
|
kashewnuts/django-structlog
|
1b4849039ee92688b53c502853a152c78a2dfe51
|
[
"MIT"
] | null | null | null |
""" ``django-structlog`` is a structured logging integration for ``Django`` project using ``structlog``.
"""
default_app_config = "django_structlog.apps.DjangoStructLogConfig"
name = "django_structlog"
VERSION = (2, 1, 0)
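# Dotted version string derived from the VERSION tuple, e.g. "2.1.0".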
__version__ = ".".join(str(v) for v in VERSION)
| 24.909091
| 104
| 0.718978
|
e484776bd34671098e0506ad5be8a04c5cb55158
| 8,265
|
py
|
Python
|
docs/conf.py
|
jkj728/yara
|
6fe1fb57edcb34815221bb58890ffbfef54e4067
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
jkj728/yara
|
6fe1fb57edcb34815221bb58890ffbfef54e4067
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
jkj728/yara
|
6fe1fb57edcb34815221bb58890ffbfef54e4067
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# yara documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 11:04:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'yara'
copyright = u'2014-2019, VirusTotal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
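# Use the Read the Docs theme when it is installed; otherwise fall back to
# Sphinx's built-in default theme.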
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'yaradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'yara.tex', u'yara Documentation',
u'Victor M. Alvarez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yara', u'yara Documentation',
[u'Victor M. Alvarez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'yara', u'yara Documentation',
   u'Victor M. Alvarez', 'yara', 'Documentation for YARA, the pattern matching tool.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.839552
| 79
| 0.717362
|
0028aa6303a294fbf64f37d98cac62c570371c12
| 66,302
|
py
|
Python
|
wagtail/users/tests.py
|
NabilMostafa/wagtail
|
d949946434508fb4b118f5ab83966463ce510163
|
[
"BSD-3-Clause"
] | 2
|
2020-10-19T13:10:14.000Z
|
2020-11-29T05:17:13.000Z
|
wagtail/users/tests.py
|
NabilMostafa/wagtail
|
d949946434508fb4b118f5ab83966463ce510163
|
[
"BSD-3-Clause"
] | 3
|
2015-03-10T12:19:01.000Z
|
2021-10-14T22:24:06.000Z
|
wagtail/users/tests.py
|
NabilMostafa/wagtail
|
d949946434508fb4b118f5ab83966463ce510163
|
[
"BSD-3-Clause"
] | 1
|
2016-04-04T12:58:24.000Z
|
2016-04-04T12:58:24.000Z
|
import unittest.mock
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest, HttpResponse
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.core import hooks
from wagtail.core.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME
from wagtail.core.models import Collection, GroupCollectionPermission, GroupPagePermission, Page
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.forms import UserCreationForm, UserEditForm
from wagtail.users.models import UserProfile
from wagtail.users.views.groups import GroupViewSet
from wagtail.users.views.users import get_user_creation_form, get_user_edit_form
from wagtail.users.wagtail_hooks import get_group_viewset_cls
delete_user_perm_codename = "delete_{0}".format(AUTH_USER_MODEL_NAME.lower())
change_user_perm_codename = "change_{0}".format(AUTH_USER_MODEL_NAME.lower())
def test_avatar_provider(user, default, size=50):
return '/nonexistent/path/to/avatar.png'
class CustomUserCreationForm(UserCreationForm):
country = forms.CharField(required=True, label="Country")
attachment = forms.FileField(required=True, label="Attachment")
class CustomUserEditForm(UserEditForm):
country = forms.CharField(required=True, label="Country")
attachment = forms.FileField(required=True, label="Attachment")
class CustomGroupViewSet(GroupViewSet):
icon = 'custom-icon'
class TestUserFormHelpers(TestCase):
def test_get_user_edit_form_with_default_form(self):
user_form = get_user_edit_form()
self.assertIs(user_form, UserEditForm)
def test_get_user_creation_form_with_default_form(self):
user_form = get_user_creation_form()
self.assertIs(user_form, UserCreationForm)
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationForm'
)
def test_get_user_creation_form_with_custom_form(self):
user_form = get_user_creation_form()
self.assertIs(user_form, CustomUserCreationForm)
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditForm'
)
def test_get_user_edit_form_with_custom_form(self):
user_form = get_user_edit_form()
self.assertIs(user_form, CustomUserEditForm)
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationFormDoesNotExist'
)
def test_get_user_creation_form_with_invalid_form(self):
self.assertRaises(ImproperlyConfigured, get_user_creation_form)
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditFormDoesNotExist'
)
def test_get_user_edit_form_with_invalid_form(self):
self.assertRaises(ImproperlyConfigured, get_user_edit_form)
class TestGroupUsersView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = self.create_user(
username='testuser',
email='testuser@email.com',
password='password',
first_name='First Name',
last_name='Last Name'
)
self.test_group = Group.objects.create(name='Test Group')
self.test_user.groups.add(self.test_group)
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:users', args=(group_id or self.test_group.pk, )), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_inexisting_group(self):
response = self.get(group_id=9999)
self.assertEqual(response.status_code, 404)
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_search_query_one_field(self):
response = self.get({'q': "first name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_search_query_multiple_fields(self):
response = self.get({'q': "first name last name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = self.create_user(
username='testuser',
email='testuser@email.com',
password='password',
first_name='First Name',
last_name='Last Name'
)
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
@unittest.skipIf(settings.AUTH_USER_MODEL == 'emailuser.EmailUser', 'Negative UUID not possible')
def test_allows_negative_ids(self):
# see https://github.com/wagtail/wagtail/issues/565
self.create_user('guardian', 'guardian@example.com', 'gu@rd14n', pk=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_search_query_one_field(self):
response = self.get({'q': "first name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_search_query_multiple_fields(self):
response = self.get({'q': "first name last name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 1)
@unittest.skipUnless(settings.AUTH_USER_MODEL == 'customuser.CustomUser', "Only applicable to CustomUser")
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationForm',
WAGTAIL_USER_CUSTOM_FIELDS=['country', 'document'],
)
def test_create_with_custom_form(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
'country': "testcountry",
'attachment': SimpleUploadedFile('test.txt', b"Uploaded file"),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().country, 'testcountry')
self.assertEqual(users.first().attachment.read(), b"Uploaded file")
def test_create_with_password_mismatch(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password1",
'password2': "password2",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 0)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
)
def test_create_with_password_validation(self):
"""
Test that the Django password validators are run when creating a user.
Specifically test that the UserAttributeSimilarityValidator works,
        which requires a fully populated user model before the validation works.
"""
# Create a user with a password the same as their name
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Example",
'last_name': "Name",
'password1': "example name",
'password2': "example name",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
# Password field should have an error
errors = response.context['form'].errors.as_data()
self.assertIn('password2', errors)
self.assertEqual(errors['password2'][0].code, 'password_too_similar')
# Check that the user was not created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 0)
def test_create_with_missing_password(self):
"""Password should be required by default"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "",
'password2': "",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password1'])
# Check that the user was not created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 0)
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_password_fields_exist_when_not_required(self):
"""Password fields should still be shown if WAGTAILUSERS_PASSWORD_REQUIRED is False"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_create_with_password_not_required(self):
"""Password should not be required if WAGTAILUSERS_PASSWORD_REQUIRED is False"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "",
'password2': "",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().password, '')
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_optional_password_is_still_validated(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, password validation should still apply if a password _is_ supplied"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "banana",
'password2': "kumquat",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 0)
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_password_still_accepted_when_optional(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, we should still allow a password to be set"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "banana",
'password2': "banana",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 1)
self.assertTrue(users.first().check_password('banana'))
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_not_shown_when_disabled(self):
"""WAGTAILUSERS_PASSWORD_ENABLED=False should cause password fields to be removed"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertNotContains(response, 'Password:')
self.assertNotContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_ignored_when_disabled(self):
"""When WAGTAILUSERS_PASSWORD_ENABLED is False, users should always be created without a usable password"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "banana", # not part of the form - should be ignored
'password2': "kumquat", # not part of the form - should be ignored
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(email='test@user.com')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().password, '')
def test_before_create_user_hook(self):
def hook_func(request):
self.assertIsInstance(request, HttpRequest)
return HttpResponse("Overridden!")
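        # A hook that returns an HttpResponse short-circuits the view; that response is served as-is.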
with self.register_hook('before_create_user', hook_func):
response = self.client.get(
reverse('wagtailusers_users:add')
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_create_user_hook_post(self):
def hook_func(request):
self.assertIsInstance(request, HttpRequest)
return HttpResponse("Overridden!")
with self.register_hook('before_create_user', hook_func):
post_data = {
'username': "testuser",
'email': "testuser@test.com",
'password1': 'password12',
'password2': 'password12',
'first_name': 'test',
'last_name': 'user',
}
response = self.client.post(
reverse('wagtailusers_users:add'),
post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_create_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertIsInstance(user, get_user_model())
return HttpResponse("Overridden!")
with self.register_hook('after_create_user', hook_func):
post_data = {
'username': "testuser",
'email': "testuser@test.com",
'password1': 'password12',
'password2': 'password12',
'first_name': 'test',
'last_name': 'user',
}
response = self.client.post(
reverse('wagtailusers_users:add'),
post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
class TestUserDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = self.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
# also create a superuser to delete
self.superuser = self.create_superuser(
username='testsuperuser',
email='testsuperuser@email.com',
password='password'
)
self.current_user = self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
def test_delete(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(email='testuser@email.com')
self.assertEqual(users.count(), 0)
def test_user_cannot_delete_self(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.current_user.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.current_user.pk).exists())
def test_user_can_delete_other_superuser(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(email='testsuperuser@email.com')
self.assertEqual(users.count(), 0)
def test_before_delete_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_delete_user', hook_func):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_delete_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_delete_user', hook_func):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_delete_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.email, self.test_user.email)
return HttpResponse("Overridden!")
with self.register_hook('after_delete_user', hook_func):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
class TestUserDeleteViewForNonSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = self.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
# create a user with delete permission
self.deleter_user = self.create_user(
username='deleter',
password='password'
)
deleters_group = Group.objects.create(name='User deleters')
deleters_group.permissions.add(Permission.objects.get(codename='access_admin'))
deleters_group.permissions.add(Permission.objects.get(
content_type__app_label=AUTH_USER_APP_LABEL, codename=delete_user_perm_codename
))
self.deleter_user.groups.add(deleters_group)
self.superuser = self.create_test_user()
self.login(username='deleter', password='password')
def test_simple(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
def test_delete(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)))
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(email='testuser@email.com')
self.assertEqual(users.count(), 0)
def test_user_cannot_delete_self(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.deleter_user.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.deleter_user.pk).exists())
def test_user_cannot_delete_superuser(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.superuser.pk).exists())
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = self.create_user(
username='testuser',
email='testuser@email.com',
first_name='Original',
last_name='User',
password='password'
)
# Login
self.current_user = self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.pk, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.pk, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
def test_nonexistant_redirect(self):
invalid_id = '99999999-9999-9999-9999-999999999999' if settings.AUTH_USER_MODEL == 'emailuser.EmailUser' else 100000
self.assertEqual(self.get(user_id=invalid_id).status_code, 404)
def test_simple_post(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "newpassword",
'password2': "newpassword",
'is_active': 'on'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('newpassword'))
def test_password_optional(self):
"""Leaving password fields blank should leave it unchanged"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "",
'password2': "",
'is_active': 'on'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited but password is unchanged
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('password'))
def test_passwords_match(self):
"""Password fields should be validated if supplied"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "banana",
'password2': "kumquat",
'is_active': 'on'
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Original')
self.assertTrue(user.check_password('password'))
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
)
def test_edit_with_password_validation(self):
"""
Test that the Django password validators are run when editing a user.
Specifically test that the UserAttributeSimilarityValidator works,
        which requires a fully populated user model before the validation works.
"""
# Create a user with a password the same as their name
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "Name",
'password1': "edited name",
'password2': "edited name",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
# Password field should have an error
errors = response.context['form'].errors.as_data()
self.assertIn('password2', errors)
self.assertEqual(errors['password2'][0].code, 'password_too_similar')
# Check that the user was not edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Original')
self.assertTrue(user.check_password('password'))
def test_edit_and_deactivate(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
# Leaving out these fields, thus setting them to False:
# 'is_active': 'on'
# 'is_superuser': 'on',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
# Check that the user is no longer superuser
self.assertEqual(user.is_superuser, False)
# Check that the user is no longer active
self.assertEqual(user.is_active, False)
def test_edit_and_make_superuser(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
'is_active': 'on',
'is_superuser': 'on',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
# Check that the user is now superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is now active
self.assertEqual(user.is_active, True)
def test_edit_self(self):
response = self.post({
'username': 'test@email.com',
'email': 'test@email.com',
'first_name': "Edited Myself",
'last_name': "User",
# 'password1': "password",
# 'password2': "password",
'is_active': 'on',
'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check that the user is still superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is still active
self.assertEqual(user.is_active, True)
def test_editing_own_password_does_not_log_out(self):
response = self.post({
'username': 'test@email.com',
'email': 'test@email.com',
'first_name': "Edited Myself",
'last_name': "User",
'password1': "c0rrecth0rse",
'password2': "c0rrecth0rse",
'is_active': 'on',
'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check user is not logged out
response = self.client.get(reverse('wagtailusers_users:index'))
self.assertEqual(response.status_code, 200)
def test_cannot_demote_self(self):
"""
check that unsetting a user's own is_active or is_superuser flag has no effect
"""
response = self.post({
'username': 'test@email.com',
'email': 'test@email.com',
'first_name': "Edited Myself",
'last_name': "User",
# 'password1': "password",
# 'password2': "password",
# failing to submit is_active or is_superuser would unset those flags,
# if we didn't explicitly prevent that when editing self
# 'is_active': 'on',
# 'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check that the user is still superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is still active
self.assertEqual(user.is_active, True)
@unittest.skipUnless(settings.AUTH_USER_MODEL == 'customuser.CustomUser', "Only applicable to CustomUser")
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditForm',
)
def test_edit_with_custom_form(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
'country': "testcountry",
'attachment': SimpleUploadedFile('test.txt', b"Uploaded file"),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertEqual(user.country, 'testcountry')
self.assertEqual(user.attachment.read(), b"Uploaded file")
@unittest.skipIf(settings.AUTH_USER_MODEL == 'emailuser.EmailUser', "Not applicable to EmailUser")
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_not_shown_when_disabled(self):
"""WAGTAILUSERS_PASSWORD_ENABLED=False should cause password fields to be removed"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertNotContains(response, 'Password:')
self.assertNotContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_ignored_when_disabled(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, existing password should be left unchanged"""
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'is_active': 'on',
'password1': "banana", # not part of the form - should be ignored
'password2': "kumquat", # not part of the form - should be ignored
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited but password is unchanged
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('password'))
def test_before_edit_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_edit_user', hook_func):
response = self.client.get(reverse('wagtailusers_users:edit', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_edit_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_edit_user', hook_func):
post_data = {
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.test_user.pk, )), post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_edit_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('after_edit_user', hook_func):
post_data = {
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.test_user.pk, )), post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = self.create_user(
username='testuser',
password='password',
)
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.wagtail_userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
def test_avatar_empty_on_profile_creation(self):
user_profile = UserProfile.get_for_user(self.test_user)
self.assertFalse(user_profile.avatar)
class TestUserEditViewForNonSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
# create a user with edit permission
self.editor_user = self.create_user(
username='editor',
password='password'
)
editors_group = Group.objects.create(name='User editors')
editors_group.permissions.add(Permission.objects.get(codename='access_admin'))
editors_group.permissions.add(Permission.objects.get(
content_type__app_label=AUTH_USER_APP_LABEL, codename=change_user_perm_codename
))
self.editor_user.groups.add(editors_group)
self.login(username='editor', password='password')
def test_user_cannot_escalate_privileges(self):
"""
Check that a non-superuser cannot edit their own is_active or is_superuser flag.
(note: this doesn't necessarily guard against other routes to escalating privileges, such
as creating a new user with is_superuser=True or adding oneself to a group with additional
privileges - the latter will be dealt with by #537)
"""
editors_group = Group.objects.get(name='User editors')
post_data = {
'username': "editor",
'email': "editor@email.com",
'first_name': "Escalating",
'last_name': "User",
'password1': "",
'password2': "",
'groups': [editors_group.id, ],
# These should not be possible without manipulating the form in the DOM:
'is_superuser': 'on',
'is_active': 'on',
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.editor_user.pk, )),
post_data)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
user = get_user_model().objects.get(pk=self.editor_user.pk)
# check if user is still in the editors group
self.assertTrue(user.groups.filter(name='User editors').exists())
# check that non-permission-related edits went ahead
self.assertEqual(user.first_name, "Escalating")
# Check that the user did not escalate its is_superuser status
self.assertEqual(user.is_superuser, False)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['search_form']['q'].value(), "Hello")
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
'document_permissions-TOTAL_FORMS': ['0'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['0'],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in post_defaults.items():
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
        # Check that the group was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['edit', 'publish'],
'page_permissions-TOTAL_FORMS': ['1'],
'document_permissions-0-collection': [Collection.get_first_root_node().pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
# and one 'add document' collection permission
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
self.assertEqual(
new_group.collection_permissions.filter(permission=self.add_doc_permission).count(),
1
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['publish'],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
self.assertTrue(response.context['permission_panels'][0].non_form_errors)
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
root_collection = Collection.get_first_root_node()
response = self.post({
'name': "test group",
'document_permissions-0-collection': [root_collection.pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'document_permissions-1-collection': [root_collection.pk],
'document_permissions-1-permissions': [self.change_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
# (we don't know what index in permission_panels the formset will be,
# so just assert that it happens on at least one permission_panel)
self.assertTrue(
any(
hasattr(panel, 'non_form_errors') and panel.non_form_errors
for panel in response.context['permission_panels']
)
)
def test_can_submit_blank_permission_form(self):
# the formsets for page / collection permissions should gracefully
# handle (and ignore) forms that have been left entirely blank
response = self.post({
'name': "test group",
'page_permissions-0-page': [''],
'page_permissions-TOTAL_FORMS': ['1'],
'document_permissions-0-collection': [''],
'document_permissions-TOTAL_FORMS': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with no page / document permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 0)
self.assertEqual(
new_group.collection_permissions.filter(permission=self.add_doc_permission).count(),
0
)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(pk=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
self.home_page = Page.objects.get(pk=2)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# set up collections to test document permissions
self.root_collection = Collection.get_first_root_node()
self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
GroupCollectionPermission.objects.create(
group=self.test_group,
collection=self.evil_plans_collection,
permission=self.add_doc_permission,
)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.pk, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.pk],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'],
'page_permissions-0-page': [self.root_page.pk],
'page_permissions-0-permission_types': ['add'],
'document_permissions-TOTAL_FORMS': ['1'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['1'],
'document_permissions-0-collection': [self.evil_plans_collection.pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in post_defaults.items():
post_data[k] = post_data.get(k, v)
return self.client.post(reverse(
'wagtailusers_groups:edit', args=(group_id or self.test_group.pk, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(pk__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
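    # Hedged illustration (an assumption, not part of this test module): a
    # permission becomes "hook-registered" by being returned from a
    # 'register_permissions' hook in a project's wagtail_hooks.py, roughly:
    #
    #   @hooks.register('register_permissions')
    #   def register_frobnicate_permission():
    #       return Permission.objects.filter(codename='frobnicate')
    #
    # The 'frobnicate' codename is a made-up example.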
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(pk=self.test_group.pk)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions_same_page(self):
# The test group has one page permission to begin with - 'add' permission on root.
# Add two additional permission types on the root page
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-permission_types': ['add', 'publish', 'edit'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_adding_document_permissions_same_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'change' permission on evil_plans
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-permissions': [
self.add_doc_permission.pk, self.change_doc_permission.pk
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has two document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
2
)
def test_group_edit_adding_document_permissions_different_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'add' and 'change' permission on the root collection
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-TOTAL_FORMS': ['2'],
'document_permissions-1-collection': [self.root_collection.pk],
'document_permissions-1-permissions': [
self.add_doc_permission.pk, self.change_doc_permission.pk
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
3
)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_deleting_document_permissions(self):
# The test group has one document permission to begin with
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
0
)
def test_group_edit_loads_with_django_permissions_shown(self):
# the checkbox for self.existing_permission should be ticked
response = self.get()
# use allow_extra_attrs because the input will also have an id (with an unpredictable value)
self.assertTagInHTML(
'<input name="permissions" type="checkbox" checked value="%s">' % self.existing_permission.id,
str(response.content),
allow_extra_attrs=True)
def test_group_edit_displays_collection_nesting(self):
# Add a child collection to Evil Plans.
self.evil_plans_collection.add_child(instance=Collection(name='Eviler Plans'))
response = self.get()
# "Eviler Plans" should be prefixed with ↳ (↳) and exactly 4 non-breaking spaces
# after the <option> tag.
# There are 3 instances because it appears twice in the form template javascript.
self.assertContains(response, '> ↳ Eviler Plans', count=3)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(
page_permissions_formset.management_form['INITIAL_FORMS'].value(),
1
)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
page_permissions_formset.forms[0]['permission_types'].value(),
['add']
)
# add edit permission on root
GroupPagePermission.objects.create(
page=self.root_page, permission_type='edit', group=self.test_group
)
# The test group now has two page permissions on root (but only one form covering both)
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(len(page_permissions_formset.forms), 1)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
set(page_permissions_formset.forms[0]['permission_types'].value()),
set(['add', 'edit'])
)
# add edit permission on home
GroupPagePermission.objects.create(
page=self.home_page, permission_type='edit', group=self.test_group
)
# The test group now has three page permissions, over two forms
self.assertEqual(self.test_group.page_permissions.count(), 3)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
set(page_permissions_formset.forms[0]['permission_types'].value()),
set(['add', 'edit'])
)
self.assertEqual(
page_permissions_formset.forms[1]['page'].value(),
self.home_page.pk
)
self.assertEqual(
page_permissions_formset.forms[1]['permission_types'].value(),
['edit']
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'page_permissions-1-page': [self.root_page.pk],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
self.assertTrue(response.context['permission_panels'][0].non_form_errors)
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
response = self.post({
            'document_permissions-1-collection': [self.evil_plans_collection.pk],
            'document_permissions-1-permissions': [self.change_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
self.assertTrue(
any(
hasattr(panel, 'non_form_errors') and panel.non_form_errors
for panel in response.context['permission_panels']
)
)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.pk, self.another_permission.pk]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
# submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.pk, self.another_permission.pk]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
class TestGroupViewSet(TestCase):
def setUp(self):
self.app_config = apps.get_app_config('wagtailusers')
def test_get_group_viewset_cls(self):
self.assertIs(get_group_viewset_cls(self.app_config), GroupViewSet)
def test_get_group_viewset_cls_with_custom_form(self):
with unittest.mock.patch.object(
self.app_config, 'group_viewset', new='wagtail.users.tests.CustomGroupViewSet'
):
group_viewset = get_group_viewset_cls(self.app_config)
self.assertIs(group_viewset, CustomGroupViewSet)
self.assertEqual(group_viewset.icon, 'custom-icon')
def test_get_group_viewset_cls_custom_form_invalid_value(self):
with unittest.mock.patch.object(self.app_config, 'group_viewset', new='asdfasdf'):
with self.assertRaises(ImproperlyConfigured) as exc_info:
get_group_viewset_cls(self.app_config)
self.assertIn(
"asdfasdf doesn't look like a module path", str(exc_info.exception)
)
def test_get_group_viewset_cls_custom_form_does_not_exist(self):
with unittest.mock.patch.object(
self.app_config, 'group_viewset', new='wagtail.users.tests.CustomClassDoesNotExist'
):
with self.assertRaises(ImproperlyConfigured) as exc_info:
get_group_viewset_cls(self.app_config)
self.assertIn(
'Module "wagtail.users.tests" does not define a "CustomClassDoesNotExist" attribute/class',
str(exc_info.exception)
)
| 40.85151 | 126 | 0.645335 |
db449fd9ce4c69c6ffec242a6b307280e77d3f17 | 131 | py | Python | app.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | ["MIT"] | null | null | null | app.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | ["MIT"] | 6 | 2018-06-14T13:58:43.000Z | 2018-07-16T14:02:24.000Z | app.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | ["MIT"] | null | null | null |
from runehistory_api import make_app
app = make_app(__name__)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| 18.714286 | 39 | 0.709924 |
2110f9c5fb79d3f73918a8c8d78043a4027f0ed6 | 4,236 | py | Python | backend/api/migrations/0035_auto_20191105_1851.py | Kovszasz/MYG | fc932bef8b67d568ac60bba5604009550570fca9 | ["MIT"] | null | null | null | backend/api/migrations/0035_auto_20191105_1851.py | Kovszasz/MYG | fc932bef8b67d568ac60bba5604009550570fca9 | ["MIT"] | 7 | 2020-06-06T00:58:09.000Z | 2022-02-26T20:03:02.000Z | backend/api/migrations/0035_auto_20191105_1851.py | Kovszasz/MYG | fc932bef8b67d568ac60bba5604009550570fca9 | ["MIT"] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-05 18:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0011_update_proxy_permissions'),
('api', '0034_post_ispublic'),
]
operations = [
migrations.CreateModel(
name='MygUser',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('profile_pic', models.ImageField(blank=True, default='/media/profile/e2.png', upload_to='profile')),
('sex', models.BooleanField(default=True)),
('dad', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='DAD', to='api.MygUser')),
('mom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='MOM', to='api.MygUser')),
],
),
migrations.CreateModel(
name='Sibling',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sibling1', models.ForeignKey(on_delete=False, related_name='User', to='api.MygUser')),
('sibling2', models.ForeignKey(on_delete=False, related_name='Sibling', to='api.MygUser')),
],
),
migrations.RemoveField(
model_name='action',
name='post',
),
migrations.RemoveField(
model_name='action',
name='user',
),
migrations.RemoveField(
model_name='advertsettings',
name='admin',
),
migrations.RemoveField(
model_name='comment',
name='post',
),
migrations.RemoveField(
model_name='comment',
name='user',
),
migrations.RemoveField(
model_name='follow',
name='channel',
),
migrations.RemoveField(
model_name='follow',
name='follower',
),
migrations.RemoveField(
model_name='memecontent',
name='post',
),
migrations.DeleteModel(
name='Message',
),
migrations.RemoveField(
model_name='mimeuser',
name='user',
),
migrations.DeleteModel(
name='Mods',
),
migrations.RemoveField(
model_name='personalscoringprofile',
name='label',
),
migrations.RemoveField(
model_name='personalscoringprofile',
name='user',
),
migrations.RemoveField(
model_name='post',
name='user',
),
migrations.RemoveField(
model_name='postlabelling',
name='label',
),
migrations.RemoveField(
model_name='postlabelling',
name='post',
),
migrations.RemoveField(
model_name='template',
name='user',
),
migrations.RemoveField(
model_name='timeline',
name='content_post',
),
migrations.DeleteModel(
name='Action',
),
migrations.DeleteModel(
name='AdvertSettings',
),
migrations.DeleteModel(
name='Comment',
),
migrations.DeleteModel(
name='Follow',
),
migrations.DeleteModel(
name='Label',
),
migrations.DeleteModel(
name='MemeContent',
),
migrations.DeleteModel(
name='MimeUser',
),
migrations.DeleteModel(
name='PersonalScoringProfile',
),
migrations.DeleteModel(
name='Post',
),
migrations.DeleteModel(
name='PostLabelling',
),
migrations.DeleteModel(
name='Template',
),
migrations.DeleteModel(
name='TimeLine',
),
]
| 30.042553 | 156 | 0.524315 |
de764e7db93965f80056ec9ad57a7348c1e00dd7 | 15,004 | py | Python | python/ray/tests/test_command_runner.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | ["Apache-2.0"] | 39 | 2021-02-02T23:09:31.000Z | 2022-03-28T16:39:12.000Z | python/ray/tests/test_command_runner.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | ["Apache-2.0"] | 77 | 2021-06-05T07:04:56.000Z | 2022-03-26T07:04:33.000Z | python/ray/tests/test_command_runner.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | ["Apache-2.0"] | 20 | 2021-02-05T05:51:39.000Z | 2022-03-04T21:13:24.000Z |
import logging
import pytest
from unittest.mock import patch
from ray.tests.test_autoscaler import MockProvider, MockProcessRunner
from ray.autoscaler.command_runner import CommandRunnerInterface
from ray.autoscaler._private.command_runner import SSHCommandRunner, \
DockerCommandRunner, KubernetesCommandRunner, _with_environment_variables
from ray.autoscaler.sdk import get_docker_host_mount_location
from getpass import getuser
import hashlib
auth_config = {
"ssh_user": "ray",
"ssh_private_key": "8265.pem",
}
def test_environment_variable_encoder_strings():
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
res = _with_environment_variables("echo hello", env_vars)
expected = """export var1='"quote between this \\" and this"';export var2='"123"';echo hello""" # noqa: E501
assert res == expected
def test_environment_variable_encoder_dict():
env_vars = {"value1": "string1", "value2": {"a": "b", "c": 2}}
res = _with_environment_variables("echo hello", env_vars)
expected = """export value1='"string1"';export value2='{"a":"b","c":2}';echo hello""" # noqa: E501
assert res == expected
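# Hedged sketch (an assumption, not Ray's actual implementation): the expected
# strings above are consistent with JSON-encoding each value and wrapping the
# result in single quotes before exporting it, roughly like this:
def _sketch_with_environment_variables(cmd, env_vars):
    import json
    exports = "".join(
        "export {}='{}';".format(key, json.dumps(value, separators=(",", ":")))
        for key, value in env_vars.items()
    )
    return exports + cmd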
def test_command_runner_interface_abstraction_violation():
"""Enforces the CommandRunnerInterface functions on the subclasses.
This is important to make sure the subclasses do not violate the
function abstractions. If you need to add a new function to one of
the CommandRunnerInterface subclasses, you have to add it to
CommandRunnerInterface and all of its subclasses.
"""
cmd_runner_interface_public_functions = dir(CommandRunnerInterface)
allowed_public_interface_functions = {
func
for func in cmd_runner_interface_public_functions
if not func.startswith("_")
}
for subcls in [
SSHCommandRunner, DockerCommandRunner, KubernetesCommandRunner
]:
subclass_available_functions = dir(subcls)
subclass_public_functions = {
func
for func in subclass_available_functions
if not func.startswith("_")
}
assert allowed_public_interface_functions == subclass_public_functions
def test_ssh_command_runner():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(ssh_user_hash[:10],
ssh_control_hash[:10])
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
}
cmd_runner = SSHCommandRunner(**args)
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run(
"echo helloo",
port_forward=[(8265, 8265)],
environment_variables=env_vars)
expected = [
"ssh",
"-tt",
"-L",
"8265:localhost:8265",
"-i",
"8265.pem",
"-o",
"StrictHostKeyChecking=no",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"IdentitiesOnly=yes",
"-o",
"ExitOnForwardFailure=yes",
"-o",
"ServerAliveInterval=5",
"-o",
"ServerAliveCountMax=3",
"-o",
"ControlMaster=auto",
"-o",
"ControlPath={}/%C".format(ssh_control_path),
"-o",
"ControlPersist=10s",
"-o",
"ConnectTimeout=120s",
"ray@1.2.3.4",
"bash",
"--login",
"-c",
"-i",
"""'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"quote between this \\" and this"'"'"';export var2='"'"'"123"'"'"';echo helloo)'""" # noqa: E501
]
# Much easier to debug this loop than the function call.
for x, y in zip(process_runner.calls[0], expected):
assert x == y
process_runner.assert_has_call("1.2.3.4", exact=expected)
def test_kubernetes_command_runner():
fail_cmd = "fail command"
process_runner = MockProcessRunner([fail_cmd])
provider = MockProvider()
provider.create_node({}, {}, 1)
args = {
"log_prefix": "prefix",
"namespace": "namespace",
"node_id": 0,
"auth_config": auth_config,
"process_runner": process_runner,
}
cmd_runner = KubernetesCommandRunner(**args)
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run("echo helloo", environment_variables=env_vars)
expected = [
"kubectl",
"-n",
"namespace",
"exec",
"-it",
"0",
"--",
"bash",
"--login",
"-c",
"-i",
"""\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1=\'"\'"\'"quote between this \\" and this"\'"\'"\';export var2=\'"\'"\'"123"\'"\'"\';echo helloo)\'""" # noqa: E501
]
assert process_runner.calls[0] == " ".join(expected)
logger = logging.getLogger("ray.autoscaler._private.command_runner")
with pytest.raises(SystemExit) as pytest_wrapped_e, patch.object(
logger, "error") as mock_logger_error:
cmd_runner.run(fail_cmd, exit_on_fail=True)
failed_cmd_expected = f'prefixCommand failed: \n\n kubectl -n namespace exec -it 0 --\'bash --login -c -i \'"\'"\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ({fail_cmd})\'"\'"\'\'\n' # noqa: E501
mock_logger_error.assert_called_once_with(failed_cmd_expected)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
def test_docker_command_runner():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(ssh_user_hash[:10],
ssh_control_hash[:10])
docker_config = {"container_name": "container"}
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = DockerCommandRunner(**args)
assert len(process_runner.calls) == 0, "No calls should be made in ctor"
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run("echo hello", environment_variables=env_vars)
# This string is insane because there are an absurd number of embedded
# quotes. While this is a ridiculous string, the escape behavior is
# important and somewhat difficult to get right for environment variables.
cmd = """'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (docker exec -it container /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"quote between this \\" and this"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';export var2='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"123"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';echo hello)'"'"'"'"'"'"'"'"''"'"' )'""" # noqa: E501
expected = [
"ssh", "-tt", "-i", "8265.pem", "-o", "StrictHostKeyChecking=no", "-o",
"UserKnownHostsFile=/dev/null", "-o", "IdentitiesOnly=yes", "-o",
"ExitOnForwardFailure=yes", "-o", "ServerAliveInterval=5", "-o",
"ServerAliveCountMax=3", "-o", "ControlMaster=auto", "-o",
"ControlPath={}/%C".format(ssh_control_path), "-o",
"ControlPersist=10s", "-o", "ConnectTimeout=120s", "ray@1.2.3.4",
"bash", "--login", "-c", "-i", cmd
]
# Much easier to debug this loop than the function call.
for x, y in zip(process_runner.calls[0], expected):
print(f"expeted:\t{y}")
print(f"actual: \t{x}")
assert x == y
process_runner.assert_has_call("1.2.3.4", exact=expected)
def test_docker_rsync():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
docker_config = {"container_name": "container"}
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = DockerCommandRunner(**args)
local_mount = "/home/ubuntu/base/mount/"
remote_mount = "/root/protected_mount/"
docker_mount_prefix = get_docker_host_mount_location(cluster_name)
remote_host_mount = f"{docker_mount_prefix}{remote_mount}"
local_file = "/home/ubuntu/base-file"
remote_file = "/root/protected-file"
remote_host_file = f"{docker_mount_prefix}{remote_file}"
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_mount, remote_mount, options={"docker_mount_if_possible": True})
# Make sure we do not copy directly to raw destination
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz {local_mount} ray@1.2.3.4:{remote_mount}")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"mkdir -p {remote_mount}")
# No docker cp for file_mounts
process_runner.assert_not_has_call("1.2.3.4", pattern="docker cp")
process_runner.assert_has_call(
"1.2.3.4",
pattern=f"-avz {local_mount} ray@1.2.3.4:{remote_host_mount}")
process_runner.clear_history()
##############################
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_file, remote_file, options={"docker_mount_if_possible": False})
# Make sure we do not copy directly to raw destination
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz {local_file} ray@1.2.3.4:{remote_file}")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"mkdir -p {remote_file}")
process_runner.assert_has_call("1.2.3.4", pattern="docker cp")
process_runner.assert_has_call(
"1.2.3.4", pattern=f"-avz {local_file} ray@1.2.3.4:{remote_host_file}")
process_runner.clear_history()
##############################
cmd_runner.run_rsync_down(
remote_mount, local_mount, options={"docker_mount_if_possible": True})
process_runner.assert_not_has_call("1.2.3.4", pattern="docker cp")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_mount} {local_mount}")
process_runner.assert_has_call(
"1.2.3.4",
pattern=f"-avz ray@1.2.3.4:{remote_host_mount} {local_mount}")
process_runner.clear_history()
##############################
cmd_runner.run_rsync_down(
remote_file, local_file, options={"docker_mount_if_possible": False})
process_runner.assert_has_call("1.2.3.4", pattern="docker cp")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_file} {local_file}")
process_runner.assert_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_host_file} {local_file}")
def test_rsync_exclude_and_filter():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
}
cmd_runner = SSHCommandRunner(**args)
local_mount = "/home/ubuntu/base/mount/"
remote_mount = "/root/protected_mount/"
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_mount,
remote_mount,
options={
"docker_mount_if_possible": True,
"rsync_exclude": ["test"],
"rsync_filter": [".ignore"]
})
process_runner.assert_has_call(
"1.2.3.4", pattern="--exclude test --filter dir-merge,- .ignore")
def test_rsync_without_exclude_and_filter():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
}
cmd_runner = SSHCommandRunner(**args)
local_mount = "/home/ubuntu/base/mount/"
remote_mount = "/root/protected_mount/"
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_mount,
remote_mount,
options={
"docker_mount_if_possible": True,
})
process_runner.assert_not_has_call("1.2.3.4", pattern="--exclude test")
process_runner.assert_not_has_call(
"1.2.3.4", pattern="--filter dir-merge,- .ignore")
@pytest.mark.parametrize("run_option_type",
["run_options", "head_run_options"])
def test_docker_shm_override(run_option_type):
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
docker_config = {
"container_name": "container",
"image": "rayproject/ray:latest",
run_option_type: ["--shm-size=80g"]
}
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = DockerCommandRunner(**args)
process_runner.respond_to_call("json .Config.Env", 2 * ["[]"])
cmd_runner.run_init(as_head=True, file_mounts={}, sync_run_yet=True)
# Ensure the user-provided SHM size is used.
process_runner.assert_has_call("1.2.3.4", pattern="--shm-size=80g")
# Ensure that SHM auto detection is bypassed
process_runner.assert_not_has_call("1.2.3.4", pattern="/proc/meminfo")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 36.864865 | 582 | 0.623834 |
0468f5539682f07b189c296e7886ce4965d0d697 | 18,063 | py | Python | python/GafferImageTest/MergeTest.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | ["BSD-3-Clause"] | 31 | 2017-07-10T10:02:07.000Z | 2022-02-08T13:54:14.000Z | python/GafferImageTest/MergeTest.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | ["BSD-3-Clause"] | null | null | null | python/GafferImageTest/MergeTest.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | ["BSD-3-Clause"] | 3 | 2017-11-04T15:30:11.000Z | 2018-09-25T18:36:11.000Z |
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import six
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class MergeTest( GafferImageTest.ImageTestCase ) :
rPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/redWithDataWindow.100x100.exr" )
gPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/greenWithDataWindow.100x100.exr" )
bPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/blueWithDataWindow.100x100.exr" )
checkerPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checkerboard.100x100.exr" )
checkerRGBPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgbOverChecker.100x100.exr" )
rgbPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" )
# Do several tests to check the cache is working correctly:
def testHashes( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.checkerPath )
r2 = GafferImage.ImageReader()
r2["fileName"].setValue( self.gPath )
##########################################
# Test to see if the hash changes.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue( GafferImage.Merge.Operation.Over )
merge["in"][0].setInput( r1["out"] )
merge["in"][1].setInput( r2["out"] )
h1 = GafferImage.ImageAlgo.imageHash( merge["out"] )
# Switch the inputs.
merge["in"][1].setInput( r1["out"] )
merge["in"][0].setInput( r2["out"] )
h2 = GafferImage.ImageAlgo.imageHash( merge["out"] )
self.assertNotEqual( h1, h2 )
##########################################
# Test to see if the hash remains the same
# when the output should be the same but the
# input plugs used are not.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue( GafferImage.Merge.Operation.Over )
expectedHash = h1
# Connect up a load of inputs ...
merge["in"][0].setInput( r1["out"] )
merge["in"][1].setInput( r1["out"] )
merge["in"][2].setInput( r1["out"] )
merge["in"][3].setInput( r2["out"] )
# but then disconnect two so that the result should still be the same...
merge["in"][1].setInput( None )
merge["in"][2].setInput( None )
h1 = GafferImage.ImageAlgo.imageHash( merge["out"] )
self.assertEqual( h1, expectedHash )
	# The pass-through for the disabled state works, but there is no sign that a pass-through
	# for a single input was ever implemented. (For a long time this test was broken.)
@unittest.expectedFailure
def testHashPassThrough( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.checkerPath )
##########################################
# Test to see if the input hash is always passed
# through if only the first input is connected.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue( GafferImage.Merge.Operation.Over )
expectedHash = GafferImage.ImageAlgo.imageHash( r1["out"] )
merge["in"][0].setInput( r1["out"] )
h1 = GafferImage.ImageAlgo.imageHash( merge["out"] )
self.assertEqual( h1, expectedHash )
##########################################
# Test that if we disable the node the hash gets passed through.
##########################################
merge["enabled"].setValue(False)
h1 = GafferImage.ImageAlgo.imageHash( merge["out"] )
self.assertEqual( h1, expectedHash )
# Overlay a red, green and blue tile of different data window sizes and check the data window is expanded on the result and looks as we expect.
def testOverRGBA( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.rPath )
g = GafferImage.ImageReader()
g["fileName"].setValue( self.gPath )
b = GafferImage.ImageReader()
b["fileName"].setValue( self.bPath )
merge = GafferImage.Merge()
merge["operation"].setValue( GafferImage.Merge.Operation.Over )
merge["in"][0].setInput( r["out"] )
merge["in"][1].setInput( g["out"] )
merge["in"][2].setInput( b["out"] )
expected = GafferImage.ImageReader()
expected["fileName"].setValue( self.rgbPath )
self.assertImagesEqual( merge["out"], expected["out"], maxDifference = 0.001, ignoreMetadata = True )
# Overlay a red, green and blue tile of different data window sizes and check the data window is expanded on the result and looks as we expect.
def testOverRGBAonRGB( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( self.checkerPath )
r = GafferImage.ImageReader()
r["fileName"].setValue( self.rPath )
g = GafferImage.ImageReader()
g["fileName"].setValue( self.gPath )
b = GafferImage.ImageReader()
b["fileName"].setValue( self.bPath )
merge = GafferImage.Merge()
merge["operation"].setValue( GafferImage.Merge.Operation.Over )
merge["in"][0].setInput( c["out"] )
merge["in"][1].setInput( r["out"] )
merge["in"][2].setInput( g["out"] )
merge["in"][3].setInput( b["out"] )
expected = GafferImage.ImageReader()
expected["fileName"].setValue( self.checkerRGBPath )
self.assertImagesEqual( merge["out"], expected["out"], maxDifference = 0.001, ignoreMetadata = True )
def testAffects( self ) :
c1 = GafferImage.Constant()
c2 = GafferImage.Constant()
m = GafferImage.Merge()
m["in"][0].setInput( c1["out"] )
m["in"][1].setInput( c2["out"] )
cs = GafferTest.CapturingSlot( m.plugDirtiedSignal() )
c1["color"]["r"].setValue( 0.1 )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( m["in"][0]["channelData"] ) )
self.assertTrue( cs[1][0].isSame( m["in"][0] ) )
self.assertTrue( cs[2][0].isSame( m["in"] ) )
self.assertTrue( cs[3][0].isSame( m["out"]["channelData"] ) )
self.assertTrue( cs[4][0].isSame( m["out"] ) )
del cs[:]
c2["color"]["g"].setValue( 0.2 )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( m["in"][1]["channelData"] ) )
self.assertTrue( cs[1][0].isSame( m["in"][1] ) )
self.assertTrue( cs[2][0].isSame( m["in"] ) )
self.assertTrue( cs[3][0].isSame( m["out"]["channelData"] ) )
self.assertTrue( cs[4][0].isSame( m["out"] ) )
def testEnabledAffects( self ) :
m = GafferImage.Merge()
affected = m.affects( m["enabled"] )
self.assertTrue( m["out"]["channelData"] in affected )
def testPassThrough( self ) :
c = GafferImage.Constant()
f = GafferImage.Resize()
f["in"].setInput( c["out"] )
f["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 10 ) ), 1 ) )
d = GafferImage.ImageMetadata()
d["metadata"].addChild( Gaffer.NameValuePlug( "comment", IECore.StringData( "reformated and metadata updated" ) ) )
d["in"].setInput( f["out"] )
m = GafferImage.Merge()
m["in"][0].setInput( c["out"] )
m["in"][1].setInput( d["out"] )
self.assertEqual( m["out"]["format"].hash(), c["out"]["format"].hash() )
self.assertEqual( m["out"]["metadata"].hash(), c["out"]["metadata"].hash() )
self.assertEqual( m["out"]["format"].getValue(), c["out"]["format"].getValue() )
self.assertEqual( m["out"]["metadata"].getValue(), c["out"]["metadata"].getValue() )
m["in"][0].setInput( d["out"] )
m["in"][1].setInput( c["out"] )
self.assertEqual( m["out"]["format"].hash(), d["out"]["format"].hash() )
self.assertEqual( m["out"]["metadata"].hash(), d["out"]["metadata"].hash() )
self.assertEqual( m["out"]["format"].getValue(), d["out"]["format"].getValue() )
self.assertEqual( m["out"]["metadata"].getValue(), d["out"]["metadata"].getValue() )
def testSmallDataWindowOverLarge( self ) :
b = GafferImage.Constant()
b["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
b["color"].setValue( imath.Color4f( 1, 0, 0, 1 ) )
a = GafferImage.Constant()
a["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
a["color"].setValue( imath.Color4f( 0, 1, 0, 1 ) )
aCrop = GafferImage.Crop()
aCrop["in"].setInput( a["out"] )
aCrop["areaSource"].setValue( aCrop.AreaSource.Area )
aCrop["area"].setValue( imath.Box2i( imath.V2i( 50 ), imath.V2i( 162 ) ) )
aCrop["affectDisplayWindow"].setValue( False )
m = GafferImage.Merge()
m["operation"].setValue( m.Operation.Over )
m["in"][0].setInput( b["out"] )
m["in"][1].setInput( aCrop["out"] )
redSampler = GafferImage.Sampler( m["out"], "R", m["out"]["format"].getValue().getDisplayWindow() )
greenSampler = GafferImage.Sampler( m["out"], "G", m["out"]["format"].getValue().getDisplayWindow() )
blueSampler = GafferImage.Sampler( m["out"], "B", m["out"]["format"].getValue().getDisplayWindow() )
def sample( x, y ) :
return imath.Color3f(
redSampler.sample( x, y ),
greenSampler.sample( x, y ),
blueSampler.sample( x, y ),
)
# We should only have overed green in areas which are inside
# the data window of aCrop. Everywhere else we should have
# red still.
self.assertEqual( sample( 49, 49 ), imath.Color3f( 1, 0, 0 ) )
self.assertEqual( sample( 50, 50 ), imath.Color3f( 0, 1, 0 ) )
self.assertEqual( sample( 161, 161 ), imath.Color3f( 0, 1, 0 ) )
self.assertEqual( sample( 162, 162 ), imath.Color3f( 1, 0, 0 ) )
def testLargeDataWindowAddedToSmall( self ) :
b = GafferImage.Constant()
b["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
b["color"].setValue( imath.Color4f( 1, 0, 0, 1 ) )
a = GafferImage.Constant()
a["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
a["color"].setValue( imath.Color4f( 0, 1, 0, 1 ) )
bCrop = GafferImage.Crop()
bCrop["in"].setInput( b["out"] )
bCrop["areaSource"].setValue( bCrop.AreaSource.Area )
bCrop["area"].setValue( imath.Box2i( imath.V2i( 50 ), imath.V2i( 162 ) ) )
bCrop["affectDisplayWindow"].setValue( False )
m = GafferImage.Merge()
m["operation"].setValue( m.Operation.Add )
m["in"][0].setInput( bCrop["out"] )
m["in"][1].setInput( a["out"] )
redSampler = GafferImage.Sampler( m["out"], "R", m["out"]["format"].getValue().getDisplayWindow() )
greenSampler = GafferImage.Sampler( m["out"], "G", m["out"]["format"].getValue().getDisplayWindow() )
blueSampler = GafferImage.Sampler( m["out"], "B", m["out"]["format"].getValue().getDisplayWindow() )
def sample( x, y ) :
return imath.Color3f(
redSampler.sample( x, y ),
greenSampler.sample( x, y ),
blueSampler.sample( x, y ),
)
# We should only have yellow in areas where the background exists,
# and should have just green everywhere else.
self.assertEqual( sample( 49, 49 ), imath.Color3f( 0, 1, 0 ) )
self.assertEqual( sample( 50, 50 ), imath.Color3f( 1, 1, 0 ) )
self.assertEqual( sample( 161, 161 ), imath.Color3f( 1, 1, 0 ) )
self.assertEqual( sample( 162, 162 ), imath.Color3f( 0, 1, 0 ) )
def testCrashWithResizedInput( self ) :
b = GafferImage.Constant()
b["format"].setValue( GafferImage.Format( 2048, 1556 ) )
bResized = GafferImage.Resize()
bResized["in"].setInput( b["out"] )
bResized["format"].setValue( GafferImage.Format( 1920, 1080 ) )
bResized["fitMode"].setValue( bResized.FitMode.Fit )
a = GafferImage.Constant()
a["format"].setValue( GafferImage.Format( 1920, 1080 ) )
merge = GafferImage.Merge()
merge["operation"].setValue( merge.Operation.Over )
merge["in"][0].setInput( bResized["out"] )
merge["in"][1].setInput( a["out"] )
GafferImageTest.processTiles( merge["out"] )
def testModes( self ) :
b = GafferImage.Constant()
b["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
a = GafferImage.Constant()
a["color"].setValue( imath.Color4f( 1, 0.3, 0.1, 0.2 ) )
merge = GafferImage.Merge()
merge["in"][0].setInput( b["out"] )
merge["in"][1].setInput( a["out"] )
sampler = GafferImage.ImageSampler()
sampler["image"].setInput( merge["out"] )
sampler["pixel"].setValue( imath.V2f( 10 ) )
self.longMessage = True
for operation, expected in [
( GafferImage.Merge.Operation.Add, ( 1.1, 0.5, 0.4, 0.6 ) ),
( GafferImage.Merge.Operation.Atop, ( 0.48, 0.28, 0.28, 0.4 ) ),
( GafferImage.Merge.Operation.Divide, ( 10, 1.5, 1/3.0, 0.5 ) ),
( GafferImage.Merge.Operation.In, ( 0.4, 0.12, 0.04, 0.08 ) ),
( GafferImage.Merge.Operation.Out, ( 0.6, 0.18, 0.06, 0.12 ) ),
( GafferImage.Merge.Operation.Mask, ( 0.02, 0.04, 0.06, 0.08 ) ),
( GafferImage.Merge.Operation.Matte, ( 0.28, 0.22, 0.26, 0.36 ) ),
( GafferImage.Merge.Operation.Multiply, ( 0.1, 0.06, 0.03, 0.08 ) ),
( GafferImage.Merge.Operation.Over, ( 1.08, 0.46, 0.34, 0.52 ) ),
( GafferImage.Merge.Operation.Subtract, ( 0.9, 0.1, -0.2, -0.2 ) ),
( GafferImage.Merge.Operation.Difference, ( 0.9, 0.1, 0.2, 0.2 ) ),
( GafferImage.Merge.Operation.Under, ( 0.7, 0.38, 0.36, 0.52 ) ),
( GafferImage.Merge.Operation.Min, ( 0.1, 0.2, 0.1, 0.2 ) ),
( GafferImage.Merge.Operation.Max, ( 1, 0.3, 0.3, 0.4 ) )
] :
merge["operation"].setValue( operation )
self.assertAlmostEqual( sampler["color"]["r"].getValue(), expected[0], msg=operation )
self.assertAlmostEqual( sampler["color"]["g"].getValue(), expected[1], msg=operation )
self.assertAlmostEqual( sampler["color"]["b"].getValue(), expected[2], msg=operation )
self.assertAlmostEqual( sampler["color"]["a"].getValue(), expected[3], msg=operation )
def testChannelRequest( self ) :
a = GafferImage.Constant()
a["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
ad = GafferImage.DeleteChannels()
ad["in"].setInput( a["out"] )
ad["mode"].setValue( GafferImage.DeleteChannels.Mode.Delete )
ad["channels"].setValue( "R" )
b = GafferImage.Constant()
b["color"].setValue( imath.Color4f( 1.0, 0.3, 0.1, 0.2 ) )
bd = GafferImage.DeleteChannels()
bd["in"].setInput( b["out"] )
bd["mode"].setValue( GafferImage.DeleteChannels.Mode.Delete )
bd["channels"].setValue( "G" )
merge = GafferImage.Merge()
merge["in"][0].setInput( ad["out"] )
merge["in"][1].setInput( bd["out"] )
merge["operation"].setValue( GafferImage.Merge.Operation.Add )
sampler = GafferImage.ImageSampler()
sampler["image"].setInput( merge["out"] )
sampler["pixel"].setValue( imath.V2f( 10 ) )
self.assertAlmostEqual( sampler["color"]["r"].getValue(), 0.0 + 1.0 )
self.assertAlmostEqual( sampler["color"]["g"].getValue(), 0.2 + 0.0 )
self.assertAlmostEqual( sampler["color"]["b"].getValue(), 0.3 + 0.1 )
self.assertAlmostEqual( sampler["color"]["a"].getValue(), 0.4 + 0.2 )
def testNonFlatThrows( self ) :
deep = GafferImage.Empty()
flat = GafferImage.Constant()
merge = GafferImage.Merge()
merge["in"][0].setInput( flat["out"] )
merge["in"][1].setInput( flat["out"] )
self.assertNotEqual( GafferImage.ImageAlgo.imageHash( merge["out"] ), GafferImage.ImageAlgo.imageHash( flat["out"] ) )
merge["in"][0].setInput( deep["out"] )
six.assertRaisesRegex( self, RuntimeError, 'Deep data not supported in input "in.in0"', GafferImage.ImageAlgo.image, merge["out"] )
merge["in"][0].setInput( flat["out"] )
merge["in"][1].setInput( deep["out"] )
six.assertRaisesRegex( self, RuntimeError, 'Deep data not supported in input "in.in1"', GafferImage.ImageAlgo.image, merge["out"] )
def testDefaultFormat( self ) :
a = GafferImage.Constant()
a["format"].setValue( GafferImage.Format( 100, 200 ) )
m = GafferImage.Merge()
m["in"][1].setInput( a["out"] )
with Gaffer.Context() as c :
GafferImage.FormatPlug().setDefaultFormat( c, GafferImage.Format( 1000, 2000 ) )
self.assertEqual( m["out"]["format"].getValue(), GafferImage.Format( 1000, 2000 ) )
def testDataWindowWhenBNotConnected( self ) :
a = GafferImage.Constant()
a["format"].setValue( GafferImage.Format( 100, 200 ) )
m = GafferImage.Merge()
m["in"][1].setInput( a["out"] )
self.assertEqual( m["out"]["dataWindow"].getValue(), a["out"]["dataWindow"].getValue() )
# Make sure we don't fail by pulling tiles outside the data window when merging images with
# misaligned data
def testTilesOutsideDataWindow( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.checkerPath )
o = GafferImage.Offset()
o["in"].setInput( r["out"] )
o["offset"].setValue( imath.V2i( -10 ) )
merge = GafferImage.Merge()
merge["in"][0].setInput( r["out"] )
merge["in"][1].setInput( o["out"] )
GafferImage.ImageAlgo.image( merge["out"] )
if __name__ == "__main__":
unittest.main()
| 36.863265
| 144
| 0.64884
|
f7367b85ef33529c5c360e68d214cb8e6a80a38f
| 4,752
|
py
|
Python
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 6
|
2021-07-26T14:21:25.000Z
|
2021-07-26T14:32:01.000Z
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 9
|
2021-03-18T23:10:27.000Z
|
2022-03-11T23:43:55.000Z
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 2
|
2019-03-11T05:06:49.000Z
|
2019-03-22T21:48:49.000Z
|
"""
PyColourChooser
Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
This file is part of PyColourChooser.
This version of PyColourChooser is open source; you can redistribute it
and/or modify it under the licensed terms.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
#
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPyColorChooser -> PyColorChooser
# o wxPyColourChooser -> PyColourChooser
#
# Tags: phoenix-port
import wx
class BitmapBuffer(wx.MemoryDC):
"""A screen buffer class.
This class implements a screen output buffer. Data is meant to
be drawn in the buffer class and then blitted directly to the
output device, or on-screen window.
"""
def __init__(self, width, height, colour):
"""Initialize the empty buffer object."""
wx.MemoryDC.__init__(self)
self.width = width
self.height = height
self.colour = colour
self.bitmap = wx.Bitmap(self.width, self.height)
self.SelectObject(self.bitmap)
# Initialize the buffer to the background colour
self.SetBackground(wx.Brush(self.colour, wx.BRUSHSTYLE_SOLID))
self.Clear()
# Make each logical unit of the buffer equal to 1 pixel
self.SetMapMode(wx.MM_TEXT)
def GetBitmap(self):
"""Returns the internal bitmap for direct drawing."""
return self.bitmap
# GetPixel seems to always return (-1, -1, -1, 255)
# on OS X so this is a workaround for that issue.
def GetPixelColour(self, x, y):
"""Gets the color value of the pixel at the given
cords.
"""
img = self.GetAsBitmap().ConvertToImage()
red = img.GetRed(x, y)
green = img.GetGreen(x, y)
blue = img.GetBlue(x, y)
return wx.Colour(red, green, blue)
class Canvas(wx.Window):
"""A canvas class for arbitrary drawing.
The Canvas class implements a window that allows for drawing
arbitrary graphics. It implements a double buffer scheme and
blits the off-screen buffer to the window during paint calls
by the windowing system for speed.
Some other methods for determining the canvas colour and size
are also provided.
"""
def __init__(self, parent, id,
pos=wx.DefaultPosition,
style=wx.SIMPLE_BORDER,
forceClientSize=None):
"""Creates a canvas instance and initializes the off-screen
buffer. Also sets the handler for rendering the canvas
automatically via size and paint calls from the windowing
system."""
wx.Window.__init__(self, parent, id, pos, style=style)
if forceClientSize:
self.SetMaxClientSize(forceClientSize)
self.SetMinClientSize(forceClientSize)
# Perform an intial sizing
self.ReDraw()
# Register event handlers
self.Bind(wx.EVT_SIZE, self.onSize)
self.Bind(wx.EVT_PAINT, self.onPaint)
def MakeNewBuffer(self):
size = self.GetClientSize()
self.buffer = BitmapBuffer(size[0], size[1],
self.GetBackgroundColour())
def onSize(self, event):
"""Perform actual redraw to off-screen buffer only when the
size of the canvas has changed. This saves a lot of computation
since the same image can be re-used, provided the canvas size
hasn't changed."""
self.MakeNewBuffer()
self.DrawBuffer()
self.Refresh()
def ReDraw(self):
"""Explicitly tells the canvas to redraw it's contents."""
self.onSize(None)
def Refresh(self):
"""Re-draws the buffer contents on-screen."""
dc = wx.ClientDC(self)
self.Blit(dc)
def onPaint(self, event):
"""Renders the off-screen buffer on-screen."""
dc = wx.PaintDC(self)
self.Blit(dc)
def Blit(self, dc):
"""Performs the blit of the buffer contents on-screen."""
width, height = self.buffer.GetSize()
dc.Blit(0, 0, width, height, self.buffer, 0, 0)
def GetBoundingRect(self):
"""Returns a tuple that contains the co-ordinates of the
top-left and bottom-right corners of the canvas."""
x, y = self.GetPosition()
w, h = self.GetSize()
return(x, y + h, x + w, y)
def DrawBuffer(self):
"""Actual drawing function for drawing into the off-screen
buffer. To be overrideen in the implementing class. Do nothing
by default."""
pass
| 32.547945
| 71
| 0.643729
|
40b6d5378a4c3825f42ec5c82e096f8be1291759
| 945
|
py
|
Python
|
Dec2021/mpi4py/ex7_mpi_pi.py
|
sbailey/tutorials
|
5820fb72da2ffec368e59bffdaecc94a2039060c
|
[
"BSD-3-Clause"
] | 22
|
2018-09-01T23:52:49.000Z
|
2022-01-22T20:30:54.000Z
|
Dec2021/mpi4py/ex7_mpi_pi.py
|
sbailey/tutorials
|
5820fb72da2ffec368e59bffdaecc94a2039060c
|
[
"BSD-3-Clause"
] | 40
|
2017-06-24T23:08:03.000Z
|
2021-06-10T23:22:37.000Z
|
Dec2021/mpi4py/ex7_mpi_pi.py
|
sbailey/tutorials
|
5820fb72da2ffec368e59bffdaecc94a2039060c
|
[
"BSD-3-Clause"
] | 9
|
2018-05-22T16:18:27.000Z
|
2021-06-10T23:13:42.000Z
|
""" Example6. Calculate Pi (3.1415..) with a simple
code based on trapzoidal method.
run with
$> python <scipt name>
"""
import numpy as np
from mpi4py import MPI
from time import time
def f(x):
return 4.0/(1.0+x*x)
def trap(local_a,local_b,local_n,h):
# trapzoidal method
estimate = (f(local_a)+f(local_b))/2.0
for i in np.arange(1,local_n):
x = local_a+float(i)*h
estimate += f(x)
return estimate*h
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
b = 1.0
a = 0.0
n = 1000000
h = (b-a)/float(n)
if rank==0:
start = time()
local_n = int(n/size)
local_a = a + rank*local_n*h
local_b = local_a + local_n*h
local_pi = trap(local_a, local_b, local_n, h)
comm.Barrier()
local_pi = comm.gather(local_pi, root=0)
if rank==0:
pi = sum(local_pi)
end = time()
print(f'Pi=%.6f (true)'%np.pi)
print("Pi=%.6f (%d steps in %.3f secs)" %(pi, n, end-start))
| 19.285714
| 64
| 0.620106
|
fa0c46d1f6f31aad108090d6161234cb3a6269a0
| 3,581
|
py
|
Python
|
autocleus/climaker/components.py
|
ludah65/autocleus
|
b7403894e4f72d7874af4d888ec63bd1e7832c02
|
[
"Apache-2.0"
] | 2
|
2021-05-21T15:51:24.000Z
|
2021-05-21T17:27:48.000Z
|
autocleus/climaker/components.py
|
ludah65/autocleus
|
b7403894e4f72d7874af4d888ec63bd1e7832c02
|
[
"Apache-2.0"
] | null | null | null |
autocleus/climaker/components.py
|
ludah65/autocleus
|
b7403894e4f72d7874af4d888ec63bd1e7832c02
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import argparse
import importlib as imp
import types
import autocleus.cmd as cmd
from autocleus.library.parsers import BaseArgumentParser, FancyHelpFormatter, read_cli_conf
class CLImaker():
def __init__(self, cmdpath, cli_description):
self.cmdpath = cmdpath
self.cli_description = cli_description
def entrypoint(self):
"""Entry point for all commands"""
# initialize BAP
parser, args, unknown = self.initiate_argument_parser()
# check if help command is run and which level
self.helper(parser, args)
# attach Command class to parser and reparse command line args
parser, args, unknown = self.attach_command_to_parser(parser, args)
# invoke command
parser.cmdcls.invoke(args)
def initiate_argument_parser(self, **kwargs):
"""Create a basic argument parser with no subcommands added"""
parser = BaseArgumentParser(
formatter_class=FancyHelpFormatter, add_help=False,
description=(f"{self.cli_description}"), program=self.cmdpath.split('.')[0])
parser.program = self.cmdpath.split('.')[0]
parser.cmdpath = self.cmdpath
parser.add_argument('-h', '--help', dest='help', action='store_const',
const='short', default=None, help="Show this help message")
parser.add_argument('-H', '--help-all', dest='help', action='store_const',
const='long', default=None, help="Show help for all commands")
parser.add_argument('command', nargs=argparse.REMAINDER)
args, unknown = parser.parse_known_args()
return parser, args, unknown
def attach_command_to_parser(self, parser, args):
cmd_name = args.command[0]
parser.cmdcls = cmd.get_command(cmd_name, modpath=self.cmdpath)
parser.add_command()
# re-parse with proper sub-barser added
args, unknown = parser.parse_known_args()
return parser, args, unknown
def helper(self, parser, args):
if args.help:
sys.stdout.write(parser.format_help(level=args.help))
exit(0)
elif not args.command:
parser.print_help()
exit(1)
class SpecMagic(object):
def __init__(self,path):
self.path=path
self.names = {}
def find_module(self,fullname,path=None):
ml = imp.find_module(fullname,path)
raise ImportError
def load_module(self,fullname):
return imp.load_module(fullname)
raise ImportError
def load_spec(path, modname=None):
"""
Load spec from path to allow loading spec from inside module
even if module is not colocated with spec.
This is to be inserted into cliexe when combining specs
to avoid the need to manipulate imports to reflect new
name of the combined spec root directory.
(e.g when autocleus.cmd becomes new_spec_name.cmd)
Combined specs will have different structure then singular
specs. Will refer to as combine spec as a "multi-spec".
use like:
import autocleus.climaker.components as acc
acc.load_spec(path, specname) # e.g specname='autocleus'
# now import as if within the same spec
import autocleus.cmd
from autocleus.cmd import generate
"""
if modname is None:
modname = os.path.basename(path)
mod = types.ModuleType(modname)
sys.modules[modname] = mod
assert mod.__name__== modname
mod.__path__=[path]
mod.__loader__= SpecMagic(path)
return mod
| 31.412281
| 91
| 0.659313
|
b4b31123560144a6e3b691034c487cdd03bc2de1
| 1,505
|
py
|
Python
|
torrent.py
|
dcheno/dripdrop
|
ed8b3b18d0796192d0f8cfa09efd2aece078ea9b
|
[
"MIT"
] | null | null | null |
torrent.py
|
dcheno/dripdrop
|
ed8b3b18d0796192d0f8cfa09efd2aece078ea9b
|
[
"MIT"
] | null | null | null |
torrent.py
|
dcheno/dripdrop
|
ed8b3b18d0796192d0f8cfa09efd2aece078ea9b
|
[
"MIT"
] | null | null | null |
import os
from hashlib import sha1
from bencode3 import bdecode, bencode
"""Handle data related to a torrent and its torrent file"""
class TorrentError(Exception):
pass
class Torrent:
"""Hold information coming from a torrent file."""
def __init__(self, tor_file_path):
self.path = tor_file_path
self.file_name = os.path.basename(tor_file_path)
self.name = os.path.splitext(self.file_name)[0]
self._handle_file(self.path)
def _handle_file(self, tor_file_path):
try:
with open(tor_file_path, 'rb') as tor_file:
metainfo = tor_file.read()
except FileNotFoundError:
raise TorrentError("No Torrent File With That Name.")
metadict = bdecode(metainfo)
self.info = metadict['info']
self.info_hash = self._hash_info(self.info)
self.announce = metadict['announce']
self.length = self.info['length']
self.piece_length = self.info['piece length']
self.target_file_name = self.info['name']
# The pieces entry consists of 20 byte hash values for each pieces.
self.num_pieces = len(self.info['pieces'])//20
self.piece_hashes = []
hashes = self.info['pieces']
while hashes:
self.piece_hashes.append(hashes[:20])
hashes = hashes[20:]
@staticmethod
def _hash_info(info):
info_bencode = bencode(info)
info_hash = sha1(info_bencode).digest()
return info_hash
| 31.354167
| 75
| 0.636545
|
8c046c703710a54e97190a49937be7db9091d360
| 21,085
|
py
|
Python
|
icenumerics/colloidalice.py
|
aortiza/icenumerics
|
983bfff616f5bedee0ccefd0f9fad10e50e23e83
|
[
"MIT"
] | null | null | null |
icenumerics/colloidalice.py
|
aortiza/icenumerics
|
983bfff616f5bedee0ccefd0f9fad10e50e23e83
|
[
"MIT"
] | 8
|
2019-10-04T09:04:23.000Z
|
2021-09-29T18:01:21.000Z
|
icenumerics/colloidalice.py
|
aortiza/icenumerics
|
983bfff616f5bedee0ccefd0f9fad10e50e23e83
|
[
"MIT"
] | 1
|
2019-10-10T10:25:43.000Z
|
2019-10-10T10:25:43.000Z
|
import numpy as np
import copy as cp
import matplotlib.pyplot as plt
import matplotlib.patches as ptch
import matplotlib.animation as anm
import scipy.spatial as spa
import pandas as pd
import warnings
from icenumerics.geometry import *
from icenumerics.parameters import *
from icenumerics.spins import *
from . import mc
class colloid_in_trap():
"""
An object 'colloid_in_trap' represents a colloidal particle in a bistable trap. It has three main properties:
* center is the center of the bistable trap.
* direction is a vector (whose magnitude is not important) that points from one stable position to the other.
* colloid is a vector that indicates where the colloid is placed with respect to the center.
Each of these quantities are represented by lists of three elements, which correspond to
vectors in R3 space. A colloid_in_trap object also has the properties:
* colloid_properties:
* susceptibility
* diffusion
* diameter
* rel_density
* volume
* trap_properties
* trap_sep: The distance between the traps.
* height
* stiffness
* spread
"""
def __init__(self, center, direction, particle, trap):
self.center = np.array(center.magnitude,dtype="float")*center.units
# Direction is always unitary
self.direction = np.array(direction.magnitude,dtype="float")/np.linalg.norm(direction.magnitude,2)
self.particle = particle
self.trap = trap
""" Colloid position is given by the direction vectors"""
self.colloid = self.direction * self.trap.trap_sep/2
def __str__(self):
""" Prints a string which represents the colloid_in_trap """
return("Colloid is in [%d %d %d], trap is [%d %d %d %d %d %d]\n" %\
(tuple(self.colloid.magnitude)+tuple(self.center.magnitude)+tuple(self.direction)))
def display(self, ax1=False, units = None, scale = 1):
""" Draws a figure with the trap and the colloid inside it"""
if not ax1:
fig, ax1 = plt.subplots(1,1)
patches = self.create_patch(units = units, scale=scale)
#ax1.plot(X,Y,'k')
ax1.add_patch(patches[0])
ax1.add_patch(patches[1])
ax1.add_patch(patches[2])
#ax1.plot([X,X+PX],[Y,Y+PY],color='k')
def create_patch(self, units = None, scale = 1):
""" Draws a figure with the trap and the colloid inside it"""
if not units:
units = self.center.units
X=self.center[0].to(units).magnitude
Y=self.center[1].to(units).magnitude
# D is the vector that goes fom the center to each of the traps
DX=self.direction[0]/2*self.trap.trap_sep.to(units).magnitude
DY=self.direction[1]/2*self.trap.trap_sep.to(units).magnitude
# P is the vector that goes from the center to the colloid
PX=self.colloid[0].to(units).magnitude
PY=self.colloid[1].to(units).magnitude
W = (self.particle.radius.to(units).magnitude)
return [ptch.Circle((X-DX,Y-DY), radius = W*scale, ec='g', fc='g'),
ptch.Circle((X+DX,Y+DY), radius = W*scale, ec='y', fc='y'),
ptch.Circle((X+PX,Y+PY), radius = W, ec='k', fc = 'none')]
def update_patch(self, patch, units = None):
""" Changes the configuration of the colloid display"""
if not units:
units = self.center.units
X=self.center[0].to(units).magnitude
Y=self.center[1].to(units).magnitude
# D is the vector that goes fom the center to each of the traps
DX=self.direction[0]/2*self.trap.trap_sep.to(units).magnitude
DY=self.direction[1]/2*self.trap.trap_sep.to(units).magnitude
# P is the vector that goes from the center to the colloid
PX=self.colloid[0].to(units).magnitude
PY=self.colloid[1].to(units).magnitude
patch[0].center = (X-DX,Y-DY)
patch[1].center = (X+DX,Y+DY)
patch[2].center = (X+PX,Y+PY)
def flip(self):
"""flips the ColloidInTrap by inverting its direction and its colloid attributes. Returns fliped object"""
cp = copy.deepcopy(self);
cp.direction = self.direction*(-1)
cp.colloid = self.colloid*(-1)
return cp
def bias(self, vector):
"""
Flips the ColloidInTrap to make it point in the direction of vector (dot(colloid,vector)>0). Returns fliped object
"""
# if vector is 2D, append zero
if len(vector)==2:
if vector.__class__.__name__=="tuple":
vector = vector+0
if vector.__class__.__name__=="ndarray":
vector = np.hstack((vector,0))
if vector.__class__.__name__=="list":
vector = vector+[0]
elif len(vector)>3:
raise ValueError("The vector argument has to be 2 or 3 dimentions")
cp = copy.deepcopy(self);
if np.array(vector).dot(self.direction)<0:
cp = self.flip()
return cp
class colloidal_ice(list):
"""
The colloidal ice object is a list of colloid_in_trap objects. It also includes some extra parameters contained in the worldparams attribute. It normally takes a spin ice object as input and generates one colloid_in_trap object for each spin.
The arrangement parameter defines the positions and directions of the colloidal ice. There are two possible inputs:
* a `spins` object: in this case the colloidal ice is copied from the spins arrangement.
* a `dict` object: this `dict` object must contain two arrays, `center` and `direction`.
`particle` and `trap` are parameter containers created with the `particle` and `trap` generators. They can be a single object, or a list. If it is a list, it must coincide with the number of elements defined by the `arrangement` parameter.
"""
def __init__(self, arrangement, particle, trap, height_spread = 0, susceptibility_spread = 0, region = None, periodic = None):
if arrangement.__class__.__name__ == "spins":
centers = [s.center for s in arrangement]
directions = [s.direction for s in arrangement]
else:
centers = arrangement['centers']
directions = arrangement['directions']
if not hasattr(particle,'__getitem__'):
particle = [particle for c in centers]
if not hasattr(trap,'__getitem__'):
trap = [trap for c in centers]
self.height_spread=height_spread
self.susceptibility_spread = susceptibility_spread
self.extend(
[colloid_in_trap(c,d,p,t)
for p,t,c,d in zip(particle,trap,centers,directions)])
if region == None:
units = centers[0].units
lower_bounds = np.min(
np.array([c.to(units).magnitude for c in centers]),0)
upper_bounds = np.max(
np.array([c.to(units).magnitude for c in centers]),0)
region = np.vstack([lower_bounds,upper_bounds])*units
self.region = region
if periodic is None:
periodic = False
self.periodic = periodic
def display(self, ax = None, scale = 1):
if not ax:
fig1, ax = plt.subplots(1,1)
units = self.region.units
for s in self:
s.display(ax,units, scale=scale)
ax.set_xlim([self.region[0,0].magnitude,self.region[1,0].magnitude])
ax.set_ylim([self.region[0,1].magnitude,self.region[1,1].magnitude])
ax.set_aspect("equal")
def animate(self,sl=slice(0,-1,1),ax=None,speed = 1, verb=False):
""" Animates a trajectory """
if not ax:
fig, ax = plt.subplots(1,1,figsize=(7,7))
else:
fig = ax.figure
region = [r.magnitude for r in self.sim.world.region]
len_units = self.sim.world.region.units
time_units = self.sim.total_time.units
particles = self.trj.index.get_level_values('id').unique()
n_of_particles = len(particles)
frames = self.trj.index.get_level_values('frame').unique().values
region = [r.magnitude for r in self.sim.world.region]
try:
radius = [self.sim.particles.radius.to(len_units).magnitude]
except AttributeError:
radius = [p.radius.to(len_units).magnitude for p in self.sim.particles]
framerate = self.sim.framerate.to(1/time_units).magnitude
runtime = self.sim.total_time.to(time_units).magnitude
timestep = self.sim.timestep.to(time_units).magnitude
frame_duration = (self.sim.total_time/(len(frames-1))*sl.step).to(ureg.ms).magnitude/speed
patches = [p for c in self for p in c.create_patch(len_units)]
def init():
for patch in patches:
ax.add_patch(patch)
return patches
def animate(frame_id):
frame = frames[sl][frame_id]
self.set_state_from_frame(frame = frame)
if verb:
print("frame[%u] is "%frame,frames[frame])
for p1,p2,p3,c in zip(patches[0::3],patches[1::3],patches[2::3],self):
c.update_patch([p1,p2,p3],len_units)
for patch in patches:
ax.add_patch(patch)
return patches
ax.set_xlim(region[0],region[1])
ax.set_ylim(region[2],region[3])
ax.set(aspect='equal')
anim = anm.FuncAnimation(fig, animate, init_func=init,
frames=len(frames[sl]), interval=frame_duration, blit=True)
plt.close(anim._fig)
return anim
def pad_region(self,pad,enforce2d=True):
self.region[0] = self.region[0]-pad
self.region[1] = self.region[1]+pad
if enforce2d:
self.region[:,2] = np.array([-.02,.02])*ureg.um
def simulate(self, *args, **kargs):
self.simulation(*args,**kargs)
self.run_simulation()
self.load_simulation()
def simulation(self, world, name, targetdir = '', include_timestamp = True, run_time = 60*ureg.s, framerate = 15*ureg.Hz, timestep = 100*ureg.us, output = ["x","y","z"], processors = 1):
particles = [c.particle for c in self]
traps = [c.trap for c in self]
colloids = [c.colloid.to(ureg.um).magnitude for c in self]*ureg.um
centers = [c.center.to(ureg.um).magnitude for c in self]*ureg.um
directions = np.array([c.direction for c in self])
# s = np.shape(np.array(colloids))
# initial_randomization = np.random.randn(s[0],s[1])*0.1*ureg.um
initial_displacement = np.array([[0,0,0.001]]*len(colloids))*ureg.um
positions = colloids+centers+initial_displacement
p_type, inverse_p = unique_objects(particles)
t_type, inverse_t = unique_objects(traps)
p_id = np.arange(len(colloids))
particle_types = []
particle_locations = []
for i,p in enumerate(p_type):
particle_types.append(mc.particles(
positions[np.array(inverse_p)==i],
atom_type = 0,
atoms_id = np.arange(len(colloids)),
radius = p.radius,
susceptibility = p.susceptibility,
drag = p.drag,
activity = p.activity))
particle_locations.append(p_id[np.array(inverse_p)==i])
particle_locations = np.array([loc for p_loc in particle_locations for loc in p_loc])
self.particle_locations = particle_locations
trap_types = []
# trapbonds
part_in_type = [np.where(np.array(inverse_p)==t)[0] for t, pt in enumerate(p_type)]
trap_bond_i = lambda i,p: np.concatenate([[p], list(np.where(part_in_type[p]==i)[0])])
trap_bonds = [trap_bond_i(i,p) for i, p in enumerate(inverse_p)]
for i,t in enumerate(t_type):
## inverse_t relates a type of trap, to a position in the trap array
## inverse_p relates a type of particle to a position in the particle array
## the particle array and the trap array have the same order: namely, particle i should bond with trap i.
subsets = [trap_bonds[j] for j, typ in enumerate(inverse_t) if typ==i]
trap_types.append(mc.bistable_trap(
centers[np.array(inverse_t)==i],
directions[np.array(inverse_t)==i],
particle_types,
subsets = subsets,
# I don't know how this would work for several types of particles.
atom_type = 1,
trap_id = np.arange(len(centers))+len(colloids),
distance = t.trap_sep,
height = t.height,
stiffness = t.stiffness,
height_spread = self.height_spread,
cutoff = t.cutoff))
world_sim = mc.world(
particle_types,trap_types,
region=self.region.transpose().flatten(),
walls=[False,False,False],
boundaries = world.boundaries,
temperature = world.temperature,
dipole_cutoff = world.dipole_cutoff,
lj_cutoff = 0,
lj_parameters = [0*ureg.pg*ureg.um**2/ureg.us**2,0],
enforce2d = world.enforce2d,
gravity = 0*ureg.um/ureg.us**2)
field = mc.field(magnitude = world.field,
frequency = 0*ureg.Hz, angle = 0*ureg.degrees)
self.run_params = {
"file_name":name,
"dir_name":targetdir,
"stamp_time":include_timestamp,
"timestep":timestep,
"framerate":framerate,
"total_time":run_time,
"output":output,
"particles":particle_types,
"traps":trap_types,
"world":world_sim,
"field":field,
"processors":processors}
self.name = name
self.dir_name = targetdir
self.include_timestamp = include_timestamp
self.sim = mc.sim(**self.run_params)
def update_simulation(self):
self.sim = mc.sim(**self.run_params)
def run_simulation(self):
self.sim.generate_scripts()
self.sim.run()
def load_simulation(self, sl = slice(0,-1,1)):
""" Loads the results from a simulation from the .lammpstrj file."""
self.trj = self.sim.load(read_trj = True, sl = sl)
self.bnd = self.sim.lazy_read.get_bounds(sl = sl)
self.frames = self.trj.index.get_level_values("frame").unique()
self.set_state_from_frame(frame = -1)
def set_state_from_frame(self, frame):
frame = self.frames[frame]
index = np.argsort(self.particle_locations)
for i,c in enumerate(self):
c.colloid = self.trj.loc[(frame,index[i]+1),["x","y","z"]].values*ureg.um - c.center
dot_prod = np.dot(c.colloid.magnitude,c.direction)
dot_prod_sign = (dot_prod>=0)*1+(dot_prod<0)*(-1)
c.direction = c.direction * dot_prod_sign
return self
def calculate_energy(self, B = 1*ureg.mT):
""" Calculates the sum of the inverse cube of all the inter particle distances.
For this it uses the spatial package of scipy which shifts this calculation to
a compiled program and it's therefore faster.
The energy output is given in 1/nm^3
------
Parameters:
B (Quantity): The physical parameters of the particles ($\chi$, radius) are stored in the `col` object, but the field needs to be specified. The `calculate_energy` method accepts a vector field, or a scalar quantity. If a scalar quantity is given, it is asumed to be in the vertical direction.
Results:
U (Quantity): The total dipole-dipole energy stored in the system.
todo: it might be useful to be able to calculate the total internal energy, including the energy of the traps.
"""
mu0 = 4e5*np.pi*ureg.pN/ureg.A**2
positions = np.array([np.array(c.center.to("um").magnitude+c.colloid.to("um").magnitude) for c in self])
distances = spa.distance.pdist(positions)
moment = lambda part: 4/3*np.pi*part.radius**3 * B * part.susceptibility / mu0
moments = np.array([moment(c.particle).to(ureg.A*ureg.um**2).magnitude for c in self])
try:
# This should fail if B is not a vector
pairs = np.array([[i,j+i+1] for i,p1 in enumerate(positions[:]) for j,p2 in enumerate(positions[i+1:])])
r = positions[pairs[:,0]]-positions[pairs[:,1]]
mdotm = np.sum(moments[pairs[:,0]]*moments[pairs[:,1]],axis=1)
m1dotr = np.sum(r*moments[pairs[:,0]],axis=1)
m2dotr = np.sum(r*moments[pairs[:,1]],axis=1)
U = sum( -mu0.magnitude / (4*np.pi) * ((3*m1dotr*m2dotr)/distances**5-mdotm/distances**3))
except np.AxisError:
mdotm = np.array([m1*moments[j+i+1]
for i,m1 in enumerate(moments[:])
for j,m2 in enumerate(moments[i+1:])])
U = sum(mu0.magnitude / (4*np.pi) * mdotm * (distances)**(-3))
self.energy = (U*ureg.pN*ureg.um).to("pN nm")
return self.energy
def DataFrame(self):
frames = self.trj.groupby("frame").count().index
return pd.concat([pd.DataFrame(data = np.array(
[np.concatenate([c.center.magnitude,c.direction,c.colloid])
for c in self.set_state_from_frame(f)]),
columns = ["x","y","z","dx","dy","dz","cx","cy","cz"],
index = pd.Index(range(len(self)),name = "id"))
for f in frames], keys = frames, names = ["frame"])
def where(self,center,tol=None):
if tol is None:
tol = 1e-6*ureg.um
return [i for i,c in enumerate(self) if np.linalg.norm(c.center-center)<tol]
def copy(self,deep = False):
import copy as cp
if deep:
return cp.deepcopy(self)
else:
return cp.copy(self)
def randomize(self):
import random
for c in self:
if random.randint(0, 1):
c.colloid = -c.colloid
c.direction = -c.direction
def remove(self, idx):
""" removes a list of colloids given by the indices idx"""
col_list = [c for c in self]
self.clear()
self.extend([c for i,c in enumerate(col_list) if i not in idx])
def unique_objects(object_list):
""" Classifies objects by uniqueness.
Returns:
list_of_unique: a list with a single instance of each unique objects
inverse: an array of where in list_of_unique the object is found. list_of_unque[inverse] == object_list
"""
set_of_unique = {t for t in object_list}
list_of_unique = list(set_of_unique)
def where(obj, list_of_unique):
""" returns the location of the object obj in the list_of_unique. """
for i,unique_object in enumerate(list_of_unique):
if obj == unique_object:
return i
inverse = [where(obj,list_of_unique) for obj in object_list]
return list_of_unique, inverse
def classify_objects(object_list):
""" Classifies objects by uniqueness. Returns a list with an object type directory."""
o_type = -1 # Default (maybe)
def where(obj,obj_list):
"""returns the first occurence of `particle` in the array `particles`"""
for i,o in enumerate(obj_list):
if o==obj:
return i
# This warning was made in 25-05-2020.
warnings.warn(DeprecationWarning("The function classify_objects, in colloidalice doesn't work properly and will be removed soon. Use instead the function unique_objects"))
type_dict = []
for i,o in enumerate(object_list):
loc = where(o,object_list[0:i])
if loc is not None:
type_dict.append(o_type)
else:
o_type = o_type+1
type_dict.append(o_type)
return type_dict
| 38.830571
| 302
| 0.576334
|
c03ee7c4af7f405edfa4302f782d43ad147c3996
| 1,166
|
py
|
Python
|
subjective_logic/testjpy.py
|
tum-i4/SACPS-robotics-system
|
dfa56cde0b433ff1c31a1ddcbf2c108247115a36
|
[
"MIT"
] | null | null | null |
subjective_logic/testjpy.py
|
tum-i4/SACPS-robotics-system
|
dfa56cde0b433ff1c31a1ddcbf2c108247115a36
|
[
"MIT"
] | null | null | null |
subjective_logic/testjpy.py
|
tum-i4/SACPS-robotics-system
|
dfa56cde0b433ff1c31a1ddcbf2c108247115a36
|
[
"MIT"
] | null | null | null |
import jpy
# Create a new JVM with the SL library on its classpath
jpy.create_jvm(['-Xmx512M', '-Djava.class.path=/home/sebastian/catkin_ws_Malte/knowledge_aggregation/subjective_logic/build/libs/subjective-logic-java-library-0.1.0.jar'])
# Get a reference of the SubjectiveOpinion Java class through jpy bridge
SubjectiveOpinion = jpy.get_type('de.tum.i4.subjectivelogic.SubjectiveOpinion')
# Create two subjective opinions
so1 = SubjectiveOpinion(0.7, 0.00, 0.3, 0.50)
so2 = SubjectiveOpinion(0.55 - 1e-2, 0.45, 1e-2, 0.50)
# Get a reference to Java ArrayList through jpy bridge
ArrayList = jpy.get_type('java.util.ArrayList')
olist = ArrayList()
# Add the subjective opinions to the array list
olist.add(so1)
olist.add(so2)
print("SO 1: " + so1.toString())
print("SO 2: " + so2.toString())
# Perform Consensus&Compromise Fusion of the opinions on the list
ccf = SubjectiveOpinion.ccCollectionFuse(olist)
print("CCF: " + ccf.toString())
# Perform Cumulative Fusion of the opinions in the list
cbf = SubjectiveOpinion.cumulativeCollectionFuse(olist)
print("CBF: " + cbf.toString())
print(cbf.getBelief(), " ", cbf.getDisbelief(), " ", cbf.getUncertainty())
| 36.4375
| 171
| 0.754717
|
407d8f0d1a8e091683e148b00a791b2009d9a020
| 4,443
|
py
|
Python
|
GANs/sagan/match.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:25.000Z
|
2020-08-03T12:49:25.000Z
|
GANs/sagan/match.py
|
takuseno/nnabla-examples
|
070d25078ad3d5458744dbfd390cdd926e20e573
|
[
"Apache-2.0"
] | null | null | null |
GANs/sagan/match.py
|
takuseno/nnabla-examples
|
070d25078ad3d5458744dbfd390cdd926e20e573
|
[
"Apache-2.0"
] | 1
|
2020-04-25T06:11:28.000Z
|
2020-04-25T06:11:28.000Z
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from nnabla.monitor import Monitor, MonitorImage, MonitorImageTile
from nnabla.utils.nnp_graph import NnpLoader
import nnabla.utils.save as save
from nnabla.ext_utils import get_extension_context
from args import get_args, save_args
from helpers import (generate_random_class, generate_one_class,
get_input_and_output, preprocess, resample)
from models import generator, discriminator, gan_loss
from imagenet_data import data_iterator_imagenet
def match(args):
# Context
extension_module = "cudnn"
ctx = get_extension_context(extension_module, device_id=args.device_id,
type_config=args.type_config)
nn.set_default_context(ctx)
# Args
latent = args.latent
maps = args.maps
batch_size = 1
image_size = args.image_size
n_classes = args.n_classes
not_sn = args.not_sn
threshold = args.truncation_threshold
# Model (SAGAN)
nn.load_parameters(args.model_load_path)
z = nn.Variable([batch_size, latent])
y_fake = nn.Variable([batch_size])
x_fake = generator(z, y_fake, maps=maps, n_classes=n_classes, test=True, sn=not_sn)\
.apply(persistent=True)
# Model (Inception model) from nnp file
nnp = NnpLoader(args.nnp_inception_model_load_path)
x, h = get_input_and_output(nnp, batch_size, args.variable_name)
# DataIterator for a given class_id
di = data_iterator_imagenet(args.train_dir, args.dirname_to_label_path,
batch_size=batch_size, n_classes=args.n_classes,
noise=False,
class_id=args.class_id)
# Monitor
monitor = Monitor(args.monitor_path)
name = "Matched Image {}".format(args.class_id)
monitor_image = MonitorImage(name, monitor, interval=1,
num_images=batch_size,
normalize_method=lambda x: (x + 1.) / 2. * 255.)
name = "Matched Image Tile {}".format(args.class_id)
monitor_image_tile = MonitorImageTile(name, monitor, interval=1,
num_images=batch_size + args.top_n,
normalize_method=lambda x: (x + 1.) / 2. * 255.)
# Generate and p(h|x).forward
# generate
z_data = resample(batch_size, latent, threshold)
y_data = generate_one_class(args.class_id, batch_size)
z.d = z_data
y_fake.d = y_data
x_fake.forward(clear_buffer=True)
# p(h|x).forward
x_fake_d = x_fake.d.copy()
x_fake_d = preprocess(
x_fake_d, (args.image_size, args.image_size), args.nnp_preprocess)
x.d = x_fake_d
h.forward(clear_buffer=True)
h_fake_d = h.d.copy()
# Feature matching
norm2_list = []
x_data_list = []
x_data_list.append(x_fake.d)
for i in range(di.size):
# forward for real data
x_d, _ = di.next()
x_data_list.append(x_d)
x_d = preprocess(
x_d, (args.image_size, args.image_size), args.nnp_preprocess)
x.d = x_d
h.forward(clear_buffer=True)
h_real_d = h.d.copy()
# norm computation
axis = tuple(np.arange(1, len(h.shape)).tolist())
norm2 = np.sum((h_real_d - h_fake_d) ** 2.0, axis=axis)
norm2_list.append(norm2)
# Save top-n images
argmins = np.argsort(norm2_list)
for i in range(args.top_n):
monitor_image.add(i, x_data_list[i])
matched_images = np.concatenate(x_data_list)
monitor_image_tile.add(0, matched_images)
def main():
args = get_args()
save_args(args, "match")
match(args)
if __name__ == '__main__':
main()
| 34.44186
| 90
| 0.664191
|
5f108b04b2e28a0d76459e9c9a07fd35e28cd55a
| 1,213
|
py
|
Python
|
tests/core/test_cluster_defaults.py
|
xmh19936688/rancher
|
2ad6bddf645515fe62d3e4a06cebd53fbbf9751e
|
[
"Apache-2.0"
] | 1
|
2019-02-19T01:46:20.000Z
|
2019-02-19T01:46:20.000Z
|
tests/core/test_cluster_defaults.py
|
xmh19936688/rancher
|
2ad6bddf645515fe62d3e4a06cebd53fbbf9751e
|
[
"Apache-2.0"
] | 1
|
2022-02-16T16:55:21.000Z
|
2022-02-16T16:55:26.000Z
|
tests/core/test_cluster_defaults.py
|
xmh19936688/rancher
|
2ad6bddf645515fe62d3e4a06cebd53fbbf9751e
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import json
@pytest.mark.skip(reason="cluster-defaults disabled")
def test_initial_defaults(admin_mc):
cclient = admin_mc.client
schema_defaults = {}
setting_defaults = {}
data = cclient.schema.types['cluster'].resourceFields
default = data["enableNetworkPolicy"]["default"]
for name in cclient.schema.types['cluster'].resourceFields.keys():
if name == "enableNetworkPolicy":
schema_defaults["enableNetworkPolicy"] = default
for name in cclient.schema.types['rancherKubernetesEngineConfig']\
.resourceFields.keys():
if name == "ignoreDockerVersion":
schema_defaults["ignoreDockerVersion"] = cclient.schema.\
types["rancherKubernetesEngineConfig"].\
resourceFields["ignoreDockerVersion"].\
data_dict()["default"]
setting = cclient.list_setting(name="cluster-defaults")
data = json.loads(setting['data'][0]['default'])
setting_defaults["enableNetworkPolicy"] = data["enableNetworkPolicy"]
setting_defaults["ignoreDockerVersion"] = \
data["rancherKubernetesEngineConfig"]["ignoreDockerVersion"]
assert schema_defaults == setting_defaults
| 35.676471
| 73
| 0.688376
|
84bb084ea5304458106fe05b3a935cf3a440f79b
| 3,562
|
gyp
|
Python
|
node_modules/ethereumjs-vm/node_modules/merkle-patricia-tree/node_modules/secp256k1/binding.gyp
|
thejdavid/EthTrading
|
349415ba84de31bf1b800c7c62c642fe6b5b44ad
|
[
"MIT"
] | 12
|
2018-09-12T05:59:27.000Z
|
2020-04-12T13:32:04.000Z
|
node_modules/ethereumjs-vm/node_modules/merkle-patricia-tree/node_modules/secp256k1/binding.gyp
|
thejdavid/EthTrading
|
349415ba84de31bf1b800c7c62c642fe6b5b44ad
|
[
"MIT"
] | 6
|
2020-09-07T23:38:46.000Z
|
2022-03-23T16:33:56.000Z
|
node_modules/ethereumjs-vm/node_modules/merkle-patricia-tree/node_modules/secp256k1/binding.gyp
|
thejdavid/EthTrading
|
349415ba84de31bf1b800c7c62c642fe6b5b44ad
|
[
"MIT"
] | 9
|
2018-10-03T21:00:13.000Z
|
2019-08-22T18:52:39.000Z
|
{
"targets": [{
"target_name": "secp256k1",
"variables": {
"with_gmp%": "<!(./utils/has_lib.sh gmpxx && ./utils/has_lib.sh gmp)"
},
"sources": [
"./src/addon.cc",
"./src/secretkey.cc",
"./src/publickey.cc",
"./src/signature.cc",
"./src/sign.cc",
"./src/verify.cc",
"./src/recover.cc",
"./src/ecdh.cc",
"./src/secp256k1-src/src/secp256k1.c",
"./src/secp256k1-src/contrib/lax_der_privatekey_parsing.c"
],
"cflags": [
"-Wall",
"-Wno-maybe-uninitialized",
"-Wno-uninitialized",
"-Wno-unused-function",
"-Wextra"
],
"cflags_cc+": [
"-std=c++0x"
],
"include_dirs": [
"/usr/local/include",
"./src/secp256k1-src",
"./src/secp256k1-src/contrib",
"./src/secp256k1-src/include",
"./src/secp256k1-src/src",
"<!(node -e \"require('nan')\")"
],
"conditions": [
[
"with_gmp=='true'", {
"defines": [
"HAVE_LIBGMP=1",
"USE_NUM_GMP=1",
"USE_FIELD_INV_NUM=1",
"USE_SCALAR_INV_NUM=1"
],
"libraries": [
"-lgmpxx",
"-lgmp"
]
}, {
"defines": [
"USE_NUM_NONE=1",
"USE_SCALAR_INV_BUILTIN=1",
"USE_FIELD_INV_BUILTIN=1",
"ENABLE_MODULE_ECDH=1",
"ENABLE_MODULE_RECOVERY=1"
]
}
],
[
"target_arch=='ia32'", {
"defines": [
"USE_FIELD_10X26=1",
"USE_SCALAR_8X32=1"
]
}
],
[
"target_arch=='x64'", {
"defines": [
"HAVE___INT128=1"
"USE_ASM_X86_64=1",
"USE_FIELD_5X52=1",
"USE_FIELD_5X52_INT128=1",
"USE_SCALAR_4X64=1"
]
}
],
[
'OS=="mac"', {
"libraries": [
"-L/usr/local/lib"
],
"xcode_settings": {
"MACOSX_DEPLOYMENT_TARGET": '10.7',
"OTHER_CPLUSPLUSFLAGS": [
'-stdlib=libc++'
]
}
}],
[
"OS=='win'", {
"conditions": [
[
"target_arch=='x64'", {
"variables": {
"openssl_root%": "C:/OpenSSL-Win64"
},
}, {
"variables": {
"openssl_root%": "C:/OpenSSL-Win32"
}
}
]
],
"libraries": [
"-l<(openssl_root)/lib/libeay32.lib",
],
"include_dirs": [
"<(openssl_root)/include",
],
}, {
"conditions": [
[
"target_arch=='ia32'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/piii"
}
}
],
[
"target_arch=='x64'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/k8"
},
}
],
[
"target_arch=='arm'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/arm"
}
}
],
],
"include_dirs": [
"<(nodedir)/deps/openssl/openssl/include",
"<(openssl_config_path)"
]
}
]
]
}]
}
| 24.565517
| 78
| 0.3863
|
0e6a425e045c083ae9d336bcfa90b6a0e157b575
| 3,573
|
py
|
Python
|
skip/serializers/v1/serializers.py
|
LCOGT/skip
|
2524ba71c39876aae8a31fff3de55e6cb7aa1f83
|
[
"BSD-3-Clause"
] | null | null | null |
skip/serializers/v1/serializers.py
|
LCOGT/skip
|
2524ba71c39876aae8a31fff3de55e6cb7aa1f83
|
[
"BSD-3-Clause"
] | 4
|
2020-09-10T20:31:54.000Z
|
2022-02-27T18:40:23.000Z
|
skip/serializers/v1/serializers.py
|
scimma/skip
|
aa9437d8c4f7d5edbffaec20e6651339241bbb95
|
[
"BSD-3-Clause"
] | null | null | null |
from astropy.coordinates import Angle
from astropy import units
from skip.models import Alert, Topic
from rest_framework import serializers
class AlertSerializer(serializers.ModelSerializer):
alert_identifier = serializers.CharField(source='identifier')
alert_timestamp = serializers.CharField(source='timestamp')
message = serializers.JSONField(source='parsed_message')
extracted_fields = serializers.SerializerMethodField()
right_ascension = serializers.SerializerMethodField()
right_ascension_sexagesimal = serializers.SerializerMethodField()
declination = serializers.SerializerMethodField()
declination_sexagesimal = serializers.SerializerMethodField()
topic = serializers.SerializerMethodField()
class Meta:
model = Alert
fields = [
'id',
'alert_identifier',
'alert_timestamp',
'topic',
'right_ascension',
'declination',
'right_ascension_sexagesimal',
'declination_sexagesimal',
'extracted_fields',
'message',
'created',
'modified']
def get_extracted_fields(self, obj):
extracted_fields = {}
event = obj.events.all()
if event.count() > 0:
event_attributes_list = event.first().eventattributes_set.order_by('-sequence_number')
if event_attributes_list.count() > 0:
event_attributes = event_attributes_list.first()
extracted_fields['BBH'] = event_attributes.attributes.get('prob_bbh', '')
extracted_fields['BNS'] = event_attributes.attributes.get('prob_bns', '')
extracted_fields['FAR'] = event_attributes.attributes.get('far', '')
extracted_fields['NSBH'] = event_attributes.attributes.get('prob_nsbh', '')
extracted_fields['HasNS'] = event_attributes.attributes.get('prob_ns', '')
extracted_fields['MassGap'] = event_attributes.attributes.get('prob_massgap', '')
extracted_fields['area_50'] = event_attributes.attributes.get('area_50', '')
extracted_fields['area_90'] = event_attributes.attributes.get('area_90', '')
extracted_fields['HasRemnant'] = event_attributes.attributes.get('prob_remnant', '')
extracted_fields['Instruments'] = ''
extracted_fields['Terrestrial'] = event_attributes.attributes.get('prob_terres', '')
version = obj.parsed_message.get('notice_type', '') + ' ' + obj.parsed_message.get('sequence_num', '')
extracted_fields['data_version'] = version
return extracted_fields
def get_right_ascension(self, obj):
if obj.coordinates:
return obj.coordinates.x
def get_declination(self, obj):
if obj.coordinates:
return obj.coordinates.y
def get_right_ascension_sexagesimal(self, obj):
if obj.coordinates:
a = Angle(obj.coordinates.x, unit=units.degree)
return a.to_string(unit=units.hour, sep=':')
def get_declination_sexagesimal(self, obj):
if obj.coordinates:
a = Angle(obj.coordinates.y, unit=units.degree)
return a.to_string(unit=units.degree, sep=':')
def get_topic(self, obj):
return Topic.objects.get(pk=obj.topic.id).name
class TopicSerializer(serializers.ModelSerializer):
class Meta:
model = Topic
fields = ['id', 'name']
| 43.048193
| 118
| 0.631962
|
547c2e607c8e1523fe5f11af958ca426ca45a3c8
| 445
|
py
|
Python
|
setup.py
|
sagar30051991/helpdesk
|
d6b8e29ba70464c8ecbe5af9bb2ffcf3c3ab8610
|
[
"MIT"
] | 1
|
2017-09-12T17:03:38.000Z
|
2017-09-12T17:03:38.000Z
|
setup.py
|
sagar30051991/helpdesk
|
d6b8e29ba70464c8ecbe5af9bb2ffcf3c3ab8610
|
[
"MIT"
] | null | null | null |
setup.py
|
sagar30051991/helpdesk
|
d6b8e29ba70464c8ecbe5af9bb2ffcf3c3ab8610
|
[
"MIT"
] | 3
|
2016-06-03T11:21:40.000Z
|
2017-09-12T17:03:39.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
version = '0.0.1'
with open("requirements.txt", "r") as f:
install_requires = f.readlines()
setup(
name='helpdesk',
version=version,
description='helpdesk',
author='helpdesk',
author_email='makarand.b@indictranstech.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 21.190476
| 49
| 0.698876
|
1be784a8369c06aa9ff1c7ec776c634a56078a00
| 574
|
py
|
Python
|
tripaille/commands/feature/delete_orphans.py
|
mboudet/python-tripal
|
c1d0128b16b7c0d3794e217993b053bf0fb64a71
|
[
"MIT"
] | 3
|
2017-09-08T15:16:07.000Z
|
2019-07-18T12:00:18.000Z
|
tripaille/commands/feature/delete_orphans.py
|
abretaud/python-tripal
|
9988f5a648a40be21d2ae553b2eda91c15fa881e
|
[
"MIT"
] | 9
|
2018-04-21T14:33:50.000Z
|
2019-02-08T09:59:32.000Z
|
tripaille/commands/feature/delete_orphans.py
|
abretaud/python-tripal
|
9988f5a648a40be21d2ae553b2eda91c15fa881e
|
[
"MIT"
] | 2
|
2018-09-04T13:00:40.000Z
|
2018-12-04T14:59:33.000Z
|
import click
from tripaille.cli import pass_context
from tripaille.decorators import custom_exception, str_output
@click.command('delete_orphans')
@click.option(
"--job_name",
help="Name of the job",
type=str
)
@click.option(
"--no_wait",
help="Return immediately without waiting for job completion",
is_flag=True
)
@pass_context
@custom_exception
@str_output
def cli(ctx, job_name="", no_wait=""):
"""Delete orphans Drupal feature nodes
Output:
status
"""
return ctx.gi.feature.delete_orphans(job_name=job_name, no_wait=no_wait)
| 20.5
| 76
| 0.722997
|
7a151c62700c85b03fb4fe0dee3f57f53297e2c0
| 5,903
|
py
|
Python
|
mmocr/models/textdet/losses/bs_loss_tb.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
mmocr/models/textdet/losses/bs_loss_tb.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
mmocr/models/textdet/losses/bs_loss_tb.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmdet.core import multi_apply
from torch import nn
from mmocr.models.builder import LOSSES
@LOSSES.register_module()
class BSLoss_tb(nn.Module):
"""The class for implementing FCENet loss.
FCENet(CVPR2021): `Fourier Contour Embedding for Arbitrary-shaped Text
Detection <https://arxiv.org/abs/2104.10442>`_
Args:
fourier_degree (int) : The maximum Fourier transform degree k.
num_sample (int) : The sampling points number of regression
loss. If it is too small, fcenet tends to be overfitting.
ohem_ratio (float): the negative/positive ratio in OHEM.
"""
def __init__(self, bs_degree, cp_num, ohem_ratio=3.):
super().__init__()
self.bs_degree = bs_degree
self.cp_num = cp_num
self.ohem_ratio = ohem_ratio
def forward(self, preds, _, p3_maps, p4_maps, p5_maps):
"""Compute FCENet loss.
Args:
preds (list[list[Tensor]]): The outer list indicates images
in a batch, and the inner list indicates the classification
prediction map (with shape :math:`(N, C, H, W)`) and
regression map (with shape :math:`(N, C, H, W)`).
p3_maps (list[ndarray]): List of leval 3 ground truth target map
with shape :math:`(C, H, W)`.
p4_maps (list[ndarray]): List of leval 4 ground truth target map
with shape :math:`(C, H, W)`.
p5_maps (list[ndarray]): List of leval 5 ground truth target map
with shape :math:`(C, H, W)`.
Returns:
dict: A loss dict with ``loss_text``, ``loss_center``,
``loss_reg_x`` and ``loss_reg_y``.
"""
assert isinstance(preds, list)
# assert p3_maps[0].shape[0] == 4 * self.fourier_degree + 5,\
# 'fourier degree not equal in FCEhead and FCEtarget'
device = preds[0][0].device
# to tensor
gts = [p3_maps, p4_maps, p5_maps]
for idx, maps in enumerate(gts):
gts[idx] = torch.from_numpy(np.stack(maps)).float().to(device)
losses = multi_apply(self.forward_single, preds, gts)
loss_tr = torch.tensor(0., device=device).float()
loss_tcl = torch.tensor(0., device=device).float()
loss_reg_x = torch.tensor(0., device=device).float()
loss_reg_y = torch.tensor(0., device=device).float()
for idx, loss in enumerate(losses):
if idx == 0:
loss_tr += sum(loss)
elif idx == 1:
loss_tcl += sum(loss)
elif idx == 2:
loss_reg_x += sum(loss)
else:
loss_reg_y += sum(loss)
results = dict(
loss_text=loss_tr,
loss_center=loss_tcl,
loss_reg_x=loss_reg_x,
loss_reg_y=loss_reg_y,
)
return results
def forward_single(self, pred, gt):
cls_pred = pred[0].permute(0, 2, 3, 1).contiguous()
reg_pred = pred[1].permute(0, 2, 3, 1).contiguous()
gt = gt.permute(0, 2, 3, 1).contiguous()
k = self.cp_num * 2
tr_pred = cls_pred[:, :, :, :2].view(-1, 2)
tcl_pred = cls_pred[:, :, :, 2:].view(-1, 2)
x_pred = reg_pred[:, :, :, 0:k].view(-1, k)
y_pred = reg_pred[:, :, :, k:].view(-1, k)
tr_mask = gt[:, :, :, :1].view(-1)
tcl_mask = gt[:, :, :, 1:2].view(-1)
train_mask = gt[:, :, :, 2:3].view(-1)
x_map = gt[:, :, :, 3:3 + k].view(-1, k)
y_map = gt[:, :, :, 3 + k:].view(-1, k)
tr_train_mask = train_mask * tr_mask
device = x_map.device
# tr loss
loss_tr = self.ohem(tr_pred, tr_mask.long(), train_mask.long())
# tcl loss
loss_tcl = torch.tensor(0.).float().to(device)
tr_neg_mask = 1 - tr_train_mask
if tr_train_mask.sum().item() > 0:
loss_tcl_pos = F.cross_entropy(tcl_pred[tr_train_mask.bool()], tcl_mask[tr_train_mask.bool()].long())
loss_tcl_neg = F.cross_entropy(tcl_pred[tr_neg_mask.bool()], tcl_mask[tr_neg_mask.bool()].long())
loss_tcl = loss_tcl_pos + 0.5 * loss_tcl_neg
# regression loss
loss_reg_x = torch.tensor(0.).float().to(device)
loss_reg_y = torch.tensor(0.).float().to(device)
if tr_train_mask.sum().item() > 0:
weight = (tr_mask[tr_train_mask.bool()].float() + tcl_mask[tr_train_mask.bool()].float()) / 2 # 10
weight = weight.contiguous().view(-1, 1)
loss_reg_x = torch.mean(
weight *
F.smooth_l1_loss(x_map[tr_train_mask.bool()], x_pred[tr_train_mask.bool()], reduction='none')) # / scale
loss_reg_y = torch.mean(
weight *
F.smooth_l1_loss(y_map[tr_train_mask.bool()], y_pred[tr_train_mask.bool()], reduction='none')) # / scale
return loss_tr, loss_tcl, loss_reg_x, loss_reg_y
def ohem(self, predict, target, train_mask):
device = train_mask.device
pos = (target * train_mask).bool()
neg = ((1 - target) * train_mask).bool()
n_pos = pos.float().sum()
if n_pos.item() > 0:
loss_pos = F.cross_entropy(predict[pos], target[pos], reduction='sum')
loss_neg = F.cross_entropy(predict[neg], target[neg], reduction='none')
n_neg = min(int(neg.float().sum().item()), int(self.ohem_ratio * n_pos.float()))
else:
loss_pos = torch.tensor(0.).to(device)
loss_neg = F.cross_entropy(predict[neg], target[neg], reduction='none')
n_neg = 100
if len(loss_neg) > n_neg:
loss_neg, _ = torch.topk(loss_neg, n_neg)
return (loss_pos + loss_neg.sum()) / (n_pos + n_neg).float()
| 38.835526
| 121
| 0.571066
|
060025f16c04bb4d9f51088613da854542257688
| 1,080
|
py
|
Python
|
manage_versions/settings.py
|
zilohumberto/PySvn-Handle-Delta-Deploy
|
197a55ebda688c0f480bd3bb72db7e5e8bd163f9
|
[
"Unlicense"
] | null | null | null |
manage_versions/settings.py
|
zilohumberto/PySvn-Handle-Delta-Deploy
|
197a55ebda688c0f480bd3bb72db7e5e8bd163f9
|
[
"Unlicense"
] | null | null | null |
manage_versions/settings.py
|
zilohumberto/PySvn-Handle-Delta-Deploy
|
197a55ebda688c0f480bd3bb72db7e5e8bd163f9
|
[
"Unlicense"
] | null | null | null |
import codecs
import json
#all vars for default to start!
class svnSettings:
count = 2
dir_local = []
dir_server = []
concurrent_id = 0
dir_toGenerate = []
last_revison = []
config = None
source_config_file = "C:\Desarrollo-Practica\PySvn\manage_versions\config.json"
def load_settings(self,):
configFile = open(
self.source_config_file).read()
self.config = json.loads(configFile)
self.change_settings()
def change_settings(self,):
self.count = len(self.config)
for i in range(0,len(self.config)):
self.dir_server.append(self.config[i]["source_server"])
self.dir_local.append(self.config[i]["source_local"])
self.dir_toGenerate.append(self.config[i]["destination"])
self.last_revison.append(int(self.config[i]["last_revision"]))
def save_settings(self):
with open(self.source_config_file, 'w') as fp:
json.dump(self.config, fp)
def generate_info(self):
pass
| 31.764706
| 84
| 0.612037
|
18523b5118c989f63c92949de9a0fc3d3f08a849
| 40,248
|
py
|
Python
|
circup.py
|
jepler/circup
|
751578094b9e79410fb01805a4a51f14b2ee368a
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
circup.py
|
jepler/circup
|
751578094b9e79410fb01805a4a51f14b2ee368a
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
circup.py
|
jepler/circup
|
751578094b9e79410fb01805a4a51f14b2ee368a
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
# SPDX-FileCopyrightText: 2019 Nicholas Tollervey, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
CircUp -- a utility to manage and update libraries on a CircuitPython device.
"""
import ctypes
import glob
import json
import logging
import os
from pathlib import Path
import re
import shutil
from subprocess import check_output
import sys
import zipfile
import appdirs
import click
import requests
from semver import VersionInfo
# Useful constants.
#: The unique USB vendor ID for Adafruit boards.
VENDOR_ID = 9114
#: Flag to indicate if the command is being run in verbose mode.
VERBOSE = False
#: The location of data files used by circup (following OS conventions).
DATA_DIR = appdirs.user_data_dir(appname="circup", appauthor="adafruit")
#: The path to the JSON file containing the metadata about the current bundle.
BUNDLE_DATA = os.path.join(DATA_DIR, "circup.json")
#: The path to the zip file containing the current library bundle.
BUNDLE_ZIP = os.path.join(DATA_DIR, "adafruit-circuitpython-bundle-{}.zip")
#: The path to the directory into which the current bundle is unzipped.
BUNDLE_DIR = os.path.join(DATA_DIR, "adafruit_circuitpython_bundle_{}")
#: The directory containing the utility's log file.
LOG_DIR = appdirs.user_log_dir(appname="circup", appauthor="adafruit")
#: The location of the log file for the utility.
LOGFILE = os.path.join(LOG_DIR, "circup.log")
#: The libraries (and blank lines) which don't go on devices
NOT_MCU_LIBRARIES = [
"",
"adafruit-blinka",
"adafruit-blinka-bleio",
"adafruit-blinka-displayio",
"pyserial",
]
#: The version of CircuitPython found on the connected device.
CPY_VERSION = ""
#: The latest version of the CircuitPython Bundle from github.
LATEST_BUNDLE_VERSION = ""
# Ensure DATA_DIR / LOG_DIR related directories and files exist.
if not os.path.exists(DATA_DIR): # pragma: no cover
os.makedirs(DATA_DIR)
if not os.path.exists(LOG_DIR): # pragma: no cover
os.makedirs(LOG_DIR)
# Setup logging.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logfile_handler = logging.FileHandler(LOGFILE)
log_formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s", datefmt="%m/%d/%Y %H:%M:%S"
)
logfile_handler.setFormatter(log_formatter)
logger.addHandler(logfile_handler)
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/circup.git"
class Module:
"""
Represents a CircuitPython module.
"""
# pylint: disable=too-many-arguments
def __init__(self, path, repo, device_version, bundle_version, mpy):
"""
The ``self.file`` and ``self.name`` attributes are constructed from
the ``path`` value. If the path is to a directory based module, the
resulting self.file value will be None, and the name will be the
basename of the directory path.
:param str path: The path to the module on the connected
CIRCUITPYTHON device.
:param str repo: The URL of the Git repository for this module.
:param str device_version: The semver value for the version on device.
:param str bundle_version: The semver value for the version in bundle.
:param bool mpy: Flag to indicate if the module is byte-code compiled.
"""
self.path = path
if os.path.isfile(self.path):
# Single file module.
self.file = os.path.basename(path)
self.name = self.file.replace(".py", "").replace(".mpy", "")
else:
# Directory based module.
self.file = None
self.name = os.path.basename(os.path.dirname(self.path))
self.repo = repo
self.device_version = device_version
self.bundle_version = bundle_version
self.mpy = mpy
# Figure out the bundle path.
self.bundle_path = None
if self.mpy:
# Byte compiled, now check CircuitPython version.
major_version = CPY_VERSION.split(".")[0]
bundle_platform = "{}mpy".format(major_version)
else:
# Regular Python
bundle_platform = "py"
for search_path, _, _ in os.walk(BUNDLE_DIR.format(bundle_platform)):
if os.path.basename(search_path) == "lib":
if self.file:
self.bundle_path = os.path.join(search_path, self.file)
else:
self.bundle_path = os.path.join(search_path, self.name)
logger.info(self)
# pylint: enable=too-many-arguments
@property
def outofdate(self):
"""
Returns a boolean to indicate if this module is out of date.
:return: Truthy indication if the module is out of date.
"""
if self.device_version and self.bundle_version:
try:
return VersionInfo.parse(self.device_version) < VersionInfo.parse(
self.bundle_version
)
except ValueError as ex:
logger.warning("Module '%s' has incorrect semver value.", self.name)
logger.warning(ex)
return True # Assume out of date to try to update.
@property
def major_update(self):
"""
Returns a boolean to indicate if this is a major version update.
:return: Boolean indicating if this is a major version upgrade
"""
try:
if (
VersionInfo.parse(self.device_version).major
== VersionInfo.parse(self.bundle_version).major
):
return False
except (TypeError, ValueError) as ex:
logger.warning("Module '%s' has incorrect semver value.", self.name)
logger.warning(ex)
return True # Assume Major Version udpate.
@property
def row(self):
"""
Returns a tuple of items to display in a table row to show the module's
name, local version and remote version.
:return: A tuple containing the module's name, version on the connected
device and version in the latest bundle.
"""
loc = self.device_version if self.device_version else "unknown"
rem = self.bundle_version if self.bundle_version else "unknown"
major_update = str(self.major_update)
return (self.name, loc, rem, major_update)
def update(self):
"""
Delete the module on the device, then copy the module from the bundle
back onto the device.
The caller is expected to handle any exceptions raised.
"""
if os.path.isdir(self.path):
# Delete and copy the directory.
shutil.rmtree(self.path, ignore_errors=True)
shutil.copytree(self.bundle_path, self.path)
else:
# Delete and copy file.
os.remove(self.path)
shutil.copyfile(self.bundle_path, self.path)
def __repr__(self):
"""
Helps with log files.
:return: A repr of a dictionary containing the module's metadata.
"""
return repr(
{
"path": self.path,
"file": self.file,
"name": self.name,
"repo": self.repo,
"device_version": self.device_version,
"bundle_version": self.bundle_version,
"bundle_path": self.bundle_path,
"mpy": self.mpy,
}
)
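# Illustrative sketch, for exposition only: the outofdate and major_update
# checks above reduce to semver comparisons like these (VersionInfo is the
# semver class already used by this module).
def _example_version_comparison():
    assert VersionInfo.parse("1.2.3") < VersionInfo.parse("1.10.0")
    assert VersionInfo.parse("1.9.9").major != VersionInfo.parse("2.0.0").major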
def clean_library_name(assumed_library_name):
"""
    Most CP repos and library names look like this:
repo: Adafruit_CircuitPython_LC709203F
library: adafruit_lc709203f
But some do not and this handles cleaning that up.
Also cleans up if the pypi or reponame is passed in instead of the
CP library name.
:param str assumed_library_name: An assumed name of a library from user
or requirements.txt entry
:return: str proper library name
"""
not_standard_names = {
# Assumed Name : Actual Name
"adafruit_adafruitio": "adafruit_io",
"adafruit_busdevice": "adafruit_bus_device",
"adafruit_neopixel": "neopixel",
"adafruit_sd": "adafruit_sdcard",
"adafruit_simpleio": "simpleio",
}
if "circuitpython" in assumed_library_name:
# convert repo or pypi name to common library name
assumed_library_name = (
assumed_library_name.replace("-circuitpython-", "_")
.replace("_circuitpython_", "_")
.replace("-", "_")
)
if assumed_library_name in not_standard_names.keys():
return not_standard_names[assumed_library_name]
return assumed_library_name
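# Illustrative sketch, for exposition only: repo-style and pypi-style names
# collapse onto the bundle's library names, and the irregular cases are
# resolved via the explicit mapping above.
def _example_clean_library_name():
    assert clean_library_name("adafruit-circuitpython-lc709203f") == "adafruit_lc709203f"
    assert clean_library_name("adafruit_circuitpython_busdevice") == "adafruit_bus_device"
    assert clean_library_name("neopixel") == "neopixel"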
def ensure_latest_bundle():
"""
Ensure that there's a copy of the latest library bundle available so circup
can check the metadata contained therein.
"""
logger.info("Checking for library updates.")
tag = get_latest_tag()
old_tag = "0"
if os.path.isfile(BUNDLE_DATA):
with open(BUNDLE_DATA, encoding="utf-8") as data:
try:
old_tag = json.load(data)["tag"]
except json.decoder.JSONDecodeError as ex:
# Sometimes (why?) the JSON file becomes corrupt. In which case
# log it and carry on as if setting up for first time.
logger.error("Could not parse %s", BUNDLE_DATA)
logger.exception(ex)
if tag > old_tag:
logger.info("New version available (%s).", tag)
try:
get_bundle(tag)
with open(BUNDLE_DATA, "w", encoding="utf-8") as data:
json.dump({"tag": tag}, data)
except requests.exceptions.HTTPError as ex:
            # See #20 for the reason for this.
click.secho(
(
"There was a problem downloading the bundle. "
"Please try again in a moment."
),
fg="red",
)
logger.exception(ex)
sys.exit(1)
else:
logger.info("Current library bundle up to date %s.", tag)
def extract_metadata(path):
"""
    Given a file path, return a dictionary containing metadata extracted from
dunder attributes found therein. Works with both .py and .mpy files.
For Python source files, such metadata assignments should be simple and
single-line. For example::
__version__ = "1.1.4"
__repo__ = "https://github.com/adafruit/SomeLibrary.git"
For byte compiled .mpy files, a brute force / backtrack approach is used
to find the __version__ number in the file -- see comments in the
code for the implementation details.
:param str path: The path to the file containing the metadata.
:return: The dunder based metadata found in the file, as a dictionary.
"""
result = {}
logger.info("%s", path)
if path.endswith(".py"):
result["mpy"] = False
with open(path, encoding="utf-8") as source_file:
content = source_file.read()
#: The regex used to extract ``__version__`` and ``__repo__`` assignments.
dunder_key_val = r"""(__\w+__)\s*=\s*(?:['"]|\(\s)(.+)['"]"""
for match in re.findall(dunder_key_val, content):
result[match[0]] = str(match[1])
if result:
logger.info("Extracted metadata: %s", result)
return result
if path.endswith(".mpy"):
result["mpy"] = True
with open(path, "rb") as mpy_file:
content = mpy_file.read()
# Find the start location of the "__version__" (prepended with byte
# value of 11 to indicate length of "__version__").
loc = content.find(b"\x0b__version__")
if loc > -1:
# Backtrack until a byte value of the offset is reached.
offset = 1
while offset < loc:
val = int(content[loc - offset])
if val == offset - 1: # Off by one..!
# Found version, extract the number given boundaries.
start = loc - offset + 1 # No need for prepended length.
end = loc # Up to the start of the __version__.
version = content[start:end] # Slice the version number.
# Create a string version as metadata in the result.
result = {"__version__": version.decode("utf-8"), "mpy": True}
break # Nothing more to do.
offset += 1 # ...and again but backtrack by one.
return result
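# Illustrative sketch, for exposition only: writes a tiny .py source with the
# dunder assignments documented above to a temporary file, then shows what
# extract_metadata() recovers from it.
def _example_extract_metadata():
    import tempfile
    source = (
        '__version__ = "1.1.4"\n'
        '__repo__ = "https://github.com/adafruit/SomeLibrary.git"\n'
    )
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(source)
    try:
        metadata = extract_metadata(tmp.name)
        assert metadata["__version__"] == "1.1.4"
        assert metadata["__repo__"] == "https://github.com/adafruit/SomeLibrary.git"
        assert metadata["mpy"] is False
    finally:
        os.remove(tmp.name)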
def find_device():
"""
Return the location on the filesystem for the connected Adafruit device.
This is based upon how Mu discovers this information.
:return: The path to the device on the local filesystem.
"""
device_dir = None
# Attempt to find the path on the filesystem that represents the plugged in
# CIRCUITPY board.
if os.name == "posix":
# Linux / OSX
for mount_command in ["mount", "/sbin/mount"]:
try:
mount_output = check_output(mount_command).splitlines()
mounted_volumes = [x.split()[2] for x in mount_output]
for volume in mounted_volumes:
if volume.endswith(b"CIRCUITPY"):
device_dir = volume.decode("utf-8")
except FileNotFoundError:
continue
elif os.name == "nt":
# Windows
def get_volume_name(disk_name):
"""
Each disk or external device connected to windows has an attribute
called "volume name". This function returns the volume name for the
given disk/device.
Based upon answer given here: http://stackoverflow.com/a/12056414
"""
vol_name_buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.kernel32.GetVolumeInformationW(
ctypes.c_wchar_p(disk_name),
vol_name_buf,
ctypes.sizeof(vol_name_buf),
None,
None,
None,
None,
0,
)
return vol_name_buf.value
#
# In certain circumstances, volumes are allocated to USB
        # storage devices which cause Windows to raise a popup if their
        # volume contains no media. Wrapping the check in SetErrorMode
# with SEM_FAILCRITICALERRORS (1) prevents this popup.
#
old_mode = ctypes.windll.kernel32.SetErrorMode(1)
try:
for disk in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
path = "{}:\\".format(disk)
if os.path.exists(path) and get_volume_name(path) == "CIRCUITPY":
device_dir = path
# Report only the FIRST device found.
break
finally:
ctypes.windll.kernel32.SetErrorMode(old_mode)
else:
# No support for unknown operating systems.
raise NotImplementedError('OS "{}" not supported.'.format(os.name))
logger.info("Found device: %s", device_dir)
return device_dir
def find_modules(device_path):
"""
Extracts metadata from the connected device and available bundle and
returns this as a list of Module instances representing the modules on the
device.
:return: A list of Module instances describing the current state of the
modules on the connected device.
"""
# pylint: disable=broad-except
try:
device_modules = get_device_versions(device_path)
bundle_modules = get_bundle_versions()
result = []
for name, device_metadata in device_modules.items():
if name in bundle_modules:
bundle_metadata = bundle_modules[name]
path = device_metadata["path"]
repo = bundle_metadata.get("__repo__")
device_version = device_metadata.get("__version__")
bundle_version = bundle_metadata.get("__version__")
mpy = device_metadata["mpy"]
result.append(Module(path, repo, device_version, bundle_version, mpy))
return result
except Exception as ex:
# If it's not possible to get the device and bundle metadata, bail out
# with a friendly message and indication of what's gone wrong.
logger.exception(ex)
click.echo("There was a problem: {}".format(ex))
sys.exit(1)
# pylint: enable=broad-except
def get_bundle(tag):
"""
Downloads and extracts the version of the bundle with the referenced tag.
:param str tag: The GIT tag to use to download the bundle.
:return: The location of the resulting zip file in a temporary location on
the local filesystem.
"""
urls = {
"py": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle"
"/releases/download"
"/{tag}/adafruit-circuitpython-bundle-py-{tag}.zip".format(tag=tag)
),
"6mpy": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/"
"releases/download"
"/{tag}/adafruit-circuitpython-bundle-6.x-mpy-{tag}.zip".format(tag=tag)
),
"7mpy": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/"
"releases/download"
"/{tag}/adafruit-circuitpython-bundle-7.x-mpy-{tag}.zip".format(tag=tag)
),
}
click.echo("Downloading latest version information.\n")
for platform, url in urls.items():
logger.info("Downloading bundle: %s", url)
r = requests.get(url, stream=True)
# pylint: disable=no-member
if r.status_code != requests.codes.ok:
logger.warning("Unable to connect to %s", url)
r.raise_for_status()
# pylint: enable=no-member
total_size = int(r.headers.get("Content-Length"))
temp_zip = BUNDLE_ZIP.format(platform)
with click.progressbar(r.iter_content(1024), length=total_size) as pbar, open(
temp_zip, "wb"
) as f:
for chunk in pbar:
f.write(chunk)
pbar.update(len(chunk))
logger.info("Saved to %s", temp_zip)
temp_dir = BUNDLE_DIR.format(platform)
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
with zipfile.ZipFile(temp_zip, "r") as zfile:
zfile.extractall(temp_dir)
click.echo("\nOK\n")
def get_bundle_versions():
"""
Returns a dictionary of metadata from modules in the latest known release
of the library bundle. Uses the Python version (rather than the compiled
version) of the library modules.
:return: A dictionary of metadata about the modules available in the
library bundle.
"""
ensure_latest_bundle()
path = None
for path, _, _ in os.walk(BUNDLE_DIR.format("py")):
if os.path.basename(path) == "lib":
break
return get_modules(path)
def get_circuitpython_version(device_path):
"""
Returns the version number of CircuitPython running on the board connected
via ``device_path``. This is obtained from the ``boot_out.txt`` file on the
device, whose content will start with something like this::
Adafruit CircuitPython 4.1.0 on 2019-08-02;
:param str device_path: The path to the connected board.
:return: The version string for CircuitPython running on the connected
board.
"""
with open(os.path.join(device_path, "boot_out.txt")) as boot:
circuit_python, _ = boot.read().split(";")
return circuit_python.split(" ")[-3]
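# Illustrative sketch, for exposition only: the version extraction above
# applied to the documented first line of boot_out.txt.
def _example_boot_out_parsing():
    first_line = "Adafruit CircuitPython 4.1.0 on 2019-08-02;"
    circuit_python, _ = first_line.split(";")
    assert circuit_python.split(" ")[-3] == "4.1.0"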
def get_dependencies(*requested_libraries, mod_names, to_install=()):
"""
Return a list of other CircuitPython libraries
:param tuple requested_libraries: The libraries to search for dependencies
:param object mod_names: All the modules metadata from bundle
:return: tuple of module names to install which we build
"""
# Internal variables
_to_install = to_install
_requested_libraries = []
_rl = requested_libraries[0]
if not requested_libraries[0]:
# If nothing is requested, we're done
return _to_install
for l in _rl:
# Convert tuple to list and force all to lowercase, Clean the names
l = clean_library_name(l.lower())
if l in NOT_MCU_LIBRARIES:
logger.info("Skipping %s. It is not for microcontroller installs.", l)
else:
try:
# Don't process any names we can't find in mod_names
mod_names[l] # pylint: disable=pointless-statement
_requested_libraries.append(l)
except KeyError:
click.secho(
f"WARNING:\n\t{l} is not a known CircuitPython library.",
fg="yellow",
)
if not _requested_libraries:
# If nothing is requested, we're done
return _to_install
for library in _requested_libraries:
if library not in _to_install:
_to_install = _to_install + (library,)
# get the requirements.txt from bundle
requirements_txt = get_requirements(library)
if requirements_txt:
_requested_libraries.extend(
libraries_from_requirements(requirements_txt)
)
# we've processed this library, remove it from the list
_requested_libraries.remove(library)
return get_dependencies(
tuple(_requested_libraries),
mod_names=mod_names,
to_install=_to_install,
)
def get_device_versions(device_path):
"""
Returns a dictionary of metadata from modules on the connected device.
:return: A dictionary of metadata about the modules available on the
connected device.
"""
return get_modules(os.path.join(device_path, "lib"))
def get_latest_release_from_url(url):
"""
Find the tag name of the latest release by using HTTP HEAD and decoding the redirect.
:return: The most recent tag value for the release.
"""
logger.info("Requesting redirect information: %s", url)
response = requests.head(url)
responseurl = response.url
if response.is_redirect:
responseurl = response.headers["Location"]
tag = responseurl.rsplit("/", 1)[-1]
logger.info("Tag: '%s'", tag)
return tag
def get_latest_tag():
"""
Find the value of the latest tag for the Adafruit CircuitPython library
bundle.
:return: The most recent tag value for the project.
"""
global LATEST_BUNDLE_VERSION
if LATEST_BUNDLE_VERSION == "":
LATEST_BUNDLE_VERSION = get_latest_release_from_url(
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest"
)
return LATEST_BUNDLE_VERSION
def get_modules(path):
"""
Get a dictionary containing metadata about all the Python modules found in
the referenced path.
:param str path: The directory in which to find modules.
:return: A dictionary containing metadata about the found modules.
"""
result = {}
if not path:
return result
single_file_py_mods = glob.glob(os.path.join(path, "*.py"))
single_file_mpy_mods = glob.glob(os.path.join(path, "*.mpy"))
directory_mods = [
d
for d in glob.glob(os.path.join(path, "*", ""))
if not os.path.basename(os.path.normpath(d)).startswith(".")
]
single_file_mods = single_file_py_mods + single_file_mpy_mods
for sfm in [f for f in single_file_mods if not os.path.basename(f).startswith(".")]:
metadata = extract_metadata(sfm)
metadata["path"] = sfm
result[os.path.basename(sfm).replace(".py", "").replace(".mpy", "")] = metadata
for dm in directory_mods:
name = os.path.basename(os.path.dirname(dm))
metadata = {}
py_files = glob.glob(os.path.join(dm, "*.py"))
mpy_files = glob.glob(os.path.join(dm, "*.mpy"))
all_files = py_files + mpy_files
for source in [f for f in all_files if not os.path.basename(f).startswith(".")]:
metadata = extract_metadata(source)
if "__version__" in metadata:
metadata["path"] = dm
result[name] = metadata
break
else:
# No version metadata found.
result[name] = {"path": dm, "mpy": bool(mpy_files)}
return result
def get_requirements(library_name):
"""
Return a string of the requirements.txt for a GitHub Repo
    NOTE: This only looks at the py bundle. No known differences in the mpy
bundle for requirements.txt
:param str library_name: CircuitPython library name
:return: str the content of requirements.txt or None if not found
"""
bundle_path = BUNDLE_DIR.format("py")
requirements_txt = (
"{}/adafruit-circuitpython-bundle-py-{}/requirements/{}/"
"requirements.txt".format(bundle_path, get_latest_tag(), library_name)
)
if Path(requirements_txt).is_file():
return open(requirements_txt).read()
return None
# pylint: disable=too-many-locals,too-many-branches
def install_module(device_path, name, py, mod_names): # pragma: no cover
"""
Finds a connected device and installs a given module name if it
is available in the current module bundle and is not already
installed on the device.
TODO: There is currently no check for the version.
:param str device_path: The path to the connected board.
:param str name: Name of module to install
:param bool py: Boolean to specify if the module should be installed from
source or from a pre-compiled module
:param mod_names: Dictionary of metadata from modules that can be generated
with get_bundle_versions()
"""
if not name:
click.echo("No module name(s) provided.")
elif name in mod_names:
library_path = os.path.join(device_path, "lib")
if not os.path.exists(library_path): # pragma: no cover
os.makedirs(library_path)
metadata = mod_names[name]
# Grab device modules to check if module already installed
device_modules = []
for module in find_modules(device_path):
device_modules.append(module.name)
if name in device_modules:
click.echo("'{}' is already installed.".format(name))
return
if py:
# Use Python source for module.
source_path = metadata["path"] # Path to Python source version.
if os.path.isdir(source_path):
target = os.path.basename(os.path.dirname(source_path))
target_path = os.path.join(library_path, target)
# Copy the directory.
shutil.copytree(source_path, target_path)
else:
target = os.path.basename(source_path)
target_path = os.path.join(library_path, target)
# Copy file.
shutil.copyfile(source_path, target_path)
else:
# Use pre-compiled mpy modules.
module_name = os.path.basename(metadata["path"]).replace(".py", ".mpy")
if not module_name:
# Must be a directory based module.
module_name = os.path.basename(os.path.dirname(metadata["path"]))
major_version = CPY_VERSION.split(".")[0]
bundle_platform = "{}mpy".format(major_version)
bundle_path = ""
for path, _, _ in os.walk(BUNDLE_DIR.format(bundle_platform)):
if os.path.basename(path) == "lib":
bundle_path = os.path.join(path, module_name)
if bundle_path:
if os.path.isdir(bundle_path):
target_path = os.path.join(library_path, module_name)
# Copy the directory.
shutil.copytree(bundle_path, target_path)
else:
target = os.path.basename(bundle_path)
target_path = os.path.join(library_path, target)
# Copy file.
shutil.copyfile(bundle_path, target_path)
else:
raise IOError("Cannot find compiled version of module.")
click.echo("Installed '{}'.".format(name))
else:
click.echo("Unknown module named, '{}'.".format(name))
# pylint: enable=too-many-locals,too-many-branches
def libraries_from_requirements(requirements):
"""
Clean up supplied requirements.txt and turn into tuple of CP libraries
:param str requirements: A string version of a requirements.txt
:return: tuple of library names
"""
libraries = ()
for line in requirements.split("\n"):
line = line.lower().strip()
if line.startswith("#") or line == "":
# skip comments
pass
else:
if any(operators in line for operators in [">", "<", "="]):
# Remove everything after any pip style version specifiers
line = re.split("[<|>|=|]", line)[0]
libraries = libraries + (line,)
return libraries
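# Illustrative sketch, for exposition only: comment lines and pip-style
# version specifiers are stripped, leaving a tuple of bare library names.
# The requirements text below is made up for the example.
def _example_libraries_from_requirements():
    requirements = "# sensors\nadafruit-circuitpython-lc709203f>=2.0\nneopixel\n"
    assert libraries_from_requirements(requirements) == (
        "adafruit-circuitpython-lc709203f",
        "neopixel",
    )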
# ----------- CLI command definitions ----------- #
# The following functions have IO side effects (for instance they emit to
# stdout). Ergo, these are not checked with unit tests. Most of the
# functionality they provide is provided by the functions above, which *are*
# tested. Most of the logic of the following functions is to prepare things for
# presentation to / interaction with the user.
@click.group()
@click.option(
"--verbose", is_flag=True, help="Comprehensive logging is sent to stdout."
)
@click.option(
"--path",
type=click.Path(exists=True, file_okay=False),
help="Path to CircuitPython directory. Overrides automatic path detection.",
)
@click.version_option(
prog_name="CircUp",
message="%(prog)s, A CircuitPython module updater. Version %(version)s",
)
@click.pass_context
def main(ctx, verbose, path): # pragma: no cover
"""
A tool to manage and update libraries on a CircuitPython device.
"""
ctx.ensure_object(dict)
if verbose:
# Configure additional logging to stdout.
global VERBOSE
VERBOSE = True
verbose_handler = logging.StreamHandler(sys.stdout)
verbose_handler.setLevel(logging.INFO)
verbose_handler.setFormatter(log_formatter)
logger.addHandler(verbose_handler)
click.echo("Logging to {}\n".format(LOGFILE))
logger.info("### Started Circup ###")
if path:
device_path = path
else:
device_path = find_device()
ctx.obj["DEVICE_PATH"] = device_path
if device_path is None:
click.secho("Could not find a connected Adafruit device.", fg="red")
sys.exit(1)
global CPY_VERSION
CPY_VERSION = get_circuitpython_version(device_path)
click.echo(
"Found device at {}, running CircuitPython {}.".format(device_path, CPY_VERSION)
)
latest_version = get_latest_release_from_url(
"https://github.com/adafruit/circuitpython/releases/latest"
)
try:
if VersionInfo.parse(CPY_VERSION) < VersionInfo.parse(latest_version):
click.secho(
"A newer version of CircuitPython ({}) is available.".format(
latest_version
),
fg="green",
)
except ValueError as ex:
logger.warning("CircuitPython has incorrect semver value.")
logger.warning(ex)
@main.command()
@click.option("-r", "--requirement", is_flag=True)
@click.pass_context
def freeze(ctx, requirement): # pragma: no cover
"""
Output details of all the modules found on the connected CIRCUITPYTHON
device. Option -r saves output to requirements.txt file
"""
logger.info("Freeze")
modules = find_modules(ctx.obj["DEVICE_PATH"])
if modules:
output = []
for module in modules:
output.append("{}=={}".format(module.name, module.device_version))
for module in output:
click.echo(module)
logger.info(module)
if requirement:
cwd = os.path.abspath(os.getcwd())
for i, module in enumerate(output):
output[i] += "\n"
with open(cwd + "/" + "requirements.txt", "w", newline="\n") as file:
file.truncate(0)
file.writelines(output)
else:
click.echo("No modules found on the device.")
@main.command()
@click.pass_context
def list(ctx): # pragma: no cover
"""
Lists all out of date modules found on the connected CIRCUITPYTHON device.
"""
logger.info("List")
# Grab out of date modules.
data = [("Module", "Version", "Latest", "Major Update")]
modules = [m.row for m in find_modules(ctx.obj["DEVICE_PATH"]) if m.outofdate]
if modules:
data += modules
# Nice tabular display.
col_width = [0, 0, 0, 0]
for row in data:
for i, word in enumerate(row):
col_width[i] = max(len(word) + 2, col_width[i])
dashes = tuple(("-" * (width - 1) for width in col_width))
data.insert(1, dashes)
click.echo(
"The following modules are out of date or probably need an update.\n"
"Major Updates may include breaking changes. Review before updating.\n"
)
for row in data:
output = ""
for index, cell in enumerate(row):
output += cell.ljust(col_width[index])
if not VERBOSE:
click.echo(output)
logger.info(output)
else:
click.echo("All modules found on the device are up to date.")
@main.command()
@click.argument("modules", required=False, nargs=-1)
@click.option("--py", is_flag=True)
@click.option("-r", "--requirement")
@click.pass_context
def install(ctx, modules, py, requirement): # pragma: no cover
"""
Install a named module(s) onto the device. Multiple modules
can be installed at once by providing more than one module name, each
separated by a space.
Option -r allows specifying a text file to install all modules listed in
the text file.
TODO: Ensure there's enough space on the device, work out the version of
    CircuitPython on the device in order to copy the appropriate .mpy versions
too. ;-)
"""
available_modules = get_bundle_versions()
mod_names = {}
for module, metadata in available_modules.items():
mod_names[module.replace(".py", "").lower()] = metadata
if requirement:
cwd = os.path.abspath(os.getcwd())
requirements_txt = open(cwd + "/" + requirement, "r").read()
requested_installs = sorted(libraries_from_requirements(requirements_txt))
else:
requested_installs = sorted(modules)
click.echo(f"Searching for dependencies for: {requested_installs}")
to_install = get_dependencies(requested_installs, mod_names=mod_names)
if to_install is not None:
to_install = sorted(to_install)
click.echo(f"Ready to install: {to_install}\n")
for library in to_install:
install_module(ctx.obj["DEVICE_PATH"], library, py, mod_names)
@click.argument("match", required=False, nargs=1)
@main.command()
def show(match): # pragma: no cover
"""
Show a list of available modules in the bundle. These are modules which
*could* be installed on the device.
If MATCH is specified only matching modules will be listed.
"""
available_modules = get_bundle_versions()
module_names = sorted([m.replace(".py", "") for m in available_modules])
if match is not None:
module_names = [m for m in module_names if match in m]
click.echo("\n".join(module_names))
click.echo(
"{} shown of {} packages.".format(len(module_names), len(available_modules))
)
@main.command()
@click.argument("module", nargs=-1)
@click.pass_context
def uninstall(ctx, module): # pragma: no cover
"""
Uninstall a named module(s) from the connected device. Multiple modules
can be uninstalled at once by providing more than one module name, each
separated by a space.
"""
for name in module:
device_modules = get_device_versions(ctx.obj["DEVICE_PATH"])
name = name.lower()
mod_names = {}
for module_item, metadata in device_modules.items():
mod_names[module_item.replace(".py", "").lower()] = metadata
if name in mod_names:
library_path = os.path.join(ctx.obj["DEVICE_PATH"], "lib")
metadata = mod_names[name]
module_path = metadata["path"]
if os.path.isdir(module_path):
target = os.path.basename(os.path.dirname(module_path))
target_path = os.path.join(library_path, target)
# Remove the directory.
shutil.rmtree(target_path)
else:
target = os.path.basename(module_path)
target_path = os.path.join(library_path, target)
# Remove file
os.remove(target_path)
click.echo("Uninstalled '{}'.".format(name))
else:
click.echo("Module '{}' not found on device.".format(name))
@main.command(
short_help=(
"Update modules on the device. "
"Use --all to automatically update all modules without Major Version warnings."
)
)
@click.option(
"--all", is_flag=True, help="Update all modules without Major Version warnings."
)
@click.pass_context
def update(ctx, all): # pragma: no cover
"""
Checks for out-of-date modules on the connected CIRCUITPYTHON device, and
prompts the user to confirm updating such modules.
"""
logger.info("Update")
# Grab out of date modules.
modules = [m for m in find_modules(ctx.obj["DEVICE_PATH"]) if m.outofdate]
if modules:
click.echo("Found {} module[s] needing update.".format(len(modules)))
if not all:
click.echo("Please indicate which modules you wish to update:\n")
for module in modules:
update_flag = all
if VERBOSE:
click.echo(
"Device version: {}, Bundle version: {}".format(
module.device_version, module.bundle_version
)
)
if isinstance(module.bundle_version, str) and not VersionInfo.isvalid(
module.bundle_version
):
click.secho(
f"WARNING: Library {module.name} repo has incorrect __version__"
"\n\tmetadata. Circup will assume it needs updating."
"\n\tPlease file an issue in the library repo.",
fg="yellow",
)
if module.repo:
click.secho(f"\t{module.repo}", fg="yellow")
if not update_flag:
if module.major_update:
update_flag = click.confirm(
(
"'{}' is a Major Version update and may contain breaking "
"changes. Do you want to update?".format(module.name)
)
)
else:
update_flag = click.confirm("Update '{}'?".format(module.name))
if update_flag:
# pylint: disable=broad-except
try:
module.update()
click.echo("Updated {}".format(module.name))
except Exception as ex:
logger.exception(ex)
click.echo(
"Something went wrong, {} (check the logs)".format(str(ex))
)
# pylint: enable=broad-except
else:
click.echo("None of the modules found on the device need an update.")
# Allows execution via `python -m circup ...`
# pylint: disable=no-value-for-parameter
if __name__ == "__main__": # pragma: no cover
main()
| 37.47486
| 89
| 0.611906
|
a2ab56021c5f70e5d83d62278481e4102b0e0b64
| 12,487
|
py
|
Python
|
thinc/neural/_classes/rnn.py
|
alephmelo/thinc
|
5c4340eddbe1f21649f773bc68b4b0600cf414ba
|
[
"MIT"
] | 1
|
2018-10-30T07:19:27.000Z
|
2018-10-30T07:19:27.000Z
|
thinc/neural/_classes/rnn.py
|
alephmelo/thinc
|
5c4340eddbe1f21649f773bc68b4b0600cf414ba
|
[
"MIT"
] | 4
|
2020-07-26T02:10:42.000Z
|
2021-03-31T18:48:58.000Z
|
thinc/neural/_classes/rnn.py
|
alephmelo/thinc
|
5c4340eddbe1f21649f773bc68b4b0600cf414ba
|
[
"MIT"
] | 1
|
2020-11-18T06:18:15.000Z
|
2020-11-18T06:18:15.000Z
|
from ...api import layerize
from ..util import get_array_module
from .model import Model
def begin_stepwise_tanh(X, nG):
xp = get_array_module(X)
Y = xp.zeros(X.shape, dtype='f')
def tanh_fwd(t):
Y[t] = xp.tanh(X[t])
return Y[t]
def tanh_bwd(dY):
return (1-Y**2) * dY
return Y, tanh_fwd, tanh_bwd
def begin_stepwise_relu(X, nG):
xp = get_array_module(X)
Y = xp.zeros(X.shape, dtype='f')
def relu_fwd(t):
Y[t] = X[t] * (X[t] > 0)
return Y[t]
def relu_bwd(dY):
return dY * (X>0)
return Y, relu_fwd, relu_bwd
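# Illustrative sketch, for exposition only: drives the stepwise ReLU helper
# one timestep at a time and checks that the backward pass masks gradients
# where the input was non-positive.
def _example_begin_stepwise_relu():
    import numpy
    X = numpy.asarray([[-1., 2.], [3., -4.]], dtype='f')
    Y, relu_fwd, relu_bwd = begin_stepwise_relu(X, 1)
    for t in range(X.shape[0]):
        relu_fwd(t)
    assert (Y == numpy.asarray([[0., 2.], [3., 0.]], dtype='f')).all()
    dX = relu_bwd(numpy.ones_like(Y))
    assert (dX == numpy.asarray([[0., 1.], [1., 0.]], dtype='f')).all()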
def begin_stepwise_LSTM(gates, nG):
    xp = get_array_module(gates)
    ops = Model.ops  # the backend ops object provides the lstm kernels used below
nN = gates.shape[0]
nO = gates.shape[1]//nG
gates = gates.reshape((nN, nO, nG))
Hout = xp.zeros((nN, nO), dtype='f')
cells = xp.zeros((nN, nO), dtype='f')
pad = xp.zeros((nO,), dtype='f')
d_pad = xp.zeros((nO,), dtype='f')
def lstm_nonlin_fwd(t):
ops.lstm(Hout[t], cells[t],
gates[t], cells[t-1] if t >= 1 else pad)
return Hout[t]
def lstm_nonlin_bwd(d_output, Wh):
d_gates = xp.zeros(gates.shape, dtype='f')
d_cells = xp.zeros(cells.shape, dtype='f')
if d_output.shape[0] >= 2:
d_gates[:-1] += xp.tensordot(d_output[1:], Wh,
axes=[[1], [1]]).reshape((nN-1, nO, nG))
for t in range(d_output.shape[0]-1, 0, -1):
ops.backprop_lstm(d_cells[t], d_cells[t-1], d_gates[t],
d_output[t], gates[t], cells[t], cells[t-1])
ops.backprop_lstm(d_cells[0], d_pad, d_gates[0], d_output[0], gates[0],
cells[0], pad)
return d_gates.reshape((nN, nO*nG))
return Hout, lstm_nonlin_fwd, lstm_nonlin_bwd
def LSTM(width, residual=False, xp=None):
alloc, params = xp_params(xp)
model = _ResidualLSTM(alloc, width)
def lstm_fwd(X, drop=0.):
y, bp_y = model(X)
def lstm_bwd(dy, sgd=None):
dX = bp_y(dy)
for param, grad in params:
sgd(param.ravel(), grad.ravel(), key=id(param))
grad.fill(0)
return dX
return y, lstm_bwd
return layerize(lstm_fwd)
def BiLSTM(width, residual=False, xp=None):
alloc, params = xp_params(xp)
model = _BiLSTM(alloc, width, width, residual=residual)
def lstm_fwd(X, drop=0.):
y, bp_y = model(X)
def lstm_bwd(dy, sgd=None):
dX = bp_y(dy)
for param, grad in params:
sgd(param.ravel(), grad.ravel(), key=id(param))
grad.fill(0)
return dX
return y, lstm_bwd
return layerize(lstm_fwd)
def BiRNN(width, residual=False, xp=None):
alloc, params = xp_params(xp)
    # no begin_stepwise_selu helper is defined in this module; use the tanh default
    model = _BiRNN(alloc, width, width, nonlinearity=begin_stepwise_tanh,
residual=residual)
def rnn_fwd(X, drop=0.):
y, bp_y = model(X)
def rnn_bwd(dy, sgd=None):
dX = bp_y(dy)
for param, grad in params:
sgd(param.ravel(), grad.ravel(), key=id(param))
grad.fill(0)
return dX
return y, rnn_bwd
return layerize(rnn_fwd)
def RNN(width, residual=True, xp=None):
alloc, params = xp_params(xp)
model = _RNN(alloc, width, width, nonlinearity=begin_stepwise_relu,
residual=residual)
def rnn_fwd(X, drop=0.):
y, bp_y = model(X)
def rnn_bwd(dy, sgd=None):
dX = bp_y(dy)
for param, grad in params:
sgd(param.ravel(), grad.ravel(), key=id(param))
grad.fill(0)
return dX
return y, rnn_bwd
return layerize(rnn_fwd)
def xp_params(xp=None):
if xp is None:
xp = Model.Ops.xp
params = []
def allocate(shape, gradient=False):
param = xp.zeros(shape, dtype='f')
if not gradient:
return param
else:
d_param = xp.zeros(shape, dtype='f')
params.append([param, d_param])
return param, d_param
return allocate, params
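# Illustrative sketch, for exposition only: xp_params hands out parameter
# buffers and records (param, gradient) pairs so the layer constructors above
# can later pass them to the optimizer in their backward callbacks.
def _example_xp_params():
    import numpy
    alloc, params = xp_params(numpy)
    W, dW = alloc((2, 3), gradient=True)
    b = alloc((3,))
    assert W.shape == (2, 3) and dW.shape == (2, 3)
    assert b.shape == (3,)
    # Only buffers requested with gradient=True are tracked for updates.
    assert len(params) == 1 and params[0][0] is W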
def _BiRNN(alloc, nO, nI, nG=1, nonlinearity=begin_stepwise_tanh, residual=False):
#l2r_model = _RNN(alloc, nO, nI, nonlinearity, nG=nG, residual=residual)
#r2l_model = _RNN(alloc, nO, nI, nonlinearity, nG=nG, residual=residual)
assert nO == nI
l2r_model = _ResidualLSTM(alloc, nI)
r2l_model = _ResidualLSTM(alloc, nI)
def birnn_fwd(Xs):
xp = get_array_module(Xs[0])
l2r_Zs, bp_l2r_Zs = l2r_model(Xs)
r2l_Zs, bp_r2l_Zs = r2l_model([xp.ascontiguousarray(X[::-1]) for X in Xs])
def birnn_bwd(dZs):
d_l2r_Zs = []
d_r2l_Zs = []
for dZ in dZs:
l2r = dZ[:, :nO]
r2l = dZ[:, nO:]
d_l2r_Zs.append(xp.ascontiguousarray(l2r))
d_r2l_Zs.append(xp.ascontiguousarray(r2l[::-1]))
dXs_l2r = bp_l2r_Zs(d_l2r_Zs)
dXs_r2l = bp_r2l_Zs(d_r2l_Zs)
dXs = [dXf+dXb[::-1] for dXf, dXb in zip(dXs_l2r, dXs_r2l)]
return dXs
Zs = [xp.hstack((Zf, Zb[::-1])) for Zf, Zb in zip(l2r_Zs, r2l_Zs)]
return Zs, birnn_bwd
return birnn_fwd
def _BiLSTM(alloc, nO, nI, residual=False):
return _BiRNN(alloc, nO, nI, nG=4, nonlinearity=begin_stepwise_LSTM,
residual=residual)
def _RNN(alloc, nO, nI, nonlinearity=begin_stepwise_tanh, nG=1,
residual=False):
begin_nonlin = nonlinearity
if not residual:
Wx, dWx = alloc((nO*nG, nI), gradient=True)
Wh, dWh = alloc((nO*nG, nO), gradient=True)
b, db = alloc((nO*nG,), gradient=True)
pad, d_pad = alloc((nO,), gradient=True)
xp = get_array_module(Wh)
if not residual:
Wx += xp.random.normal(scale=xp.sqrt(1./nI), size=Wx.size).reshape(Wx.shape)
Wh += xp.random.normal(scale=xp.sqrt(1./nI), size=Wh.size).reshape(Wh.shape)
# Initialize forget gates' bias
if nG == 4:
b = b.reshape((nO, nG))
b[:, 0] = 3.
b = b.reshape((nO * nG,))
nonlocals = [[], d_pad, dWx, dWh, db]
def rnn_fwd(Xs):
nonlocals[0] = []
Zs = nonlocals[0]
Ys = []
backprops = []
for X in Xs:
if residual:
Y = xp.zeros((X.shape[0], nO*nG), dtype='f')
else:
Y = xp.tensordot(X, Wx, axes=[[1], [1]])
Y += b
Z, nonlin_fwd, bp_nonlin = begin_nonlin(Y, nG)
state = pad
for t in range(Y.shape[0]):
Y[t] += Wh.dot(state)
state = nonlin_fwd(t)
if residual:
Z[t] += X[t]
state += X[t]
backprops.append(bp_nonlin)
Ys.append(Y)
Zs.append(Z)
def rnn_bwd(dZs):
Zs, d_pad, dWx, dWh, db = nonlocals
dXs = []
for X, Z, dZ, bp_Z in zip(Xs, Zs, dZs, backprops):
dY = bp_Z(dZ, Wh)
if residual:
dX = dZ.copy()
else:
dX = xp.tensordot(dY, Wx, axes=[[1], [0]])
dWx += xp.tensordot(dY, X, axes=[[0], [0]])
if dY.shape[0] >= 2:
dWh += xp.tensordot(dY[1:], Z[:-1], axes=[[0], [0]])
db += dY.sum(axis=0)
dXs.append(dX)
return dXs
return Zs, rnn_bwd
return rnn_fwd
def _ResidualLSTM(alloc, nI):
nO = nI
nG = 4
W, dW = alloc((nO*nG, nO), gradient=True)
b, db = alloc((nO*nG,), gradient=True)
pad = alloc((nO,))
xp = get_array_module(W)
W += xp.random.normal(scale=xp.sqrt(1./nI), size=W.size).reshape(W.shape)
# Initialize forget gates' bias
b = b.reshape((nO, nG))
b[:, 0] = 3.
b = b.reshape((nO * nG,))
nonlocals = [dW, db]
ops = Model.ops
def lstm_fwd(Xs):
batch_gates = []
batch_cells = []
batch_Houts = []
for X in Xs:
nN = X.shape[0]
gates = xp.zeros((nN, nO * nG), dtype='f')
Hout = xp.zeros((nN, nO), dtype='f')
cells = xp.zeros((nN, nO), dtype='f')
gates += b
for t in range(nN):
gates[t] += W.dot(Hout[t-1] if t >= 1 else pad)
ops.lstm(Hout[t], cells[t], gates[t],
cells[t-1] if t >= 1 else pad)
Hout[t] += X[t]
batch_gates.append(gates)
batch_cells.append(cells)
batch_Houts.append(Hout)
def lstm_bwd(d_Houts):
dW, db = nonlocals
dXs = []
for X, gates, cells, dH in zip(Xs, batch_gates, batch_cells, d_Houts):
nN = X.shape[0]
d_gates = xp.zeros((nN, nO * nG), dtype='f')
d_cells = xp.zeros((nN, nO), dtype='f')
for t in range(nN-1, 0, -1):
ops.backprop_lstm(d_cells[t], d_cells[t-1], d_gates[t],
dH[t], gates[t], cells[t], cells[t-1])
dH[t-1] += xp.tensordot(d_gates[t], W, axes=[[0], [0]])
if nN >= 2:
dW += xp.tensordot(d_gates[1:], dH[:-1], axes=[[0], [0]])
db += d_gates.sum(axis=0)
dXs.append(dH.copy())
return dXs
return batch_Houts, lstm_bwd
return lstm_fwd
def lstm_fwd(Xs_lengths, W, b):
Xs, lengths = Xs_lengths
timesteps = []
Hp = pad
Cp = pad
    for t in range(max(lengths)):
Xt = _make_timestep(Xs, lengths, t)
Gt = xp.zeros((nB, nO, nG), dtype='f')
Ht = xp.zeros((nB, nO), dtype='f')
Ct = xp.zeros((nN, nO), dtype='f')
Gt += b
Gt += W.dot(Hp)
ops.lstm(Ht, Ct, Gt,
Cp)
Ht += Xt
timesteps.append((Xt, Gt, Ct))
_write_timestep(Hs, lengths, t, Ht)
Cp = Ct
Hp = Ht
def lstm_bwd(dHs):
dXs = []
Cp = pad
Hp = pad
dHp = xp.zeros((nB, nO), dtype='f')
dXs = xp.zeros(Xs.shape, dtype='f')
        for t, (Xt, Gt, Ct) in reversed(list(enumerate(timesteps))):
dHt = dHp + _make_timestep(Hs, lengths, t)
dGt.fill(0)
dCt.fill(0)
ops.backprop_lstm(dCt, dCp, dGt,
dHt, Gt, Ct, Cp)
dHp = dG.dot(W.T)
dW += xp.tensordot(dGn, dHt, axes=[[0], [0]])
db += dGt.sum(axis=0)
_write_timestep(dXs, lengths, t, dHt)
dCt, dCp = dCp, dCt
return dXs
return Hs, lstm_bwd
def _make_timestep(Xs, lengths, t):
xp = get_array_module(Xs)
n = 0
for i, length in enumerate(lengths):
        n += t < length
output = xp.zeros((n,) + Xs.shape[1:], dtype=Xs.dtype)
start = 0
i = 0
for length in lengths:
if t < length:
output[i] = Xs[start + t]
i += 1
start += length
return output
def _write_timestep(Xs, lengths, t, timestep):
xp = get_array_module(Xs)
start = 0
i = 0
for length in lengths:
if t < length:
Xs[start + t] = timestep[i]
i += 1
start += length
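# Illustrative sketch, for exposition only: two sequences of lengths 3 and 1
# are packed row-wise into one array; timestep 0 is visible to both sequences,
# timestep 2 only to the first, and _write_timestep is the inverse operation.
def _example_timestep_packing():
    import numpy
    Xs = numpy.arange(8, dtype='f').reshape((4, 2))  # rows 0-2: seq A, row 3: seq B
    lengths = [3, 1]
    t0 = _make_timestep(Xs, lengths, 0)
    assert t0.shape == (2, 2) and (t0 == Xs[[0, 3]]).all()
    t2 = _make_timestep(Xs, lengths, 2)
    assert t2.shape == (1, 2) and (t2 == Xs[[2]]).all()
    _write_timestep(Xs, lengths, 0, t0 + 1)
    assert (Xs[0] == numpy.asarray([1., 2.], dtype='f')).all()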
#
#def begin_stepwise_LSTM(gates, nG):
# ops = NumpyOps()
# xp = ops.xp
# nN = gates.shape[0]
# nO = gates.shape[1]//nG
# gates = gates.reshape((nN, nO, nG))
# Hout = numpy.zeros((nN, nO), dtype='f')
# cells = numpy.zeros((nN, nO), dtype='f')
# pad = numpy.zeros((nO,), dtype='f')
# d_pad = numpy.zeros((nO,), dtype='f')
# def lstm_nonlin_fwd(t):
# ops.lstm(Hout[t], cells[t],
# gates[t], cells[t-1] if t >= 1 else pad)
# return Hout[t]
#
# def lstm_nonlin_bwd(d_output, Wh):
# d_gates = numpy.zeros(gates.shape, dtype='f')
# d_cells = numpy.zeros(cells.shape, dtype='f')
# if d_output.shape[0] >= 2:
# d_gates[:-1] += xp.tensordot(d_output[1:], Wh,
# axes=[[1], [1]]).reshape((nN-1, nO, nG))
# for t in range(d_output.shape[0]-1, 0, -1):
# ops.backprop_lstm(d_cells[t], d_cells[t-1], d_gates[t],
# d_output[t], gates[t], cells[t], cells[t-1])
# ops.backprop_lstm(d_cells[0], d_pad, d_gates[0], d_output[0], gates[0],
# cells[0], pad)
# return d_gates.reshape((nN, nO*nG))
#
# return Hout, lstm_nonlin_fwd, lstm_nonlin_bwd
#
#
| 32.100257
| 84
| 0.51005
|
1c5029a56352751e5100e49abd7dc7e5c540acd0
| 2,061
|
py
|
Python
|
harness/tests/experiment/fixtures/tf_keras_xor_model_native.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 1
|
2021-03-29T13:39:45.000Z
|
2021-03-29T13:39:45.000Z
|
harness/tests/experiment/fixtures/tf_keras_xor_model_native.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | null | null | null |
harness/tests/experiment/fixtures/tf_keras_xor_model_native.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import pathlib
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
from determined.experimental.keras import init
from tests.experiment import utils
def categorical_error(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
return 1.0 - categorical_accuracy(y_true, y_pred)
def predictions(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
return y_pred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local", action="store_true")
parser.add_argument("--test", action="store_true")
parser.add_argument("--use-dataset", action="store_true")
args = parser.parse_args()
config = {
"hyperparameters": {
"hidden_size": 2,
"learning_rate": 0.1,
"global_batch_size": 4,
"trial_type": "default",
}
}
context = init(
config=config, local=args.local, test=args.test, context_dir=str(pathlib.Path.cwd())
)
model = Sequential()
model.add(Dense(context.get_hparam("hidden_size"), activation="sigmoid", input_shape=(2,)))
model.add(Dense(1))
if args.use_dataset:
data, labels = utils.xor_data()
train = context.wrap_dataset(tf.data.Dataset.from_tensor_slices((data, labels)))
train = train.batch(context.get_hparam("global_batch_size"))
valid = context.wrap_dataset(tf.data.Dataset.from_tensor_slices((data, labels)))
valid = valid.batch(context.get_hparam("global_batch_size"))
else:
train, valid = utils.make_xor_data_sequences(batch_size=4)
model = context.wrap_model(model)
model.compile(
SGD(lr=context.get_hparam("learning_rate")),
binary_crossentropy,
metrics=[categorical_error],
)
model.fit(x=train, steps_per_epoch=100, validation_data=valid, workers=0)
| 32.203125
| 95
| 0.697234
|
62bb9b71c2d8e63c85c6b48ce0f22a89e2dd4bcc
| 2,097
|
py
|
Python
|
algoplex/api/simulator/market_data_sim.py
|
dmitryaleks/algo-plex
|
c83421642fc1ac11e558126ec73909b175b07862
|
[
"BSD-2-Clause"
] | null | null | null |
algoplex/api/simulator/market_data_sim.py
|
dmitryaleks/algo-plex
|
c83421642fc1ac11e558126ec73909b175b07862
|
[
"BSD-2-Clause"
] | null | null | null |
algoplex/api/simulator/market_data_sim.py
|
dmitryaleks/algo-plex
|
c83421642fc1ac11e558126ec73909b175b07862
|
[
"BSD-2-Clause"
] | null | null | null |
from algoplex.api.common.market_data import MarketData
import threading
import os
import time
class MarketDataSim(MarketData):
def __init__(self, market_data_file):
self.market_data_file = market_data_file
self.subscribers = []
self.subscribed = False
        self.watcher = None
        self.last_trade_price = None
self.active = False
self.prices = []
full_path = os.path.dirname(__file__) + '/../../data/' + self.market_data_file
with open(full_path) as f:
lines = f.readlines()
for line in lines:
                (_, price) = line.split(',')
self.prices.append(float(price))
self.cursor = 0
def subscribe(self, subscriber):
self.subscribers.append(subscriber)
if(self.subscribers.__len__() > 0 and not self.subscribed):
self.start_subscription()
def unsubscribe(self, subscriber):
self.subscribers.remove(subscriber)
if(self.subscribers.__len__() == 0):
self.active = False
def start_subscription(self):
self.active = True
self.watcher = threading.Thread(target=self.watch_market_data)
self.watcher.start()
#self.watcher.join()
def watch_market_data(self):
try:
while self.active:
new_trade_price = float(self.get_last_price())
if(new_trade_price != self.last_trade_price
and new_trade_price != None):
self.last_trade_price = new_trade_price
for subscriber in self.subscribers:
subscriber.update(new_trade_price)
time.sleep(0.1)
except Exception as e:
for subscriber in self.subscribers:
subscriber.on_data_end()
def get_last_price(self):
if(self.cursor < self.prices.__len__()):
price = self.prices[self.cursor]
self.cursor += 1
return price
else:
raise Exception("No more data")
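# Illustrative sketch, for exposition only: a minimal subscriber implementing
# the update()/on_data_end() protocol that MarketDataSim calls back into.
# The 'prices.csv' file name in the wiring comment below is hypothetical.
class ExamplePrintingSubscriber(object):
    def __init__(self):
        self.ticks = []
    def update(self, price):
        self.ticks.append(price)
    def on_data_end(self):
        print("replay finished after %d ticks" % len(self.ticks))
# Typical wiring (not executed here):
#   feed = MarketDataSim('prices.csv')
#   feed.subscribe(ExamplePrintingSubscriber())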
| 32.261538
| 86
| 0.587983
|
f484f4f51c852a95329552a015ff19480994c6bd
| 22,935
|
py
|
Python
|
download_from_google_storage.py
|
Cosium/depot_tools
|
e117e46a6894979f85f6329dcb04c945a93f37d8
|
[
"BSD-3-Clause"
] | null | null | null |
download_from_google_storage.py
|
Cosium/depot_tools
|
e117e46a6894979f85f6329dcb04c945a93f37d8
|
[
"BSD-3-Clause"
] | null | null | null |
download_from_google_storage.py
|
Cosium/depot_tools
|
e117e46a6894979f85f6329dcb04c945a93f37d8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download files from Google Storage based on SHA1 sums."""
import hashlib
import optparse
import os
import Queue
import re
import shutil
import stat
import sys
import tarfile
import threading
import time
import subprocess2
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
class FileNotFoundError(IOError):
pass
class InvalidFileError(IOError):
pass
class InvalidPlatformError(Exception):
pass
def GetNormalizedPlatform():
"""Returns the result of sys.platform accounting for cygwin.
Under cygwin, this will always return "win32" like the native Python."""
if sys.platform == 'cygwin':
return 'win32'
return sys.platform
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
and is also immutable.
HACK: This object is used directly by the external script
`<depot_tools>/win_toolchain/get_toolchain_if_necessary.py`
"""
MAX_TRIES = 5
RETRY_BASE_DELAY = 5.0
RETRY_DELAY_MULTIPLE = 1.3
VPYTHON = 'vpython.bat' if GetNormalizedPlatform() == 'win32' else 'vpython'
def __init__(self, path, boto_path=None, timeout=None, version='4.28'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
self.timeout = timeout
self.boto_path = boto_path
self.version = version
def get_sub_env(self):
env = os.environ.copy()
if self.boto_path == os.devnull:
env['AWS_CREDENTIAL_FILE'] = ''
env['BOTO_CONFIG'] = ''
elif self.boto_path:
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
return env
def call(self, *args):
cmd = [self.VPYTHON, self.path, '--force-version', self.version]
cmd.extend(args)
return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
def check_call(self, *args):
cmd = [self.VPYTHON, self.path, '--force-version', self.version]
cmd.extend(args)
((out, err), code) = subprocess2.communicate(
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
env=self.get_sub_env(),
timeout=self.timeout)
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
'no configured credentials.' in err):
return (403, out, err)
if 'matched no objects' in err:
return (404, out, err)
return (code, out, err)
def check_call_with_retries(self, *args):
delay = self.RETRY_BASE_DELAY
for i in xrange(self.MAX_TRIES):
code, out, err = self.check_call(*args)
if not code or i == self.MAX_TRIES - 1:
break
time.sleep(delay)
delay *= self.RETRY_DELAY_MULTIPLE
return code, out, err
def check_platform(target):
"""Checks if any parent directory of target matches (win|mac|linux)."""
assert os.path.isabs(target)
root, target_name = os.path.split(target)
if not target_name:
return None
if target_name in ('linux', 'mac', 'win'):
return target_name
return check_platform(root)
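# Illustrative sketch, for exposition only: a .sha1 file under a directory
# named linux is attributed to the linux platform, because check_platform()
# walks parent directory names until it hits win, mac or linux.
def _example_check_platform():
  target = os.path.abspath(os.path.join('tools', 'linux', 'clang.sha1'))
  assert check_platform(target) == 'linux'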
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
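# Illustrative sketch, for exposition only: get_sha1() agrees with a direct
# hashlib digest on a small temporary file.
def _example_get_sha1():
  import tempfile
  tmp = tempfile.NamedTemporaryFile(delete=False)
  try:
    tmp.write(b'hello world')
    tmp.close()
    assert get_sha1(tmp.name) == hashlib.sha1(b'hello world').hexdigest()
  finally:
    os.remove(tmp.name)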
# Download-specific code starts here
def enumerate_input(input_filename, directory, recursive, ignore_errors, output,
sha1_file, auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
raise FileNotFoundError('%s not found.' % input_filename)
print >> sys.stderr, '%s not found.' % input_filename
with open(input_filename, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
yield (sha1_match.groups(1)[0], output)
return
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
return
if not directory:
yield (input_filename, output)
return
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
dirs.remove(item)
else:
for exclude in ['.svn', '.git']:
if exclude in dirs:
dirs.remove(exclude)
for filename in files:
full_path = os.path.join(root, filename)
if full_path.endswith('.sha1'):
if auto_platform:
# Skip if the platform does not match.
target_platform = check_platform(os.path.abspath(full_path))
if not target_platform:
err = ('--auto_platform passed in but no platform name found in '
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
print >> sys.stderr, err
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
yield (sha1_match.groups(1)[0], full_path.replace('.sha1', ''))
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % filename
def _validate_tar_file(tar, prefix):
def _validate(tarinfo):
"""Returns false if the tarinfo is something we explicitly forbid."""
if tarinfo.issym() or tarinfo.islnk():
return False
if '..' in tarinfo.name or not tarinfo.name.startswith(prefix):
return False
return True
return all(map(_validate, tar.getmembers()))
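# Illustrative sketch, for exposition only: builds a tiny in-memory tar.gz
# whose single entry lives under "bundle/" and checks that it validates for
# that prefix but not for another.
def _example_validate_tar_file():
  import io
  buf = io.BytesIO()
  with tarfile.open(fileobj=buf, mode='w:gz') as tar:
    data = b'hello'
    info = tarfile.TarInfo(name='bundle/readme.txt')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))
  buf.seek(0)
  with tarfile.open(fileobj=buf, mode='r:gz') as tar:
    assert _validate_tar_file(tar, 'bundle')
    assert not _validate_tar_file(tar, 'other')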
def _downloader_worker_thread(thread_num, q, force, base_url,
gsutil, out_q, ret_codes, _verbose, extract,
delete=True):
while True:
input_sha1_sum, output_filename = q.get()
if input_sha1_sum is None:
return
extract_dir = None
if extract:
if not output_filename.endswith('.tar.gz'):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
extract_dir = output_filename[:-len('.tar.gz')]
if os.path.exists(output_filename) and not force:
if not extract or os.path.exists(extract_dir):
if get_sha1(output_filename) == input_sha1_sum:
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
(code, _, err) = gsutil.check_call('ls', file_url)
if code != 0:
if code == 404:
out_q.put('%d> File %s for %s does not exist, skipping.' % (
thread_num, file_url, output_filename))
ret_codes.put((1, 'File %s for %s does not exist.' % (
file_url, output_filename)))
else:
# Other error, probably auth related (bad ~/.boto, etc).
out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % (
thread_num, file_url, output_filename, err))
ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % (
file_url, output_filename, err)))
continue
# Fetch the file.
out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
if delete:
os.remove(output_filename) # Delete the file if it exists already.
except OSError:
if os.path.exists(output_filename):
out_q.put('%d> Warning: deleting %s failed.' % (
thread_num, output_filename))
code, _, err = gsutil.check_call('cp', file_url, output_filename)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
continue
remote_sha1 = get_sha1(output_filename)
if remote_sha1 != input_sha1_sum:
msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' %
(thread_num, remote_sha1, input_sha1_sum))
out_q.put(msg)
ret_codes.put((20, msg))
continue
if extract:
if not tarfile.is_tarfile(output_filename):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
with tarfile.open(output_filename, 'r:gz') as tar:
dirname = os.path.dirname(os.path.abspath(output_filename))
# If there are long paths inside the tarball we can get extraction
# errors on windows due to the 260 path length limit (this includes
# pwd). Use the extended path syntax.
if sys.platform == 'win32':
dirname = '\\\\?\\%s' % dirname
if not _validate_tar_file(tar, os.path.basename(extract_dir)):
out_q.put('%d> Error: %s contains files outside %s.' % (
thread_num, output_filename, extract_dir))
ret_codes.put((1, '%s contains invalid entries.' % (output_filename)))
continue
if os.path.exists(extract_dir):
try:
shutil.rmtree(extract_dir)
out_q.put('%d> Removed %s...' % (thread_num, extract_dir))
except OSError:
out_q.put('%d> Warning: Can\'t delete: %s' % (
thread_num, extract_dir))
ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir)))
continue
out_q.put('%d> Extracting %d entries from %s to %s' %
(thread_num, len(tar.getmembers()),output_filename,
extract_dir))
tar.extractall(path=dirname)
# Set executable bit.
if sys.platform == 'cygwin':
# Under cygwin, mark all files as executable. The executable flag in
# Google Storage will not be set when uploading from Windows, so if
# this script is running under cygwin and we're downloading an
# executable, it will be unrunnable from inside cygwin without this.
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
      code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
elif re.search(r'executable:\s*1', out):
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
class PrinterThread(threading.Thread):
def __init__(self, output_queue):
super(PrinterThread, self).__init__()
self.output_queue = output_queue
self.did_print_anything = False
def run(self):
while True:
line = self.output_queue.get()
# It's plausible we want to print empty lines: Explicit `is None`.
if line is None:
break
self.did_print_anything = True
print line
def _data_exists(input_sha1_sum, output_filename, extract):
"""Returns True if the data exists locally and matches the sha1.
This conservatively returns False for error cases.
Args:
input_sha1_sum: Expected sha1 stored on disk.
output_filename: The file to potentially download later. Its sha1 will be
compared to input_sha1_sum.
    extract: Whether or not a downloaded file should be extracted. If the file
is not extracted, this just compares the sha1 of the file. If the file
is to be extracted, this only compares the sha1 of the target archive if
the target directory already exists. The content of the target directory
is not checked.
"""
extract_dir = None
if extract:
if not output_filename.endswith('.tar.gz'):
      # This will cause an error later. Conservatively return False to not bail
# out too early.
return False
extract_dir = output_filename[:-len('.tar.gz')]
if os.path.exists(output_filename):
if not extract or os.path.exists(extract_dir):
if get_sha1(output_filename) == input_sha1_sum:
return True
return False
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform, extract):
# Tuples of sha1s and paths.
input_data = list(enumerate_input(
input_filename, directory, recursive, ignore_errors, output, sha1_file,
auto_platform))
# Sequentially check for the most common case and see if we can bail out
# early before making any slow calls to gsutil.
if not force and all(
_data_exists(sha1, path, extract) for sha1, path in input_data):
return 0
# Call this once to ensure gsutil's update routine is called only once. Only
# needs to be done if we'll process input data in parallel, which can lead to
# a race in gsutil's self-update on the first call. Note, this causes a
# network call, therefore any fast bailout should be done before this point.
if len(input_data) > 1:
gsutil.check_call('version')
# Start up all the worker threads.
all_threads = []
download_start = time.time()
stdout_queue = Queue.Queue()
work_queue = Queue.Queue()
ret_codes = Queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
target=_downloader_worker_thread,
args=[thread_num, work_queue, force, base_url,
gsutil, stdout_queue, ret_codes, verbose, extract])
t.daemon = True
t.start()
all_threads.append(t)
printer_thread = PrinterThread(stdout_queue)
printer_thread.daemon = True
printer_thread.start()
# Populate our work queue.
for sha1, path in input_data:
work_queue.put((sha1, path))
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
# Wait for all downloads to finish.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
# Only print summary if any work was done.
if printer_thread.did_print_anything:
print 'Downloading %d files took %1f second(s)' % (
len(input_data), time.time() - download_start)
return max_ret_code
def main(args):
usage = ('usage: %prog [options] target\n'
'Target must be:\n'
' (default) a sha1 sum ([A-Za-z0-9]{40}).\n'
' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on '
'the first line.\n'
' (-d or --directory) A directory to scan for .sha1 files.')
parser = optparse.OptionParser(usage)
parser.add_option('-o', '--output',
help='Specify the output file name. Defaults to: '
'(a) Given a SHA1 hash, the name is the SHA1 hash. '
'(b) Given a .sha1 file or directory, the name will '
'match (.*).sha1.')
parser.add_option('-b', '--bucket',
help='Google Storage bucket to fetch from.')
parser.add_option('-e', '--boto',
help='Specify a custom boto file.')
parser.add_option('-c', '--no_resume', action='store_true',
help='DEPRECATED: Resume download if file is '
'partially downloaded.')
parser.add_option('-f', '--force', action='store_true',
help='Force download even if local file exists.')
parser.add_option('-i', '--ignore_errors', action='store_true',
help='Don\'t throw error if we find an invalid .sha1 file.')
parser.add_option('-r', '--recursive', action='store_true',
help='Scan folders recursively for .sha1 files. '
'Must be used with -d/--directory')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of downloader threads to run.')
parser.add_option('-d', '--directory', action='store_true',
help='The target is a directory. '
'Cannot be used with -s/--sha1_file.')
parser.add_option('-s', '--sha1_file', action='store_true',
help='The target is a file containing a sha1 sum. '
'Cannot be used with -d/--directory.')
parser.add_option('-g', '--config', action='store_true',
help='Alias for "gsutil config". Run this if you want '
'to initialize your saved Google Storage '
'credentials. This will create a read-only '
'credentials file in ~/.boto.depot_tools.')
parser.add_option('-n', '--no_auth', action='store_true',
help='Skip auth checking. Use if it\'s known that the '
'target bucket is a public bucket.')
parser.add_option('-p', '--platform',
help='A regular expression that is compared against '
'Python\'s sys.platform. If this option is specified, '
'the download will happen only if there is a match.')
parser.add_option('-a', '--auto_platform',
action='store_true',
help='Detects if any parent folder of the target matches '
'(linux|mac|win). If so, the script will only '
                         'process files that are in paths matching the '
                         'current platform.')
parser.add_option('-u', '--extract',
action='store_true',
help='Extract a downloaded tar.gz file. '
                         'Leaves the tar.gz file around for sha1 '
                         'verification. If a directory with the same name as '
                         'the tar.gz file already exists, it is deleted (to '
                         'get a clean state in case of update).')
parser.add_option('-v', '--verbose', action='store_true', default=True,
help='DEPRECATED: Defaults to True. Use --no-verbose '
'to suppress.')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppresses diagnostic and progress information.')
(options, args) = parser.parse_args()
# Make sure we should run at all based on platform matching.
if options.platform:
if options.auto_platform:
parser.error('--platform can not be specified with --auto_platform')
if not re.match(options.platform, GetNormalizedPlatform()):
if options.verbose:
print('The current platform doesn\'t match "%s", skipping.' %
options.platform)
return 0
# Set the boto file to /dev/null if we don't need auth.
if options.no_auth:
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
'environment, but gsutil in depot_tools does not '
'(yet) obey them.')
print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
'environment variable from being used.')
print >> sys.stderr, ('To use a proxy in this situation, please supply '
'those settings in a .boto file pointed to by '
'the NO_AUTH_BOTO_CONFIG environment var.')
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH,
boto_path=options.boto)
else:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
print '===Note from depot_tools==='
print 'If you do not have a project ID, enter "0" when asked for one.'
print '===End note from depot_tools==='
print
gsutil.check_call('version')
return gsutil.call('config')
if not args:
parser.error('Missing target.')
if len(args) > 1:
parser.error('Too many targets.')
if not options.bucket:
parser.error('Missing bucket. Specify bucket with --bucket.')
if options.sha1_file and options.directory:
parser.error('Both --directory and --sha1_file are specified, '
'can only specify one.')
if options.recursive and not options.directory:
parser.error('--recursive specified but --directory not specified.')
if options.output and options.directory:
parser.error('--directory is specified, so --output has no effect.')
if (not (options.sha1_file or options.directory)
and options.auto_platform):
parser.error('--auto_platform must be specified with either '
'--sha1_file or --directory')
input_filename = args[0]
# Set output filename if not specified.
if not options.output and not options.directory:
if not options.sha1_file:
# Target is a sha1 sum, so output filename would also be the sha1 sum.
options.output = input_filename
elif options.sha1_file:
# Target is a .sha1 file.
if not input_filename.endswith('.sha1'):
parser.error('--sha1_file is specified, but the input filename '
'does not end with .sha1, and no --output is specified. '
'Either make sure the input filename has a .sha1 '
'extension, or specify --output.')
options.output = input_filename[:-5]
else:
parser.error('Unreachable state.')
base_url = 'gs://%s' % options.bucket
return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform,
options.extract)
if __name__ == '__main__':
sys.exit(main(sys.argv))
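The routine above fans work out to a pool of daemon threads that drain a shared queue, push per-item return codes onto a second queue, and stop when they receive a sentinel (one per worker). Below is a minimal, self-contained Python 3 sketch of that pattern; the names (worker, jobs, results, run) and the echoed message are illustrative only, and the real depot_tools workers shell out to gsutil instead.

import queue
import threading

def worker(jobs, results):
    # Drain the shared queue until the stop sentinel (None) arrives.
    while True:
        item = jobs.get()
        if item is None:
            break
        sha1, path = item
        # A real worker would invoke gsutil here; this sketch just echoes the pair.
        results.put((0, 'fetched %s -> %s' % (sha1, path)))

def run(input_data, num_threads=4):
    jobs, results = queue.Queue(), queue.Queue()
    threads = [threading.Thread(target=worker, args=(jobs, results), daemon=True)
               for _ in range(num_threads)]
    for t in threads:
        t.start()
    for pair in input_data:
        jobs.put(pair)
    for _ in threads:
        jobs.put(None)  # one sentinel per worker, mirroring the (None, None) puts above
    for t in threads:
        t.join()
    return max((code for code, _ in results.queue), default=0)

if __name__ == '__main__':
    print(run([('0' * 40, 'example.tar.gz')]))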
| 38.546218
| 80
| 0.628733
|
7b0b22b56f833b28e7450791b63e46edb618aad3
| 473
|
py
|
Python
|
tests/test_sho.py
|
aticio/legitindicators
|
a3ecbbe8739df84f0a7031d78c13ed809c68ca9a
|
[
"MIT"
] | 4
|
2020-04-06T15:02:30.000Z
|
2021-05-26T12:39:06.000Z
|
tests/test_sho.py
|
aticio/legitindicators
|
a3ecbbe8739df84f0a7031d78c13ed809c68ca9a
|
[
"MIT"
] | null | null | null |
tests/test_sho.py
|
aticio/legitindicators
|
a3ecbbe8739df84f0a7031d78c13ed809c68ca9a
|
[
"MIT"
] | 1
|
2020-04-06T14:32:03.000Z
|
2020-04-06T14:32:03.000Z
|
import requests
from legitindicators import simple_harmonic_oscillator
BINANCE_URL = "https://api.binance.com/api/v3/klines"
SYMBOL = "BTCUSDT"
INTERVAL = "1h"
PARAMS = {"symbol":SYMBOL, "interval":INTERVAL}
def test_simple_harmonic_oscillator():
response = requests.get(url=BINANCE_URL, params=PARAMS)
data = response.json()
close = [float(d[4]) for d in data]
sho = simple_harmonic_oscillator(close, 14)
print(sho)
assert len(sho) == len(close)
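The test above depends on a live Binance response, so its result can vary with network availability. Assuming simple_harmonic_oscillator only needs a list of floats and an integer length, as the call above suggests, a deterministic offline variant could look like the sketch below; the synthetic sine-wave series is invented purely for illustration.

import math
from legitindicators import simple_harmonic_oscillator

def test_simple_harmonic_oscillator_offline():
    # A noiseless synthetic "close" series stands in for live candles.
    close = [100.0 + 10.0 * math.sin(i / 5.0) for i in range(200)]
    sho = simple_harmonic_oscillator(close, 14)
    assert len(sho) == len(close)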
| 29.5625
| 59
| 0.725159
|
ac36ed19f20332170297f0b88a490b3149179d1f
| 612
|
py
|
Python
|
system/protocols/mumble/structs.py
|
UltrosBot/Ultros
|
639efc11f73ebf7e8f47f0554aced00a559d9e2d
|
[
"Artistic-2.0"
] | 16
|
2015-01-02T00:16:11.000Z
|
2019-06-03T06:23:11.000Z
|
system/protocols/mumble/structs.py
|
UltrosBot/Ultros
|
639efc11f73ebf7e8f47f0554aced00a559d9e2d
|
[
"Artistic-2.0"
] | 31
|
2015-01-18T12:14:53.000Z
|
2018-01-07T13:32:29.000Z
|
system/protocols/mumble/structs.py
|
UltrosBot/Ultros
|
639efc11f73ebf7e8f47f0554aced00a559d9e2d
|
[
"Artistic-2.0"
] | 9
|
2015-02-13T09:38:53.000Z
|
2020-11-29T19:59:32.000Z
|
# coding=utf-8
from collections import namedtuple
__author__ = 'Sean'
Version = namedtuple("Version", ["version", "release", "os", "os_version"])
class Stats(object):
"""
Mumble user connection stats
"""
def __init__(self, good=0, late=0, lost=0, resync=0):
self.good = good
self.late = late
self.lost = lost
self.resync = resync
def __repr__(self):
return "%s(good=%s, late=%s, lost=%s, resync=%s)" % (
self.__class__.__name__,
self.good,
self.late,
self.lost,
self.resync
)
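Given the definitions above, a quick usage sketch might look as follows; the version tuple and packet counts are invented values rather than anything the Mumble protocol mandates.

# Illustrative values only.
version = Version(version=(1, 3, 0), release='1.3.0', os='Linux', os_version='5.x')
stats = Stats(good=120, late=3, lost=1, resync=0)
print(version.release, stats)  # 1.3.0 Stats(good=120, late=3, lost=1, resync=0)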
| 20.4
| 75
| 0.545752
|
1f603fab01c3d2c1cf16a9d8afbfac005ba6bd14
| 23,314
|
py
|
Python
|
sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_3_preview/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_3_preview/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_3_preview/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.not_before = kwargs.get('not_before', None)
self.expires = kwargs.get('expires', None)
self.created = None
self.updated = None
class Error(msrest.serialization.Model):
"""The key vault server error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar inner_error: The key vault server error.
:vartype inner_error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'inner_error': {'key': 'innererror', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.inner_error = None
class FullBackupOperation(msrest.serialization.Model):
"""Full backup operation.
:param status: Status of the backup operation.
:type status: str
:param status_details: The status details of backup operation.
:type status_details: str
:param error: Error encountered, if any, during the full backup operation.
:type error: ~azure.keyvault.v7_3_preview.models.Error
:param start_time: The start time of the backup operation in UTC.
:type start_time: ~datetime.datetime
:param end_time: The end time of the backup operation in UTC.
:type end_time: ~datetime.datetime
:param job_id: Identifier for the full backup operation.
:type job_id: str
:param azure_storage_blob_container_uri: The Azure blob storage container Uri which contains
the full backup.
:type azure_storage_blob_container_uri: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'statusDetails', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'start_time': {'key': 'startTime', 'type': 'unix-time'},
'end_time': {'key': 'endTime', 'type': 'unix-time'},
'job_id': {'key': 'jobId', 'type': 'str'},
'azure_storage_blob_container_uri': {'key': 'azureStorageBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FullBackupOperation, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.status_details = kwargs.get('status_details', None)
self.error = kwargs.get('error', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.job_id = kwargs.get('job_id', None)
self.azure_storage_blob_container_uri = kwargs.get('azure_storage_blob_container_uri', None)
class KeyVaultError(msrest.serialization.Model):
"""The key vault error exception.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The key vault server error.
:vartype error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultError, self).__init__(**kwargs)
self.error = None
class Permission(msrest.serialization.Model):
"""Role definition permissions.
:param actions: Action permissions that are granted.
:type actions: list[str]
:param not_actions: Action permissions that are excluded but not denied. They may be granted by
other role definitions assigned to a principal.
:type not_actions: list[str]
:param data_actions: Data action permissions that are granted.
:type data_actions: list[str or ~azure.keyvault.v7_3_preview.models.DataAction]
:param not_data_actions: Data action permissions that are excluded but not denied. They may be
granted by other role definitions assigned to a principal.
:type not_data_actions: list[str or ~azure.keyvault.v7_3_preview.models.DataAction]
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[str]'},
'not_actions': {'key': 'notActions', 'type': '[str]'},
'data_actions': {'key': 'dataActions', 'type': '[str]'},
'not_data_actions': {'key': 'notDataActions', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(Permission, self).__init__(**kwargs)
self.actions = kwargs.get('actions', None)
self.not_actions = kwargs.get('not_actions', None)
self.data_actions = kwargs.get('data_actions', None)
self.not_data_actions = kwargs.get('not_data_actions', None)
class RestoreOperation(msrest.serialization.Model):
"""Restore operation.
:param status: Status of the restore operation.
:type status: str
:param status_details: The status details of restore operation.
:type status_details: str
:param error: Error encountered, if any, during the restore operation.
:type error: ~azure.keyvault.v7_3_preview.models.Error
:param job_id: Identifier for the restore operation.
:type job_id: str
:param start_time: The start time of the restore operation.
:type start_time: ~datetime.datetime
:param end_time: The end time of the restore operation.
:type end_time: ~datetime.datetime
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'statusDetails', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'unix-time'},
'end_time': {'key': 'endTime', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(RestoreOperation, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.status_details = kwargs.get('status_details', None)
self.error = kwargs.get('error', None)
self.job_id = kwargs.get('job_id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
class RestoreOperationParameters(msrest.serialization.Model):
"""RestoreOperationParameters.
All required parameters must be populated in order to send to Azure.
:param sas_token_parameters: Required.
:type sas_token_parameters: ~azure.keyvault.v7_3_preview.models.SASTokenParameter
:param folder_to_restore: Required. The Folder name of the blob where the previous successful
full backup was stored.
:type folder_to_restore: str
"""
_validation = {
'sas_token_parameters': {'required': True},
'folder_to_restore': {'required': True},
}
_attribute_map = {
'sas_token_parameters': {'key': 'sasTokenParameters', 'type': 'SASTokenParameter'},
'folder_to_restore': {'key': 'folderToRestore', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RestoreOperationParameters, self).__init__(**kwargs)
self.sas_token_parameters = kwargs['sas_token_parameters']
self.folder_to_restore = kwargs['folder_to_restore']
class RoleAssignment(msrest.serialization.Model):
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:param properties: Role assignment properties.
:type properties: ~azure.keyvault.v7_3_preview.models.RoleAssignmentPropertiesWithScope
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'RoleAssignmentPropertiesWithScope'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignment, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = kwargs.get('properties', None)
class RoleAssignmentCreateParameters(msrest.serialization.Model):
"""Role assignment create parameters.
All required parameters must be populated in order to send to Azure.
:param properties: Required. Role assignment properties.
:type properties: ~azure.keyvault.v7_3_preview.models.RoleAssignmentProperties
"""
_validation = {
'properties': {'required': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RoleAssignmentProperties'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentCreateParameters, self).__init__(**kwargs)
self.properties = kwargs['properties']
class RoleAssignmentFilter(msrest.serialization.Model):
"""Role Assignments filter.
:param principal_id: Returns role assignment of the specific principal.
:type principal_id: str
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentFilter, self).__init__(**kwargs)
self.principal_id = kwargs.get('principal_id', None)
class RoleAssignmentListResult(msrest.serialization.Model):
"""Role assignment list operation result.
:param value: Role assignment list.
:type value: list[~azure.keyvault.v7_3_preview.models.RoleAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class RoleAssignmentProperties(msrest.serialization.Model):
"""Role assignment properties.
All required parameters must be populated in order to send to Azure.
:param role_definition_id: Required. The role definition ID used in the role assignment.
:type role_definition_id: str
:param principal_id: Required. The principal ID assigned to the role. This maps to the ID
inside the Active Directory. It can point to a user, service principal, or security group.
:type principal_id: str
"""
_validation = {
'role_definition_id': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentProperties, self).__init__(**kwargs)
self.role_definition_id = kwargs['role_definition_id']
self.principal_id = kwargs['principal_id']
class RoleAssignmentPropertiesWithScope(msrest.serialization.Model):
"""Role assignment properties with scope.
:param scope: The role scope. Possible values include: "/", "/keys".
:type scope: str or ~azure.keyvault.v7_3_preview.models.RoleScope
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
"""
_attribute_map = {
'scope': {'key': 'scope', 'type': 'str'},
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentPropertiesWithScope, self).__init__(**kwargs)
self.scope = kwargs.get('scope', None)
self.role_definition_id = kwargs.get('role_definition_id', None)
self.principal_id = kwargs.get('principal_id', None)
class RoleDefinition(msrest.serialization.Model):
"""Role definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role definition ID.
:vartype id: str
:ivar name: The role definition name.
:vartype name: str
:ivar type: The role definition type. Possible values include:
"Microsoft.Authorization/roleDefinitions".
:vartype type: str or ~azure.keyvault.v7_3_preview.models.RoleDefinitionType
:param role_name: The role name.
:type role_name: str
:param description: The role definition description.
:type description: str
:param role_type: The role type. Possible values include: "AKVBuiltInRole", "CustomRole".
:type role_type: str or ~azure.keyvault.v7_3_preview.models.RoleType
:param permissions: Role definition permissions.
:type permissions: list[~azure.keyvault.v7_3_preview.models.Permission]
:param assignable_scopes: Role definition assignable scopes.
:type assignable_scopes: list[str or ~azure.keyvault.v7_3_preview.models.RoleScope]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'role_type': {'key': 'properties.type', 'type': 'str'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(RoleDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.role_name = kwargs.get('role_name', None)
self.description = kwargs.get('description', None)
self.role_type = kwargs.get('role_type', None)
self.permissions = kwargs.get('permissions', None)
self.assignable_scopes = kwargs.get('assignable_scopes', None)
class RoleDefinitionCreateParameters(msrest.serialization.Model):
"""Role definition create parameters.
All required parameters must be populated in order to send to Azure.
:param properties: Required. Role definition properties.
:type properties: ~azure.keyvault.v7_3_preview.models.RoleDefinitionProperties
"""
_validation = {
'properties': {'required': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RoleDefinitionProperties'},
}
def __init__(
self,
**kwargs
):
super(RoleDefinitionCreateParameters, self).__init__(**kwargs)
self.properties = kwargs['properties']
class RoleDefinitionFilter(msrest.serialization.Model):
"""Role Definitions filter.
:param role_name: Returns role definition with the specific name.
:type role_name: str
"""
_attribute_map = {
'role_name': {'key': 'roleName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleDefinitionFilter, self).__init__(**kwargs)
self.role_name = kwargs.get('role_name', None)
class RoleDefinitionListResult(msrest.serialization.Model):
"""Role definition list operation result.
:param value: Role definition list.
:type value: list[~azure.keyvault.v7_3_preview.models.RoleDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleDefinitionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class RoleDefinitionProperties(msrest.serialization.Model):
"""Role definition properties.
:param role_name: The role name.
:type role_name: str
:param description: The role definition description.
:type description: str
:param role_type: The role type. Possible values include: "AKVBuiltInRole", "CustomRole".
:type role_type: str or ~azure.keyvault.v7_3_preview.models.RoleType
:param permissions: Role definition permissions.
:type permissions: list[~azure.keyvault.v7_3_preview.models.Permission]
:param assignable_scopes: Role definition assignable scopes.
:type assignable_scopes: list[str or ~azure.keyvault.v7_3_preview.models.RoleScope]
"""
_attribute_map = {
'role_name': {'key': 'roleName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'role_type': {'key': 'type', 'type': 'str'},
'permissions': {'key': 'permissions', 'type': '[Permission]'},
'assignable_scopes': {'key': 'assignableScopes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(RoleDefinitionProperties, self).__init__(**kwargs)
self.role_name = kwargs.get('role_name', None)
self.description = kwargs.get('description', None)
self.role_type = kwargs.get('role_type', None)
self.permissions = kwargs.get('permissions', None)
self.assignable_scopes = kwargs.get('assignable_scopes', None)
class SASTokenParameter(msrest.serialization.Model):
"""SASTokenParameter.
All required parameters must be populated in order to send to Azure.
:param storage_resource_uri: Required. Azure Blob storage container Uri.
:type storage_resource_uri: str
:param token: Required. The SAS token pointing to an Azure Blob storage container.
:type token: str
"""
_validation = {
'storage_resource_uri': {'required': True},
'token': {'required': True},
}
_attribute_map = {
'storage_resource_uri': {'key': 'storageResourceUri', 'type': 'str'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SASTokenParameter, self).__init__(**kwargs)
self.storage_resource_uri = kwargs['storage_resource_uri']
self.token = kwargs['token']
class SelectiveKeyRestoreOperation(msrest.serialization.Model):
"""Selective Key Restore operation.
:param status: Status of the restore operation.
:type status: str
:param status_details: The status details of restore operation.
:type status_details: str
:param error: Error encountered, if any, during the selective key restore operation.
:type error: ~azure.keyvault.v7_3_preview.models.Error
:param job_id: Identifier for the selective key restore operation.
:type job_id: str
:param start_time: The start time of the restore operation.
:type start_time: ~datetime.datetime
:param end_time: The end time of the restore operation.
:type end_time: ~datetime.datetime
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'statusDetails', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'unix-time'},
'end_time': {'key': 'endTime', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(SelectiveKeyRestoreOperation, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.status_details = kwargs.get('status_details', None)
self.error = kwargs.get('error', None)
self.job_id = kwargs.get('job_id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
class SelectiveKeyRestoreOperationParameters(msrest.serialization.Model):
"""SelectiveKeyRestoreOperationParameters.
All required parameters must be populated in order to send to Azure.
:param sas_token_parameters: Required.
:type sas_token_parameters: ~azure.keyvault.v7_3_preview.models.SASTokenParameter
:param folder: Required. The Folder name of the blob where the previous successful full backup
was stored.
:type folder: str
"""
_validation = {
'sas_token_parameters': {'required': True},
'folder': {'required': True},
}
_attribute_map = {
'sas_token_parameters': {'key': 'sasTokenParameters', 'type': 'SASTokenParameter'},
'folder': {'key': 'folder', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelectiveKeyRestoreOperationParameters, self).__init__(**kwargs)
self.sas_token_parameters = kwargs['sas_token_parameters']
self.folder = kwargs['folder']
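As a small illustration of how the kwargs-based constructors above are typically fed, the sketch below builds a restore request; the container URI, SAS token, and folder name are placeholders, and serialize() is the generic msrest Model helper rather than something defined in this file.

# Placeholder values; real ones come from your storage account and backup job.
sas = SASTokenParameter(
    storage_resource_uri='https://myaccount.blob.core.windows.net/backup',
    token='<sas-token>',
)
params = RestoreOperationParameters(
    sas_token_parameters=sas,
    folder_to_restore='mhsm-backup-folder',
)
# serialize() is assumed standard msrest behaviour; it emits the wire-format keys
# from _attribute_map, e.g. 'sasTokenParameters' and 'folderToRestore'.
print(params.serialize())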
| 34.184751
| 100
| 0.639616
|
63e5f3aab9d74ecb6f10b3d99d408267a48c8eb1
| 113
|
py
|
Python
|
MonoTools/stellar/isoclassify/isoclassify/__init__.py
|
hposborn/MonoTools
|
972c0e2ef23e8008ba43ab3e8a94b526db35d7fc
|
[
"BSD-2-Clause"
] | 6
|
2020-07-27T18:00:39.000Z
|
2022-03-15T07:33:46.000Z
|
stellar/isoclassify/isoclassify/__init__.py
|
hposborn/NamastePymc3
|
243e73ce813163f787e89b24c44a3e97549df3a7
|
[
"MIT"
] | 14
|
2020-06-17T03:48:37.000Z
|
2022-02-07T18:16:52.000Z
|
stellar/isoclassify/isoclassify/__init__.py
|
hposborn/NamastePymc3
|
243e73ce813163f787e89b24c44a3e97549df3a7
|
[
"MIT"
] | 1
|
2019-09-26T18:19:15.000Z
|
2019-09-26T18:19:15.000Z
|
import os
DATADIR = os.environ['ISOCLASSIFY']
from .grid import *
from .direct import *
from . import pipeline
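Because DATADIR is read from the environment at import time, the ISOCLASSIFY variable has to be set before the package is imported. A sketch with a placeholder path:

import os
os.environ.setdefault('ISOCLASSIFY', '/path/to/isoclassify/data')  # placeholder path

import isoclassify
print(isoclassify.DATADIR)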
| 14.125
| 35
| 0.734513
|
e560f795a151acf991ac7cd74cf9277326a126d0
| 2,371
|
py
|
Python
|
cities/admin.py
|
aleluc13/django-cities
|
80de1e14b4e056f31b50bb1addb3b247b5b15ad2
|
[
"MIT"
] | null | null | null |
cities/admin.py
|
aleluc13/django-cities
|
80de1e14b4e056f31b50bb1addb3b247b5b15ad2
|
[
"MIT"
] | null | null | null |
cities/admin.py
|
aleluc13/django-cities
|
80de1e14b4e056f31b50bb1addb3b247b5b15ad2
|
[
"MIT"
] | null | null | null |
from django.contrib.gis import admin
import swapper
from .models import (Continent, Country, Region, Subregion, City, District,
PostalCode, AlternativeName)
class CitiesAdmin(admin.OSMGeoAdmin):
openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'
raw_id_fields = ['alt_names']
class ContinentAdmin(CitiesAdmin):
list_display = ['name', 'code']
class CountryAdmin(CitiesAdmin):
list_display = ['name', 'code', 'code3', 'tld', 'phone', 'continent', 'area', 'population']
search_fields = ['name', 'code', 'code3', 'tld', 'phone']
class RegionAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'code', 'country']
search_fields = ['name', 'name_std', 'code']
class SubregionAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'code', 'region']
search_fields = ['name', 'name_std', 'code']
raw_id_fields = ['alt_names', 'region']
class CityAdmin(CitiesAdmin):
ordering = ['name_std']
list_display = ['name_std', 'subregion', 'region', 'country', 'population']
search_fields = ['name', 'name_std']
raw_id_fields = ['alt_names', 'region', 'subregion']
class DistrictAdmin(CitiesAdmin):
raw_id_fields = ['alt_names', 'city']
list_display = ['name_std', 'city']
search_fields = ['name', 'name_std']
class AltNameAdmin(admin.ModelAdmin):
ordering = ['name']
list_display = ['name', 'language_code', 'is_preferred', 'is_short', 'is_historic']
list_filter = ['is_preferred', 'is_short', 'is_historic', 'language_code']
search_fields = ['name']
class PostalCodeAdmin(CitiesAdmin):
ordering = ['code']
list_display = ['code', 'subregion_name', 'region_name', 'country']
search_fields = ['code', 'country__name', 'region_name', 'subregion_name']
if not swapper.is_swapped('cities', 'Continent'):
admin.site.register(Continent, ContinentAdmin)
if not swapper.is_swapped('cities', 'Country'):
admin.site.register(Country, CountryAdmin)
admin.site.register(Region, RegionAdmin)
admin.site.register(Subregion, SubregionAdmin)
if not swapper.is_swapped('cities', 'City'):
admin.site.register(City, CityAdmin)
admin.site.register(District, DistrictAdmin)
admin.site.register(AlternativeName, AltNameAdmin)
admin.site.register(PostalCode, PostalCodeAdmin)
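The is_swapped guards above skip registration for models a project has replaced; such a project then registers its own model, typically reusing the admin classes exported here. A sketch under swapper's documented APP_MODEL_MODEL setting convention (the myapp names are placeholders, not part of django-cities):

# settings.py (assumed swapper setting name for a swapped Continent)
#   CITIES_CONTINENT_MODEL = 'myapp.Continent'

# myapp/admin.py
import swapper
from django.contrib.gis import admin
from cities.admin import ContinentAdmin

Continent = swapper.load_model('cities', 'Continent')
admin.site.register(Continent, ContinentAdmin)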
| 32.479452
| 95
| 0.694222
|
326682fcbc6b9c31c05ced77402a981f40dec7bd
| 2,248
|
py
|
Python
|
opencivicdata/elections/migrations/0006_auto_20171005_2029.py
|
palewire/python-opencivicdata
|
7862be45bef6846d3e284995d208fabcc8635362
|
[
"BSD-3-Clause"
] | 20
|
2017-06-23T17:31:48.000Z
|
2021-11-23T19:20:58.000Z
|
opencivicdata/elections/migrations/0006_auto_20171005_2029.py
|
palewire/python-opencivicdata
|
7862be45bef6846d3e284995d208fabcc8635362
|
[
"BSD-3-Clause"
] | 70
|
2015-01-06T18:40:22.000Z
|
2017-05-24T18:06:52.000Z
|
opencivicdata/elections/migrations/0006_auto_20171005_2029.py
|
california-civic-data-coalition/python-opencivicdata-django
|
375cd09d48908a7be58186de64f470b233f616d6
|
[
"BSD-3-Clause"
] | 17
|
2017-05-25T17:05:57.000Z
|
2021-06-05T14:45:39.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-05 20:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("elections", "0005_auto_20170823_1648")]
operations = [
migrations.AlterField(
model_name="candidacy",
name="party",
field=models.ForeignKey(
help_text="Reference to the Organization representing the political party that nominated the candidate or would nominate the candidate (as in the case of a partisan primary).",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="candidacies",
to="core.Organization",
),
),
migrations.AlterField(
model_name="candidatecontest",
name="party",
field=models.ForeignKey(
help_text="If the contest is among candidates of the same political party, e.g., a partisan primary election, reference to the Organization representing that party.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="candidate_contests",
to="core.Organization",
),
),
migrations.AlterField(
model_name="election",
name="administrative_organization",
field=models.ForeignKey(
help_text="Reference to the Organization that administers the election.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="elections",
to="core.Organization",
),
),
migrations.AlterField(
model_name="election",
name="division",
field=models.ForeignKey(
help_text="Reference to the Division that defines the broadest political geography of any contest to be decided by the election.",
on_delete=django.db.models.deletion.PROTECT,
related_name="elections",
to="core.Division",
),
),
]
| 38.758621
| 192
| 0.589413
|
82ec860f8afa037573d1a4785b2565bae6972d3a
| 2,492
|
py
|
Python
|
tests/models/validators/v2_1_2/jsd_4ca2db1143ebb5d7.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
tests/models/validators/v2_1_2/jsd_4ca2db1143ebb5d7.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
tests/models/validators/v2_1_2/jsd_4ca2db1143ebb5d7.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center Delete SP Profile data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator4Ca2Db1143EbB5D7(object):
"""Delete SP Profile request schema definition."""
def __init__(self):
super(JSONSchemaValidator4Ca2Db1143EbB5D7, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"executionId": {
"type": [
"string",
"null"
]
},
"executionStatusUrl": {
"type": [
"string",
"null"
]
},
"message": {
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
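A usage sketch for the validator above; the payload values are invented and merely satisfy the nullable-string schema.

# Invented response payload, shaped to match the schema above.
validator = JSONSchemaValidator4Ca2Db1143EbB5D7()
validator.validate({
    'executionId': '0a1b2c3d',
    'executionStatusUrl': '/dna/platform/management/execution-status/0a1b2c3d',
    'message': 'The request has been accepted for execution',
})  # raises MalformedRequest on a schema violation, otherwise returns None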
| 32.363636
| 78
| 0.614366
|