| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
__author__ = 'panzer'
def eucledian(one, two):
"""
Compute the Euclidean distance between
two vectors. The input vectors are assumed
to be normalized.
:param one: Vector 1
:param two: Vector 2
:return: Euclidean distance between the two vectors.
"""
dist = 0
for o_i, t_i in zip(one, two):
dist += (o_i - t_i)**2
return dist**0.5
def igd(obtained, ideals):
"""
Compute the Inverted Generational Distance (IGD) for a
set of solutions.
:param obtained: Obtained Pareto front
:param ideals: Ideal Pareto front
:return: Mean distance from each ideal point to its nearest obtained point.
"""
igd_val = 0
for d in ideals:
min_dist = float('inf')  # sys.maxint does not exist in Python 3
for o in obtained:
min_dist = min(min_dist, eucledian(o, d))
igd_val += min_dist
return igd_val/len(ideals)
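# A minimal usage sketch (not part of the original file); the fronts below are
# hypothetical, normalized objective vectors used only to illustrate the call.
if __name__ == '__main__':
  obtained_front = [(0.1, 0.9), (0.5, 0.5), (0.9, 0.1)]
  ideal_front = [(0.0, 1.0), (0.5, 0.4), (1.0, 0.0)]
  print('IGD:', igd(obtained_front, ideal_front))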
| ai-se/parGALE | measures/igd.py | Python | unlicense | 760 |
from awxkit.api.resources import resources
from . import page
class Subscriptions(page.Page):
def get_possible_licenses(self, **kwargs):
return self.post(json=kwargs).json
page.register_page(resources.subscriptions, Subscriptions)
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/subscriptions.py | Python | apache-2.0 | 248 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.models.multilayer_gcn import multilayer_gcn
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import overlapping_gaussians
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
'n_clusters',
2,
'Number of clusters for the synthetic graph.',
lower_bound=0)
flags.DEFINE_float(
'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_integer(
'n_random_features', 64, 'Number of random features.', lower_bound=0)
flags.DEFINE_float(
'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print('Bröther may i have some self-lööps')
n_nodes = FLAGS.n_nodes
n_clusters = FLAGS.n_clusters
n_random_features = FLAGS.n_random_features
train_size = FLAGS.train_size
data_clean, data_dirty, labels = overlapping_gaussians(n_nodes, n_clusters)
data_random = np.random.normal(size=(n_nodes, n_random_features))
graph_clean = construct_knn_graph(data_clean).todense().A1.reshape(
n_nodes, n_nodes)
train_mask = np.zeros(n_nodes, dtype=bool)  # the np.bool alias was removed in NumPy 1.24
train_mask[np.random.choice(
np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
test_mask = ~train_mask
print(f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}')
print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
input_features = tf.keras.layers.Input(shape=(n_random_features,))
input_graph = tf.keras.layers.Input((n_nodes,))
model = multilayer_gcn([input_features, input_graph], [64, 32, n_clusters])
model.compile(
optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
for epoch in range(FLAGS.n_epochs):
model.fit([data_random, graph_clean],
labels,
n_nodes,
shuffle=False,
sample_weight=train_mask)
clusters = model([data_random, graph_clean]).numpy().argmax(axis=1)[test_mask]
print(
'NMI:',
normalized_mutual_info_score(
labels[test_mask], clusters, average_method='arithmetic'))
print('Accuracy:', accuracy_score(labels[test_mask], clusters))
if __name__ == '__main__':
app.run(main)
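# Example invocation (a sketch; the module path follows this repository's
# layout and the flag values shown are the defaults defined above):
#   python -m graph_embedding.dmon.train_gcn_randomfeatures \
#       --n_nodes=1000 --n_clusters=2 --train_size=0.2 --n_epochs=200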
| google-research/google-research | graph_embedding/dmon/train_gcn_randomfeatures.py | Python | apache-2.0 | 3,521 |
#!/usr/bin/env python3
# usage
#
# with bazel:
#
# $ bazel run //tools/code_format:python_check -- -h
#
# $ bazel run //tools/code_format:python_check
#
# with pip:
#
# $ pip install envoy.code_format.python_check
# $ envoy.code_format.python_check -h
#
# usage with pip requires a path, eg
#
# $ envoy.code_format.python_check .
#
# The upstream lib is maintained here:
#
# https://github.com/envoyproxy/pytooling/tree/main/envoy.code_format.python_check
#
# Please submit issues/PRs to the pytooling repo:
#
# https://github.com/envoyproxy/pytooling
#
import pathlib
import sys
from functools import cached_property
import abstracts
from envoy.code_format import python_check
import envoy_repo
@abstracts.implementer(python_check.APythonChecker)
class EnvoyPythonChecker:
@cached_property
def path(self) -> pathlib.Path:
if self.args.paths:
return pathlib.Path(self.args.paths[0])
return pathlib.Path(envoy_repo.PATH)
def main(*args) -> int:
return EnvoyPythonChecker(*args).run()
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
| lyft/envoy | tools/code_format/python_check.py | Python | apache-2.0 | 1,107 |
"""
This module generates a markdown-format test report for the throughput test.
The test report contains 2 parts:
1. throughput with different configs
2. throughput with RSSI
"""
import os
class ThroughputForConfigsReport(object):
THROUGHPUT_TYPES = ['tcp_tx', 'tcp_rx', 'udp_tx', 'udp_rx']
REPORT_FILE_NAME = 'ThroughputForConfigs.md'
def __init__(self, output_path, ap_ssid, throughput_results, sdkconfig_files):
"""
:param output_path: directory to which the report is written
:param ap_ssid: the AP we expect to use
:param throughput_results: results dict with the following structure::
{
"config_name": {
"tcp_tx": result,
"tcp_rx": result,
"udp_tx": result,
"udp_rx": result,
},
"config_name2": {},
}
"""
self.output_path = output_path
self.ap_ssid = ap_ssid
self.results = throughput_results
self.sdkconfigs = dict()
for config_name in sdkconfig_files:
self.sdkconfigs[config_name] = self._parse_config_file(sdkconfig_files[config_name])
if not os.path.exists(output_path):
os.makedirs(output_path)
self.sort_order = list(self.sdkconfigs.keys())
self.sort_order.sort()
@staticmethod
def _parse_config_file(config_file_path):
sdkconfig = {}
with open(config_file_path, 'r') as f:
for line in f:
if not line.isspace():
if line[0] == '#':
continue
name, value = line.split('=')
value = value.strip('\r\n')
sdkconfig[name] = value if value else 'n'
return sdkconfig
def _generate_the_difference_between_configs(self):
"""
generate markdown list for different configs::
default: esp-idf default
low:
* `config name 1`: old value -> new value
* `config name 2`: old value -> new value
* ...
...
"""
data = '## Config Definition:\r\n\r\n'
def find_difference(base, new):
_difference = {}
all_configs = set(base.keys())
all_configs.update(set(new.keys()))
for _config in all_configs:
try:
_base_value = base[_config]
except KeyError:
_base_value = 'null'
try:
_new_value = new[_config]
except KeyError:
_new_value = 'null'
if _base_value != _new_value:
_difference[_config] = '{} -> {}'.format(_base_value, _new_value)
return _difference
for i, _config_name in enumerate(self.sort_order):
current_config = self.sdkconfigs[_config_name]
if i > 0:
previous_config_name = self.sort_order[i - 1]
previous_config = self.sdkconfigs[previous_config_name]
else:
previous_config = previous_config_name = None
if previous_config:
# log the difference
difference = find_difference(previous_config, current_config)
data += '* {} (compared to {}):\r\n'.format(_config_name, previous_config_name)
for diff_name in difference:
data += ' * `{}`: {}\r\n'.format(diff_name, difference[diff_name])
return data
def _generate_report_for_one_type(self, throughput_type):
"""
generate markdown table with the following format::
| config name | throughput (Mbps) | free heap size (bytes) |
|-------------|-------------------|------------------------|
| default | 32.11 | 147500 |
| low | 32.11 | 147000 |
| medium | 33.22 | 120000 |
| high | 43.11 | 100000 |
| max | 45.22 | 79000 |
"""
empty = True
ret = '\r\n### {} {}\r\n\r\n'.format(*throughput_type.split('_'))
ret += '| config name | throughput (Mbps) | free heap size (bytes) |\r\n'
ret += '|-------------|-------------------|------------------------|\r\n'
for config in self.sort_order:
try:
result = self.results[config][throughput_type]
throughput = '{:.02f}'.format(max(result.throughput_by_att[self.ap_ssid].values()))
heap_size = str(result.heap_size)
# although the markdown renderer aligns table columns itself,
# align here as well for better plain-text presentation
ret += '| {:<12}| {:<18}| {:<23}|\r\n'.format(config, throughput, heap_size)
empty = False
except KeyError:
pass
return ret if not empty else ''
def generate_report(self):
data = '# Throughput for different configs\r\n'
data += '\r\nAP: {}\r\n'.format(self.ap_ssid)
for throughput_type in self.THROUGHPUT_TYPES:
data += self._generate_report_for_one_type(throughput_type)
data += '\r\n------\r\n'
data += self._generate_the_difference_between_configs()
with open(os.path.join(self.output_path, self.REPORT_FILE_NAME), 'w') as f:
f.write(data)
class ThroughputVsRssiReport(object):
REPORT_FILE_NAME = 'ThroughputVsRssi.md'
def __init__(self, output_path, throughput_results):
"""
:param output_path: directory to which the report is written
:param throughput_results: results dict with the following structure::
{
"tcp_tx": result,
"tcp_rx": result,
"udp_tx": result,
"udp_rx": result,
}
"""
self.output_path = output_path
self.raw_data_path = os.path.join(output_path, 'raw_data')
self.results = throughput_results
self.throughput_types = list(self.results.keys())
self.throughput_types.sort()
if not os.path.exists(self.raw_data_path):
os.makedirs(self.raw_data_path)
def _generate_summary(self):
"""
generate summary with the following format::
| item | curve analysis | max throughput (Mbps) |
|---------|----------------|-----------------------|
| tcp tx | Success | 32.11 |
| tcp rx | Success | 32.11 |
| udp tx | Success | 45.22 |
| udp rx | Failed | 55.44 |
"""
ret = '\r\n### Summary\r\n\r\n'
ret += '| item | curve analysis | max throughput (Mbps) |\r\n'
ret += '|---------|----------------|-----------------------|\r\n'
for _type in self.throughput_types:
result = self.results[_type]
max_throughput = 0.0
curve_analysis = 'Failed' if result.error_list else 'Success'
for ap_ssid in result.throughput_by_att:
_max_for_ap = max(result.throughput_by_rssi[ap_ssid].values())
if _max_for_ap > max_throughput:
max_throughput = _max_for_ap
max_throughput = '{:.02f}'.format(max_throughput)
ret += '| {:<8}| {:<15}| {:<22}|\r\n'.format('{}_{}'.format(result.proto, result.direction),
curve_analysis, max_throughput)
return ret
def _generate_report_for_one_type(self, result):
"""
generate markdown table with the following format::
### tcp rx
Errors:
* detected error 1
* ...
AP: ap_ssid

AP: ap_ssid

"""
result.post_analysis()
ret = '\r\n### {} {}\r\n'.format(result.proto, result.direction)
if result.error_list:
ret += '\r\nErrors:\r\n\r\n'
for error in result.error_list:
ret += '* ' + error + '\r\n'
for ap_ssid in result.throughput_by_rssi:
ret += '\r\nAP: {}\r\n'.format(ap_ssid)
# draw figure
file_name = result.draw_throughput_figure(self.raw_data_path, ap_ssid, 'rssi')
result.draw_throughput_figure(self.raw_data_path, ap_ssid, 'att')
result.draw_rssi_vs_att_figure(self.raw_data_path, ap_ssid)
ret += '\r\n[throughput Vs RSSI]({})\r\n'.format(os.path.join('raw_data', file_name))
return ret
def generate_report(self):
data = '# Throughput Vs RSSI\r\n'
data += self._generate_summary()
for _type in self.throughput_types:
data += self._generate_report_for_one_type(self.results[_type])
with open(os.path.join(self.output_path, self.REPORT_FILE_NAME), 'w') as f:
f.write(data)
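# Minimal usage sketch for the sdkconfig parser (an illustration, not part of
# the original module); the report classes additionally need iperf result
# objects and an output directory supplied by the test runner.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.sdkconfig', delete=False) as _f:
        _f.write('# comment line\nCONFIG_LWIP_TCP_SND_BUF_DEFAULT=65534\nCONFIG_ESP32_WIFI_TX_BA_WIN=6\n')
        _cfg_path = _f.name
    print(ThroughputForConfigsReport._parse_config_file(_cfg_path))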
| espressif/esp-idf | tools/ci/python_packages/idf_iperf_test_util/TestReport.py | Python | apache-2.0 | 9,169 |
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# Copyright Skyscape Cloud Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
from io import StringIO
import tempfile
import time
import unittest
from maloja import __version__
from maloja.model import Catalog
from maloja.model import Gateway
from maloja.model import Network
from maloja.model import Template
from maloja.model import Org
from maloja.model import Project
from maloja.model import VApp
from maloja.model import Vdc
from maloja.model import Vm
from maloja.model import yaml_loads
from maloja.workflow.path import Path
from maloja.workflow.path import cache
from maloja.workflow.path import find_project
from maloja.workflow.path import find_ypath
from maloja.workflow.path import make_project
from maloja.workflow.path import split_to_path
from maloja.workflow.test.test_utils import NeedsTempDirectory
class PathTests(NeedsTempDirectory, unittest.TestCase):
@property
def fixture(self):
root = self.drcty.name
return [
(Project(), Path(
root, "testproj", None, None, None, None, None, "project.yaml")),
(Org(name="0-123-4-567890"), Path(
root, "testproj", "0-123-4-567890", None, None, None, None, "org.yaml")),
(Catalog(name="Skyscape"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", None, None, "catalog.yaml")),
(Template(name="CentOS_FTP"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", "CentOS_FTP", None, "template.yaml")),
(Vm(name="server"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", "CentOS_FTP", "server", "vm.yaml")),
(Template(name="RedHat_MySQL"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", "RedHat_MySQL", None, "template.yaml")),
(Vm(name="master"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", "RedHat_MySQL", "master", "vm.yaml")),
(Vm(name="slave"),
Path(root, "testproj", "0-123-4-567890", "catalogs",
"Skyscape", "RedHat_MySQL", "slave", "vm.yaml")),
(Gateway(name="0-123-4-567890-edge"),
Path(root, "testproj", "0-123-4-567890", "PROD",
None, None, None, "edge.yaml")),
(Vdc(name="PROD"),
Path(root, "testproj", "0-123-4-567890", "PROD",
None, None, None, "vdc.yaml")),
(Network(name="USER_NET"),
Path(root, "testproj", "0-123-4-567890", "PROD",
"networks", "USER_NET", None, "net.yaml")),
(VApp(name="CentOS_FTP"),
Path(root, "testproj", "0-123-4-567890", "PROD",
"vapps", "CentOS_FTP", None, "vapp.yaml")),
(Vm(name="server"),
Path(root, "testproj", "0-123-4-567890", "PROD",
"vapps", "CentOS_FTP", "server", "vm.yaml")),
]
def test_cache_path(self):
self.maxDiff = 1200
for obj, path in self.fixture:
with self.subTest(path=path):
rv = cache(path, obj)
check = split_to_path(rv, root=self.drcty.name)
self.assertEqual(path, check, check)
self.assertTrue(os.path.isfile(rv))
def test_object_cache(self):
self.maxDiff = 1200
for obj, path in self.fixture:
with self.subTest(path=path):
fP = cache(path, obj)
with open(fP, 'r') as data:
text = data.read()
rv = type(obj)(**yaml_loads(text))
self.assertEqual(vars(obj), vars(rv))
def test_ypath_by_type(self):
proj = self.fixture[0][1]
for obj, path in self.fixture:
cache(path, obj)
results = list(find_ypath(proj, Vm()))
self.assertEqual(4, len(results))
self.assertTrue(all(len(i) == 2 for i in results), results)
self.assertTrue(all(isinstance(i[0], Path) for i in results), results)
self.assertTrue(all(isinstance(i[1], Vm) for i in results), results)
self.assertIn(self.fixture[-1][1], [i[0] for i in results], results)
def test_ypath_with_attributes(self):
proj = self.fixture[0][1]
for obj, path in self.fixture:
cache(path, obj)
results = list(find_ypath(proj, Vm(name="server")))
self.assertEqual(2, len(results))
self.assertTrue(all(len(i) == 2 for i in results), results)
self.assertTrue(all(isinstance(i[0], Path) for i in results), results)
self.assertTrue(all(isinstance(i[1], Vm) for i in results), results)
self.assertIn(self.fixture[-1][1], [i[0] for i in results], results)
results = list(find_ypath(proj, Gateway(name="0-123-4-567890-edge")))
self.assertTrue(results)
results = list(find_ypath(proj, Network(name="USER_NET")))
self.assertTrue(results)
def test_ypath_with_keywords(self):
proj = self.fixture[0][1]
for obj, path in self.fixture:
cache(path, obj)
results = list(find_ypath(proj, Vm(), name="server"))
self.assertEqual(2, len(results))
self.assertTrue(all(len(i) == 2 for i in results), results)
self.assertTrue(all(isinstance(i[0], Path) for i in results), results)
self.assertTrue(all(isinstance(i[1], Vm) for i in results), results)
self.assertIn(self.fixture[-1][1], [i[0] for i in results], results)
class ProjectTests(NeedsTempDirectory, unittest.TestCase):
def test_nodirectory_find_project(self):
drcty = tempfile.TemporaryDirectory()
drcty.cleanup()
self.assertFalse(os.path.isdir(drcty.name))
self.assertRaises(StopIteration, find_project, drcty.name)
def test_nodirectory_make_project(self):
drcty = tempfile.TemporaryDirectory()
drcty.cleanup()
self.assertFalse(os.path.isdir(drcty.name))
path, proj = make_project(drcty.name)
self.assertTrue(os.path.isdir(drcty.name))
self.assertEqual(drcty.name, path.root)
def test_nodirectory_project_found(self):
drcty = tempfile.TemporaryDirectory()
drcty.cleanup()
self.assertFalse(os.path.isdir(drcty.name))
locn, proj = make_project(drcty.name)
path, rv = find_project(drcty.name)
self.assertEqual(locn.root, path.root)
self.assertEqual(locn.project, path.project)
def test_project_has_version(self):
path, proj = make_project(self.drcty.name)
self.assertTrue(hasattr(proj, "version"))
self.assertTrue(proj.version)
def test_project_version_miss(self):
path, proj = make_project(self.drcty.name)
self.assertRaises(
StopIteration, find_project, self.drcty.name, Project(version="0.0.0")
)
def test_find_most_recently_modified_project(self):
assets = [make_project(self.drcty.name)]
time.sleep(1)
assets.append(make_project(self.drcty.name))
path, proj = find_project(self.drcty.name)
self.assertEqual(assets[1][0], path)
# Modify first project
time.sleep(1)
fP = os.path.join(*(i for i in assets[0][0] if i is not None))
with open(fP, "r") as data:
text = data.read()
with open(fP, "w") as output:
output.write(text)
output.flush()
path, proj = find_project(self.drcty.name)
self.assertEqual(assets[0][0], path)
class SplitToPathTests(NeedsTempDirectory, unittest.TestCase):
def test_org(self):
expect = Path(
self.drcty.name,
"project", "org", None, None, None, None,
"org.yaml"
)
data = os.path.join(*(i for i in expect if i is not None))
rv = split_to_path(data, expect.root)
self.assertEqual(expect[1:], rv[1:])
self.assertTrue(os.path.samefile(expect[0], rv[0]))
def test_vdc(self):
expect = Path(
self.drcty.name,
"project", "org", "vdc", None, None, None,
"vdc.yaml"
)
data = os.path.join(*(i for i in expect if i is not None))
rv = split_to_path(data, expect.root)
self.assertEqual(expect[1:], rv[1:])
self.assertTrue(os.path.samefile(expect[0], rv[0]))
def test_template(self):
expect = Path(
self.drcty.name,
"project", "org", "catalogs", "catalog", "template", None,
"template.yaml"
)
data = os.path.join(*(i for i in expect if i is not None))
rv = split_to_path(data, expect.root)
self.assertEqual(expect[1:], rv[1:])
self.assertTrue(os.path.samefile(expect[0], rv[0]))
def test_vapp(self):
expect = Path(
self.drcty.name,
"project", "org", "vdc", "vapps", "vapp", None,
"vapp.yaml"
)
data = os.path.join(*(i for i in expect if i is not None))
rv = split_to_path(data, expect.root)
self.assertEqual(expect[1:], rv[1:])
self.assertTrue(os.path.samefile(expect[0], rv[0]))
def test_vm(self):
expect = Path(
self.drcty.name,
"project", "org", "catalogs", "catalog", "template", "vm",
"vm.yaml"
)
data = os.path.join(*(i for i in expect if i is not None))
rv = split_to_path(data, expect.root)
self.assertEqual(expect[1:], rv[1:])
self.assertTrue(os.path.samefile(expect[0], rv[0]))
| skyscape-cloud-services/maloja | maloja/workflow/test/test_path.py | Python | apache-2.0 | 10,228 |
# Copyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from taf.foundation.api.ui.support import WaitHandler
class BrowserWaitHandler(WaitHandler):
def __init__(
self,
handler=None,
timeout=None,
poll_frequency=1.0
):
super(BrowserWaitHandler, self).__init__(
handler, timeout
)
self.poll_frequency = poll_frequency or 1.0
def wait(self, timeout=None):
"""
Waits until the page is fully loaded
:param timeout: float in seconds
:return:
"""
try:
self.timeout = float(timeout or self.timeout)
self.poll_frequency = float(self.poll_frequency)
WebDriverWait(
self.handler,
self.timeout,
self.poll_frequency
).until(
lambda driver: driver.execute_script(
'return document.readyState=="complete";'
),
'Failed to fully load page in {} seconds'.format(
self.timeout
)
)
except TimeoutException:
raise
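# Usage sketch (an assumption, not part of this module): given a Selenium
# WebDriver instance `driver`, wait up to 30 seconds for the page to report
# document.readyState == "complete":
#
#   BrowserWaitHandler(handler=driver, timeout=30).wait()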
| WesleyPeng/uiXautomation | src/main/python/taf/foundation/plugins/web/selenium/support/browserwaithandler.py | Python | apache-2.0 | 1,826 |
import os
import sys
import argparse
import requests
import codecs
import multiprocessing as mp
import urllib
def main():
odir, wiki_list, depth, cpus, ldir = parse_args()
pages = read_list(wiki_list, depth)
npages = len(pages)
pages_per_cpu = max(1, npages // cpus)  # integer step for range(); avoid a zero step when cpus > npages
jobs = []
pid = 0
log_prefix = os.path.split(wiki_list)[-1].replace('.txt', '_{}.log')
for i in range(0, npages, pages_per_cpu):
lfile = os.path.join(ldir, log_prefix.format(pid))
jobs.append((odir, pages[i:i+pages_per_cpu], lfile))
pid += 1
pool = mp.Pool(cpus)
x = pool.map_async(worker, jobs)
x.get()
pool.close()
pool.join()
def worker(args):
odir, pages, log_file = args
npages = len(pages)
lf = codecs.open(log_file, 'w', 'utf-8')
for i, page in enumerate(pages, 1):
lf.write(u'Requesting: {}\n'.format(page))
lf.flush()
req = {'action':'query', 'format':'json', 'prop':'revisions',
'titles':page,
'rvprop':'timestamp|ids',
'rvstart':'20111001000000', 'rvdir':'older' }
result = requests.get('http://en.wikipedia.org/w/api.php',
params=req).json()
pid = list(result['query']['pages'].keys())[0]
revs = result['query']['pages'][pid].get('revisions', None)
if revs is not None:
if len(revs) > 0:
rev = revs[0]
url_tmp = u'http://en.wikipedia.org/w/index.php?' \
+ u'title={}&oldid={}'
url = url_tmp.format(page.replace(u' ', u'_'),
rev['parentid'])
ofile = u'{}_{}_{}.html'.format(rev['timestamp'],
rev['parentid'],
page).replace(u' ', u'_')
ofile = ofile.replace(u'/', u'_')
ofile = ofile.replace(u'\\', u'_')
#ofile = ofile.encode('utf-8')
opath = os.path.join(odir, ofile)
opath = opath.encode('utf-8')
try:
req = requests.get(url)
except requests.exceptions.InvalidURL:
print('Invalid url?', url)
continue
with codecs.open(opath, 'w', 'utf-8') as f: f.write(req.text)
lf.write(u'{}/{}) {} {}\n'.format(i, npages, rev['timestamp'],
rev['parentid']))
lf.write(u'\t--> {} {}k\n'.format(ofile,
os.path.getsize(opath)/1024))
lf.flush()
else:
lf.write(u'{} has no revision before cutoff date.\n'.format(page))
lf.flush()
lf.close()
def read_list(wiki_list, depth):
pages = []
with codecs.open(wiki_list, 'r', 'utf-8') as f:
for line in f:
items = line.strip().split(u'\t')
if len(items) != 3:
continue
item_depth = int(items[1])
if item_depth <= depth:
pages.append(items[2])
return pages
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--html-dir',
help=u'Location to write html files.',
type=str, required=True)
parser.add_argument('-w', '--wiki-list',
help=u'TSV of pageids, depth, page title.',
type=str, required=True)
parser.add_argument('-m', '--max-depth',
help=u'Max depth to retrieve',
type=int, required=False, default=5)
parser.add_argument('-p', '--num-processes',
help=u'Number of processes to use',
type=int, required=False, default=1)
parser.add_argument('-l', '--log-dir',
help=u'Location to write logs',
type=str, required=False, default='logs')
args = parser.parse_args()
odir = args.html_dir
wiki_list = args.wiki_list
depth = args.max_depth
cpus = args.num_processes
ldir = args.log_dir
if ldir != '' and not os.path.exists(ldir):
os.makedirs(ldir)
if not os.path.exists(wiki_list) or os.path.isdir(wiki_list):
sys.stderr.write((u'--wiki-list argument {} either does not exist' \
+ u' or is a directory!\n').format(wiki_list))
sys.stderr.flush()
sys.exit()
if not os.path.exists(odir):
os.makedirs(odir)
return odir, wiki_list, depth, cpus, ldir
if __name__ == '__main__':
main()
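# Example invocation (hypothetical file names; flags as defined in parse_args):
#   python wiki_list2html.py -d html_out -w wiki_pages.tsv -m 3 -p 4 -l logs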
| kedz/cuttsum | old/install-scripts/python/wiki_list2html.py | Python | apache-2.0 | 4,737 |
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='irmacl',
version='2.0.7',
description='Irma command line tool for API v2',
long_description=readme(),
url='https://github.com/quarkslab/irma-cli',
author='irma-dev',
author_email='irma-dev@quarkslab.com',
license='ApacheV2',
packages=['irmacl'],
install_requires=[
'requests',
'marshmallow',
],
include_package_data=True,
test_suite='nose.collector',
tests_require=['nose'],
zip_safe=False)
| quarkslab/irma-cli | setup.py | Python | apache-2.0 | 1,136 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GOOGLE_CLOUD_TEST = 'marketplace.cloud.google.com/verification'
| GoogleCloudPlatform/marketplace-k8s-app-tools | marketplace/deployer_util/constants.py | Python | apache-2.0 | 640 |
from Bio.config.FormatRegistry import FormatObject, FormatGroup
embl65 = FormatObject(
name = "embl/65",
abbrev = "embl65",
expression = "Bio.expressions.embl.embl65.format",
)
embl = FormatGroup(
name = "embl",
)
embl.add(embl65)
from Bio.formatdefs import sequence
sequence.sequence.add(embl)
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/formatdefs/embl.py | Python | apache-2.0 | 322 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.bigquery_storage_v1.types import storage
from google.cloud.bigquery_storage_v1.types import stream
from .base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
class BigQueryReadGrpcTransport(BigQueryReadTransport):
"""gRPC backend transport for BigQueryRead.
BigQuery Read API.
The Read API can be used to read data from BigQuery.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def create_read_session(
self,
) -> Callable[[storage.CreateReadSessionRequest], stream.ReadSession]:
r"""Return a callable for the create read session method over gRPC.
Creates a new read session. A read session divides
the contents of a BigQuery table into one or more
streams, which can then be used to read data from the
table. The read session also specifies properties of the
data to be read, such as a list of columns or a
push-down filter describing the rows to be returned.
A particular row can be read by at most one stream. When
the caller has reached the end of each stream in the
session, then all the data in the table has been read.
Data is assigned to each stream such that roughly the
same number of rows can be read from each stream.
Because the server-side unit for assigning data is
collections of rows, the API does not guarantee that
each stream will return the same number of rows.
Additionally, the limits are enforced based on the
number of pre-filtered rows, so some filters can lead to
lopsided assignments.
Read sessions automatically expire 6 hours after they
are created and do not require manual clean-up by the
caller.
Returns:
Callable[[~.CreateReadSessionRequest],
~.ReadSession]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_read_session" not in self._stubs:
self._stubs["create_read_session"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1.BigQueryRead/CreateReadSession",
request_serializer=storage.CreateReadSessionRequest.serialize,
response_deserializer=stream.ReadSession.deserialize,
)
return self._stubs["create_read_session"]
@property
def read_rows(
self,
) -> Callable[[storage.ReadRowsRequest], storage.ReadRowsResponse]:
r"""Return a callable for the read rows method over gRPC.
Reads rows from the stream in the format prescribed
by the ReadSession. Each response contains one or more
table rows, up to a maximum of 100 MiB per response;
read requests which attempt to read individual rows
larger than 100 MiB will fail.
Each request also returns a set of stream statistics
reflecting the current state of the stream.
Returns:
Callable[[~.ReadRowsRequest],
~.ReadRowsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "read_rows" not in self._stubs:
self._stubs["read_rows"] = self.grpc_channel.unary_stream(
"/google.cloud.bigquery.storage.v1.BigQueryRead/ReadRows",
request_serializer=storage.ReadRowsRequest.serialize,
response_deserializer=storage.ReadRowsResponse.deserialize,
)
return self._stubs["read_rows"]
@property
def split_read_stream(
self,
) -> Callable[[storage.SplitReadStreamRequest], storage.SplitReadStreamResponse]:
r"""Return a callable for the split read stream method over gRPC.
Splits a given ``ReadStream`` into two ``ReadStream`` objects.
These ``ReadStream`` objects are referred to as the primary and
the residual streams of the split. The original ``ReadStream``
can still be read from in the same manner as before. Both of the
returned ``ReadStream`` objects can also be read from, and the
rows returned by both child streams will be the same as the rows
read from the original stream.
Moreover, the two child streams will be allocated back-to-back
in the original ``ReadStream``. Concretely, it is guaranteed
that for streams original, primary, and residual, that
original[0-j] = primary[0-j] and original[j-n] = residual[0-m]
once the streams have been read to completion.
Returns:
Callable[[~.SplitReadStreamRequest],
~.SplitReadStreamResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "split_read_stream" not in self._stubs:
self._stubs["split_read_stream"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1.BigQueryRead/SplitReadStream",
request_serializer=storage.SplitReadStreamRequest.serialize,
response_deserializer=storage.SplitReadStreamResponse.deserialize,
)
return self._stubs["split_read_stream"]
def close(self):
self.grpc_channel.close()
__all__ = ("BigQueryReadGrpcTransport",)
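# Usage sketch (an assumption, not part of this generated module): with
# application-default credentials available, each RPC is exposed as a callable
# property, e.g.
#
#   transport = BigQueryReadGrpcTransport()
#   session = transport.create_read_session(
#       storage.CreateReadSessionRequest(parent="projects/my-project"))
#
# In practice the higher-level BigQueryReadClient constructs and owns this
# transport; direct use is mostly limited to tests or custom channel setup.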
| googleapis/python-bigquery-storage | google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py | Python | apache-2.0 | 16,017 |
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp NFS storage driver
"""
import os
import copy
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit.volume.drivers.netapp.dataontap import fakes as fake
from jacket.storage import utils
from jacket.storage.volume.drivers.netapp.dataontap.client import api as netapp_api
from jacket.storage.volume.drivers.netapp.dataontap import nfs_base
from jacket.storage.volume.drivers.netapp import utils as na_utils
from jacket.storage.volume.drivers import nfs
@ddt.ddt
class NetAppNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppNfsDriverTestCase, self).setUp()
configuration = mock.Mock()
configuration.reserved_percentage = 0
configuration.nfs_mount_point_base = '/mnt/test'
configuration.reserved_percentage = 0
configuration.max_over_subscription_ratio = 1.1
kwargs = {'configuration': configuration}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
self.driver.ssc_enabled = False
self.driver.db = mock.Mock()
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_super_do_setup.called)
def test_get_share_capacity_info(self):
mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info')
mock_get_capacity.return_value = fake.CAPACITY_VALUES
expected_total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES / units.Gi, '0.01')
expected_free_capacity_gb = (na_utils.round_down(
fake.AVAILABLE_BYTES / units.Gi, '0.01'))
expected_reserved_percentage = round(
self.driver.configuration.reserved_percentage)
result = self.driver._get_share_capacity_info(fake.NFS_SHARE)
self.assertEqual(expected_total_capacity_gb,
result['total_capacity_gb'])
self.assertEqual(expected_free_capacity_gb,
result['free_capacity_gb'])
self.assertEqual(expected_reserved_percentage,
round(result['reserved_percentage']))
def test_get_capacity_info_ipv4_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
def test_get_capacity_info_ipv6_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
def test_create_volume(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
expected = {'provider_location': fake.NFS_SHARE}
result = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_no_pool(self):
volume = copy.deepcopy(fake.NFS_VOLUME)
volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
self.mock_object(self.driver, '_ensure_shares_mounted')
self.assertRaises(exception.InvalidHost,
self.driver.create_volume,
volume)
def test_create_volume_exception(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
fake.NFS_VOLUME)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_from_snapshot(self):
provider_location = fake.POOL_NAME
snapshot = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
snapshot)
self.assertEqual(provider_location, result)
def test_clone_source_to_destination_volume(self):
self.mock_object(self.driver, '_get_volume_location', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(
self.driver,
'_clone_with_extension_check')
self.mock_object(self.driver, '_do_qos_for_volume')
expected = {'provider_location': fake.POOL_NAME}
result = self.driver._clone_source_to_destination_volume(
fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
self.assertEqual(expected, result)
def test_clone_source_to_destination_volume_with_do_qos_exception(self):
self.mock_object(self.driver, '_get_volume_location', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(
self.driver,
'_clone_with_extension_check')
self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock(
side_effect=Exception))
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._clone_source_to_destination_volume,
fake.CLONE_SOURCE,
fake.CLONE_DESTINATION)
def test_clone_with_extension_check_equal_sizes(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size']
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
self.assertEqual(0, mock_extend_volume.call_count)
def test_clone_with_extension_check_unequal_sizes(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size'] + 1
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
self.assertEqual(1, mock_extend_volume.call_count)
def test_clone_with_extension_check_extend_exception(self):
clone_source = copy.deepcopy(fake.CLONE_SOURCE)
clone_source['size'] = fake.VOLUME['size'] + 1
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = True
self.mock_object(self.driver, '_set_rw_permissions')
mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
mock_extend_volume.side_effect = Exception
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
self.assertRaises(exception.CinderException,
self.driver._clone_with_extension_check,
clone_source,
fake.NFS_VOLUME)
self.assertEqual(1, mock_cleanup.call_count)
def test_clone_with_extension_check_no_discovery(self):
self.mock_object(self.driver, '_clone_backing_file_for_volume')
self.mock_object(self.driver, 'local_path')
self.mock_object(self.driver, '_set_rw_permissions')
mock_discover = self.mock_object(self.driver,
'_discover_file_till_timeout')
mock_discover.return_value = False
self.assertRaises(exception.CinderException,
self.driver._clone_with_extension_check,
fake.CLONE_SOURCE,
fake.NFS_VOLUME)
def test_create_cloned_volume(self):
provider_location = fake.POOL_NAME
src_vref = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
src_vref)
self.assertEqual(provider_location, result)
def test_do_qos_for_volume(self):
self.assertRaises(NotImplementedError,
self.driver._do_qos_for_volume,
fake.NFS_VOLUME,
fake.EXTRA_SPECS)
def test_cleanup_volume_on_failure(self):
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
mock_local_path = self.mock_object(self.driver, 'local_path')
mock_local_path.return_value = path
mock_exists_check = self.mock_object(os.path, 'exists')
mock_exists_check.return_value = True
mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
mock_delete.assert_has_calls([mock.call(path)])
def test_cleanup_volume_on_failure_no_path(self):
self.mock_object(self.driver, 'local_path')
mock_exists_check = self.mock_object(os.path, 'exists')
mock_exists_check.return_value = False
mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
self.assertEqual(0, mock_delete.call_count)
def test_get_vol_for_share(self):
self.assertRaises(NotImplementedError,
self.driver._get_vol_for_share,
fake.NFS_SHARE)
def test_get_export_ip_path_volume_id_provided(self):
mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
mock_get_host_ip.return_value = fake.IPV4_ADDRESS
mock_get_export_path = self.mock_object(
self.driver, '_get_export_path')
mock_get_export_path.return_value = fake.EXPORT_PATH
expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_get_export_ip_path_share_provided(self):
expected = (fake.SHARE_IP, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(share=fake.NFS_SHARE)
self.assertEqual(expected, result)
def test_get_export_ip_path_volume_id_and_share_provided(self):
mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
mock_get_host_ip.return_value = fake.IPV4_ADDRESS
mock_get_export_path = self.mock_object(
self.driver, '_get_export_path')
mock_get_export_path.return_value = fake.EXPORT_PATH
expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(
fake.VOLUME_ID, fake.NFS_SHARE)
self.assertEqual(expected, result)
def test_get_export_ip_path_no_args(self):
self.assertRaises(exception.InvalidInput,
self.driver._get_export_ip_path)
def test_get_host_ip(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.SHARE_IP
result = self.driver._get_host_ip(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_get_export_path(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.EXPORT_PATH
result = self.driver._get_export_path(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_extend_volume(self):
new_size = 100
volume_copy = copy.copy(fake.VOLUME)
volume_copy['size'] = new_size
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
self.mock_object(self.driver,
'local_path',
mock.Mock(return_value=path))
mock_resize_image_file = self.mock_object(self.driver,
'_resize_image_file')
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=fake.EXTRA_SPECS))
mock_do_qos_for_volume = self.mock_object(self.driver,
'_do_qos_for_volume')
self.driver.extend_volume(fake.VOLUME, new_size)
mock_resize_image_file.assert_called_once_with(path, new_size)
mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
mock_do_qos_for_volume.assert_called_once_with(volume_copy,
fake.EXTRA_SPECS,
cleanup=False)
def test_extend_volume_resize_error(self):
new_size = 100
volume_copy = copy.copy(fake.VOLUME)
volume_copy['size'] = new_size
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
self.mock_object(self.driver,
'local_path',
mock.Mock(return_value=path))
mock_resize_image_file = self.mock_object(
self.driver, '_resize_image_file',
mock.Mock(side_effect=netapp_api.NaApiError))
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=fake.EXTRA_SPECS))
mock_do_qos_for_volume = self.mock_object(self.driver,
'_do_qos_for_volume')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
fake.VOLUME,
new_size)
mock_resize_image_file.assert_called_once_with(path, new_size)
self.assertFalse(mock_get_volume_extra_specs.called)
self.assertFalse(mock_do_qos_for_volume.called)
def test_extend_volume_qos_error(self):
new_size = 100
volume_copy = copy.copy(fake.VOLUME)
volume_copy['size'] = new_size
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
self.mock_object(self.driver,
'local_path',
mock.Mock(return_value=path))
mock_resize_image_file = self.mock_object(self.driver,
'_resize_image_file')
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=fake.EXTRA_SPECS))
mock_do_qos_for_volume = self.mock_object(
self.driver, '_do_qos_for_volume',
mock.Mock(side_effect=netapp_api.NaApiError))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
fake.VOLUME,
new_size)
mock_resize_image_file.assert_called_once_with(path, new_size)
mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
mock_do_qos_for_volume.assert_called_once_with(volume_copy,
fake.EXTRA_SPECS,
cleanup=False)
def test_is_share_clone_compatible(self):
self.assertRaises(NotImplementedError,
self.driver._is_share_clone_compatible,
fake.NFS_VOLUME,
fake.NFS_SHARE)
@ddt.data(
{'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
{'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
{'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
{'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
{'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
{'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
)
@ddt.unpack
def test_share_has_space_for_clone(self, size, thin, over, res, expected):
total_bytes = 20 * units.Gi
available_bytes = 12 * units.Gi
with mock.patch.object(self.driver,
'_get_capacity_info',
return_value=(
total_bytes, available_bytes)):
with mock.patch.object(self.driver,
'max_over_subscription_ratio',
over):
with mock.patch.object(self.driver,
'reserved_percentage',
res):
result = self.driver._share_has_space_for_clone(
fake.NFS_SHARE,
size,
thin=thin)
self.assertEqual(expected, result)
@ddt.data(
{'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
{'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
{'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
{'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
{'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
{'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
)
@ddt.unpack
@mock.patch.object(nfs_base.NetAppNfsDriver, '_get_capacity_info')
def test_share_has_space_for_clone2(self,
mock_get_capacity,
size, thin, over, res, expected):
total_bytes = 20 * units.Gi
available_bytes = 12 * units.Gi
mock_get_capacity.return_value = (total_bytes, available_bytes)
with mock.patch.object(self.driver,
'max_over_subscription_ratio',
over):
with mock.patch.object(self.driver,
'reserved_percentage',
res):
result = self.driver._share_has_space_for_clone(
fake.NFS_SHARE,
size,
thin=thin)
self.assertEqual(expected, result)
|
HybridF5/jacket
|
jacket/tests/storage/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
|
Python
|
apache-2.0
| 21,509
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public configuration parameters for object tracking."""
import dataclasses
from automl_video_ondevice.types import Tracker
@dataclasses.dataclass
class ObjectTrackingConfig:
score_threshold: float = 0.0
max_detections: int = 100
device: str = ""
tracker: Tracker = Tracker.NONE
|
google/automl-video-ondevice
|
automl_video_ondevice/object_tracking/config.py
|
Python
|
apache-2.0
| 967
|
#!/usr/bin/env python
# Author : Pierre Schnizer <pierre@itp.tu-graz.ac.at>
# Date : January 2003
"""
The functions described in this section can be used to perform
least-squares fits to a straight line model, Y = c_0 + c_1 X. For
weighted data the best-fit is found by minimizing the weighted sum of
squared residuals, \chi^2,
\chi^2 = \sum_i w_i (y_i - (c_0 + c_1 x_i))^2
for the parameters c_0, c_1. For unweighted data the sum is computed
with w_i = 1.
"""
from . import _callback
def linear(x, y):
"""
This function computes the best-fit linear regression coefficients
(C0,C1) of the model Y = c_0 + c_1 X for the datasets (X, Y). The
variance-covariance matrix for the parameters (C0, C1) is
estimated from the scatter of the points around the best-fit line
and returned via the parameters (COV00, COV01, COV11). The sum of
squares of the residuals from the best-fit line is returned in
SUMSQ.
"""
return _callback.gsl_fit_linear(x,y)
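# The sketch below is illustrative only and is not part of the pygsl API: it
# spells out, in plain Python, the closed-form unweighted least-squares
# solution that ``linear`` obtains through GSL. The helper name is made up.
def _example_unweighted_fit(x, y):
    """Return (c0, c1) for the best-fit line Y = c_0 + c_1 X."""
    n = len(x)
    x_bar = sum(x) / float(n)
    y_bar = sum(y) / float(n)
    # Sums of squares about the means give the slope and intercept directly.
    ss_xx = sum((xi - x_bar) ** 2 for xi in x)
    ss_xy = sum((xi - x_bar) * (yi - y_bar) for xi, yi in zip(x, y))
    c1 = ss_xy / ss_xx
    c0 = y_bar - c1 * x_bar
    return c0, c1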
def wlinear(x, y, w):
"""
This function computes the best-fit linear regression coefficients
(C0,C1) of the model Y = c_0 + c_1 X for the weighted datasets (X,
Y). The
vector W specifies the weight of
each datapoint. The weight is the reciprocal of the variance for
each datapoint in Y.
The covariance matrix for the parameters (C0, C1) is estimated
from weighted data and returned via the parameters (COV00, COV01,
COV11). The weighted sum of squares of the residuals from the
best-fit line, \chi^2, is returned in CHISQ.
"""
return _callback.gsl_fit_wlinear(x, y, w)
def est(x, c0, c1, c00, c01, c11):
"""
This function uses the best-fit linear regression coefficients
C0,C1 and their estimated covariance COV00,COV01,COV11 to compute
the fitted function Y and its standard deviation Y_ERR for the
model Y = c_0 + c_1 X at the point X.
"""
    return _callback.gsl_fit_linear_est(x, c0, c1, c00, c01, c11)
def mul(x,y):
"""
This function computes the best-fit linear regression coefficient
C1 of the model Y = c_1 X for the datasets (X, Y).
The variance of the
parameter C1 is estimated from the scatter of the points around
the best-fit line and returned via the parameter COV11. The sum
of squares of the residuals from the best-fit line is returned in
SUMSQ.
"""
return _callback.gsl_fit_mul(x,y)
def wmul(x, y, w):
"""
This function computes the best-fit linear regression coefficient
C1 of the model Y = c_1 X for the weighted datasets (X, Y).
The vector
W, of length N and stride WSTRIDE, specifies the weight of each
datapoint. The weight is the reciprocal of the variance for each
datapoint in Y.
The variance of the parameter C1 is estimated from the weighted
data and returned via the parameters COV11. The weighted sum of
squares of the residuals from the best-fit line, \chi^2, is
returned in CHISQ.
"""
return _callback.gsl_fit_wmul(x, y, w)
def mul_est(x, c1, c11):
"""
This function uses the best-fit linear regression coefficient C1
and its estimated covariance COV11 to compute the fitted function
Y and its standard deviation Y_ERR for the model Y = c_1 X at the
point X.
"""
return _callback.gsl_fit_mul_est(x, c1, c11)
# def gsl_fit_poly(x, w, y):
# return _callback.gsl_fit_poly(x, w, y)
#
# def gsl_fit_fns(A, w, y):
# return _callback.gsl_fit_fns(A, w, y)
#
# def gsl_fit_linear_nd(m, y, w):
# return _callback.gsl_fit_linear_nd(m, y, w)
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/fit.py
|
Python
|
apache-2.0
| 3,618
|
#!/home/venturi/django/cms/env/bin/python
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
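# Usage illustration (comments only; values are examples): the interval spec
# is a comma-separated list of frame numbers and ranges, where a bare "-N"
# means "frames 0 through N" and an empty spec matches every frame:
#   Interval("1-3,7")[2]  -> 1
#   Interval("1-3,7")[5]  -> 0
#   Interval("-15")[9]    -> 1
#   Interval("")[999]     -> 1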
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
|
Venturi/oldcms
|
env/bin/explode.py
|
Python
|
apache-2.0
| 2,461
|
from six import PY2, PY3
import unittest
import re
from robot.utils import unic, prepr, DotDict, JYTHON, IRONPYTHON
from robot.utils.asserts import assert_equals, assert_true
if JYTHON:
from java.lang import String, Object, RuntimeException
import JavaObject
import UnicodeJavaLibrary
class TestJavaUnic(unittest.TestCase):
def test_with_java_object(self):
data = u'This is unicode \xe4\xf6'
assert_equals(unic(JavaObject(data)), data)
def test_with_class_type(self):
assert_true('java.lang.String' in unic(String('').getClass()))
def test_with_array_containing_unicode_objects(self):
assert_true('Circle is 360' in
unic(UnicodeJavaLibrary().javaObjectArray()))
def test_with_iterator(self):
iterator = UnicodeJavaLibrary().javaIterator()
assert_true('java.util' in unic(iterator))
assert_true('Circle is 360' in next(iterator))
def test_failure_in_toString(self):
class ToStringFails(Object, UnRepr):
def toString(self):
raise RuntimeException(self.error)
failing = ToStringFails()
assert_equals(unic(failing), failing.unrepr)
class TestUnic(unittest.TestCase):
if not (JYTHON or IRONPYTHON):
def test_unicode_nfc_and_nfd_decomposition_equality(self):
import unicodedata
text = u'Hyv\xe4'
assert_equals(unic(unicodedata.normalize('NFC', text)), text)
# In Mac filesystem umlaut characters are presented in NFD-format.
# This is to check that unic normalizes all strings to NFC
assert_equals(unic(unicodedata.normalize('NFD', text)), text)
if not IRONPYTHON:
def test_encoding(self):
good = u'hyv\xe4'
assert_equals(unic(good.encode('UTF-8'), 'UTF-8'), good)
assert_equals(unic(good.encode('UTF-8'), 'ASCII', 'ignore'), 'hyv')
def test_object_containing_unicode_repr(self):
assert_equals(unic(UnicodeRepr()), u'Hyv\xe4')
def test_list_with_objects_containing_unicode_repr(self):
objects = [UnicodeRepr(), UnicodeRepr()]
result = unic(objects)
if JYTHON:
# This is actually wrong behavior
assert_equals(result, '[Hyv\\xe4, Hyv\\xe4]')
elif IRONPYTHON or PY3:
# And so is this.
assert_equals(result, '[Hyv\xe4, Hyv\xe4]')
else:
expected = UnRepr.format('list', 'UnicodeEncodeError: ')[:-1]
assert_true(result.startswith(expected))
def test_bytes_below_128(self):
assert_equals(unic('\x00-\x01-\x02-\x7f'), u'\x00-\x01-\x02-\x7f')
if not (IRONPYTHON or PY3):
def test_bytes_above_128(self):
assert_equals(unic('hyv\xe4'), u'hyv\\xe4')
assert_equals(unic('\x00-\x01-\x02-\xe4'), u'\x00-\x01-\x02-\\xe4')
def test_bytes_with_newlines_tabs_etc(self):
# 'string_escape' escapes some chars we don't want to be escaped
assert_equals(unic("\x00\xe4\n\t\r\\'"), u"\x00\\xe4\n\t\r\\'")
else:
def test_bytes_above_128(self):
assert_equals(unic('hyv\xe4'), u'hyv\xe4')
assert_equals(unic('\x00-\x01-\x02-\xe4'), u'\x00-\x01-\x02-\xe4')
def test_bytes_with_newlines_tabs_etc(self):
# 'string_escape' escapes some chars we don't want to be escaped
assert_equals(unic("\x00\xe4\n\t\r\\'"), u"\x00\xe4\n\t\r\\'")
if not PY3:
def test_failure_in_unicode(self):
failing = UnicodeFails()
assert_equals(unic(failing), failing.unrepr)
def test_failure_in_str(self):
failing = StrFails()
assert_equals(unic(failing), failing.unrepr)
class TestPrettyRepr(unittest.TestCase):
def _verify(self, item, expected=None):
if not expected:
expected = repr(item)
elif (IRONPYTHON or PY3) and "b'" in expected:
expected = expected.replace("b'", "'")
assert_equals(prepr(item), expected)
def test_no_u_prefix(self):
self._verify(u'foo', "'foo'")
self._verify(u"f'o'o", "\"f'o'o\"")
self._verify(u'hyv\xe4', "'hyv\\xe4'" if PY2 else None)
def test_b_prefix(self):
self._verify('foo', "b'foo'")
self._verify('hyv\xe4', "b'hyv\\xe4'" if PY2 else None)
def test_non_strings(self):
for inp in [1, -2.0, True, None, -2.0, (), [], {},
StrFails(), UnicodeFails()]:
self._verify(inp)
def test_failing_repr(self):
failing = ReprFails()
self._verify(failing, failing.unrepr)
def test_unicode_repr(self):
invalid = UnicodeRepr()
if JYTHON:
expected = 'Hyv\\xe4'
elif IRONPYTHON or PY3:
expected = u'Hyv\xe4'
else:
expected = invalid.unrepr # This is correct.
self._verify(invalid, expected)
def test_non_ascii_repr(self):
non_ascii = NonAsciiRepr()
if IRONPYTHON or PY3:
expected = u'Hyv\xe4'
else:
expected = 'Hyv\\xe4' # This is correct.
self._verify(non_ascii, expected)
def test_collections(self):
self._verify([u'foo', 'bar', 3], "['foo', b'bar', 3]")
self._verify([u'foo', 'bar', (u'x', 'y')], "['foo', b'bar', ('x', b'y')]")
inp1, inp2 = ReprFails(), StrFails()
exp1, exp2 = inp1.unrepr, repr(inp2)
self._verify((inp1, inp2, [inp1]),
'(%s, %s, [%s])' % (exp1, exp2, exp1)
#PY3: different pprint.PrettyPrinter behavior
# - doesn't iterate container
# if PrettyPrinter.format() text of container
# is not longer than max line width
# - see PrettyPrinter._format(): ... if sepLines: ...
if PY2 else UnRepr.format('tuple', UnRepr.error))
self._verify({'x': 1, 2: u'y'},
"{2: 'y', b'x': 1}")
self._verify({1: inp1, None: ()},
'{None: (), 1: %s}' % exp1
#PY3: different pprint.PrettyPrinter behavior
# - see above
if PY2 else UnRepr.format('dict', UnRepr.error))
def test_dotdict(self):
self._verify(DotDict({'x': 1, 2: u'y'}),
"{2: 'y', b'x': 1}")
def test_recursive(self):
x = [1, 2]
x.append(x)
match = re.match(r'\[1, 2. <Recursion on list with id=\d+>\]', prepr(x))
assert_true(match is not None)
def test_split_big_collections(self):
self._verify(range(100))
self._verify([u'Hello, world!'] * 10,
'[%s]' % ', '.join(["'Hello, world!'"] * 10))
self._verify(list(range(300)),
'[%s]' % ',\n '.join(str(i) for i in range(300)))
self._verify([u'Hello, world!'] * 30,
'[%s]' % ',\n '.join(["'Hello, world!'"] * 30))
class UnRepr(object):
error = 'This, of course, should never happen...'
@property
def unrepr(self):
return self.format(type(self).__name__, self.error)
@staticmethod
def format(name, error):
return "<Unrepresentable object %s. Error: %s>" % (name, error)
class UnicodeFails(UnRepr):
def __unicode__(self):
raise RuntimeError(self.error)
class StrFails(UnRepr):
def __unicode__(self):
raise UnicodeError()
def __str__(self):
raise RuntimeError(self.error)
class ReprFails(UnRepr):
def __repr__(self):
raise RuntimeError(self.error)
class UnicodeRepr(UnRepr):
def __init__(self):
try:
repr(self)
except UnicodeEncodeError as err:
self.error = 'UnicodeEncodeError: %s' % err
def __repr__(self):
return u'Hyv\xe4'
class NonAsciiRepr(UnRepr):
def __init__(self):
try:
repr(self)
except UnicodeEncodeError as err:
self.error = 'UnicodeEncodeError: %s' % err
def __repr__(self):
return 'Hyv\xe4'
if __name__ == '__main__':
unittest.main()
|
userzimmermann/robotframework
|
utest/utils/test_unic.py
|
Python
|
apache-2.0
| 8,251
|
from insights.combiners.user_namespaces import UserNamespaces
from insights.parsers.cmdline import CmdLine
from insights.parsers.grub_conf import Grub2Config
from insights.tests import context_wrap
ENABLE_TOK_A = '''
user_namespaces.enable=1
'''.strip() # noqa
ENABLE_TOK_B = '''
user-namespaces.enable=1
'''.strip() # noqa
CMDLINE = '''
BOOT_IMAGE=/vmlinuz-3.10.0-514.6.1.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap {0}
'''.strip() # noqa
GRUB2_CONF = '''
### BEGIN /etc/grub.d/10_linux ###
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.16.1.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {0}
initrdefi /initramfs-3.10.0-514.16.1.el7.x86_64.img
}}
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.10.2.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {1}
initrdefi /initramfs-3.10.0-514.10.2.el7.x86_64.img
}}
''' # noqa
MENUENTRY_0 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip() # noqa
MENUENTRY_1 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip() # noqa
CASES = [
# noqa
# |-- provided --| |---- expected results ---|
# ((cmdline, grub), (enabled, enabled_configs))
# Not enabled, no grub data
((CMDLINE.format(''), None), (False, [])),
# Not enabled, not enabled in grub
((CMDLINE.format(''), GRUB2_CONF.format('', '')), (False, [])),
# Not enabled, but enabled in menuentry 1
((CMDLINE.format(''), GRUB2_CONF.format('', ENABLE_TOK_A)),
(False, [MENUENTRY_1])),
# Enabled, no grub data
((CMDLINE.format(ENABLE_TOK_A), None), (True, [])),
# Enabled, but not enabled in grub
((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format('', '')),
(True, [])),
# Enabled, enabled in menuentry 0
((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format(ENABLE_TOK_A, '')),
(True, [MENUENTRY_0])),
# Dash syntax, rather than underscore
((CMDLINE.format(ENABLE_TOK_B), GRUB2_CONF.format(ENABLE_TOK_B, '')),
(True, [MENUENTRY_0]))
]
def test_integration():
for case in CASES:
context = dict()
context[CmdLine] = CmdLine(context_wrap(case[0][0]))
if case[0][1] is not None:
context[Grub2Config] = Grub2Config(context_wrap(case[0][1]))
un = UserNamespaces(context.get(CmdLine), context.get(Grub2Config))
assert un.enabled() == case[1][0]
assert un.enabled_configs() == case[1][1]
|
RedHatInsights/insights-core
|
insights/combiners/tests/test_user_namespaces.py
|
Python
|
apache-2.0
| 4,553
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import textwrap
import sys
class MessageStructure(object):
"""A class representing the structure for a proto message.
    This class is its own registry; calling ``get_or_create`` with the same
    message name will return the same object.
"""
_registry = {}
# A random sequence of alphanumerical characters used as a token to
# concatenate different docstrings together, then make a single pypandoc
# call and split the returned result again using the same token. This allows
# us to reduce an average number of calls to a child process (pypandoc
# starts a pandoc subprocess) from several hundreds to a single call per
# API. This execution reduces time by several orders of magnitude
# (from ~10 secs to fractions of a second per API).
_BATCH_TOKEN = "D55406F6B511E8"
@classmethod
def get_or_create(cls, name):
"""Return a Message object.
Args:
name (str): The fully qualified name of the message (for example:
``google.protobuf.SourceCodeInfo``)
Returns:
``MessageStructure``: A ``MessageStructure`` object.
"""
cls._registry.setdefault(name, cls(name=name))
return cls._registry[name]
def __init__(self, name):
self.name = name
self.docstring = ''
self.members = collections.OrderedDict()
def __hash__(self):
"""Return a hash for this object based on its name.
This makes MessageStructure objects able to be placed into a set
to handle de-duplication properly.
"""
return hash(self.name)
def __repr__(self):
tw8 = textwrap.TextWrapper(
initial_indent=' ' * 8,
subsequent_indent=' ' * 8,
)
tw12 = textwrap.TextWrapper(
initial_indent=' ' * 12,
subsequent_indent=' ' * 12,
)
answer = 'MessageStructure {\n'
answer += ' name: {0}\n'.format(self.name)
answer += ' docstring:\n{0}\n'.format(
'\n'.join(tw8.wrap(self.docstring)),
)
if len(self.members):
answer += ' members:\n'
for k, v in self.members.items():
answer += ' {name}:\n{doc}\n'.format(
name=k,
doc='\n'.join(tw12.wrap(v)),
)
answer += '}\n'
return answer
def get_meta_docstring(self):
meta_docstring = ''
if self.docstring:
meta_docstring += self.docstring
# Concatenate members adding new line and a _BATCH_TOKEN between each
# member, such that the members list can be restored later (after
# formatting) by simply splitting the big string by the same
# _BATCH_TOKEN.
for k, v in self.members.items():
if meta_docstring:
meta_docstring += "\n%s" % MessageStructure._BATCH_TOKEN
meta_docstring += v
return meta_docstring
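    # Illustration of the batching contract (example strings only): members
    # are joined with "\n" + _BATCH_TOKEN, so the combined string can later be
    # split back into per-member docstrings:
    #   meta = "first member" + "\n" + MessageStructure._BATCH_TOKEN + "second"
    #   meta.split(MessageStructure._BATCH_TOKEN) == ["first member\n", "second"]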
def get_python_docstring(self, docstring = None):
tw8 = textwrap.TextWrapper(
initial_indent=' ' * 8,
subsequent_indent=' ' * 8,
)
tw0 = textwrap.TextWrapper()
meta_docstring = docstring if docstring else self.get_meta_docstring()
answer = ''
# Reconstruct the docstrings list by splitting the meta_docstring
# by same _BATCH_TOKEN which was used to concatenate them
meta_vals = meta_docstring.split(MessageStructure._BATCH_TOKEN)
meta_index = 0
if self.docstring:
answer += '\n'.join(tw0.wrap(meta_vals[meta_index]))
meta_index += 1
if len(self.members):
answer += '\n\n'
if len(self.members):
answer += 'Attributes:\n'
keys = list(self.members.keys())
keys_index = 0
while meta_index < len(meta_vals) and keys_index < len(keys):
v = meta_vals[meta_index]
k = keys[keys_index]
answer += ' %s:\n%s\n' % (k, '\n'.join(tw8.wrap(v)))
meta_index += 1
keys_index += 1
# Done.
return answer
|
googleapis/protoc-docs-plugin
|
protoc_docs/code.py
|
Python
|
apache-2.0
| 4,774
|
#!/usr/bin/python2.7
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import datastore_errors
from create import create_photo_from_input
from model import *
from photo import PhotoError
from utils import *
from detect_spam import SpamDetector
import extend
import reveal
import subscribe
import utils
from django.utils.translation import ugettext as _
from urlparse import urlparse
# TODO(jessien): Clean up duplicate code here and in create.py.
# https://github.com/google/personfinder/issues/157
# how many days left before we warn about imminent expiration.
# Make this at least 1.
EXPIRY_WARNING_THRESHOLD = 7
class Handler(BaseHandler):
def get(self):
# Check the request parameters.
if not self.params.id:
return self.error(404, _('No person id was specified.'))
try:
person = Person.get(self.repo, self.params.id)
# TODO(ichikawa) Consider removing this "except" clause.
# I don't think ValueError is thrown here.
except ValueError:
return self.error(404,
_("This person's entry does not exist or has been deleted."))
if not person:
return self.error(404,
_("This person's entry does not exist or has been deleted."))
standalone = self.request.get('standalone')
# Render the page.
enable_notes_url = self.get_url('/enable_notes', id=self.params.id)
self.render('add_note.html',
person=person,
standalone=standalone,
enable_notes_url=enable_notes_url)
def post(self):
"""Post a note in person's record view page"""
if not self.params.text:
return self.error(
400, _('Message is required. Please go back and try again.'))
if not self.params.author_name:
return self.error(
400, _('Your name is required in the "About you" section. '
'Please go back and try again.'))
if (self.params.status == 'is_note_author' and
not self.params.author_made_contact):
return self.error(
400, _('Please check that you have been in contact with '
'the person after the disaster, or change the '
'"Status of this person" field.'))
if (self.params.status == 'believed_dead' and
not self.config.allow_believed_dead_via_ui):
return self.error(
400, _('Not authorized to post notes with the status '
'"believed_dead".'))
if (self.params.author_email and
not utils.validate_email(self.params.author_email)):
return self.error(400, _(
'The email address you entered appears to be invalid.'))
person = Person.get(self.repo, self.params.id)
if person.notes_disabled:
return self.error(
400, _('The author has disabled status updates '
'on this record.'))
try:
photo, photo_url = create_photo_from_input(
self, self.params.note_photo, self.params.note_photo_url)
except PhotoError, e:
return self.error(400, e.message)
if photo:
photo.put()
spam_detector = SpamDetector(self.config.bad_words)
spam_score = spam_detector.estimate_spam_score(self.params.text)
if (spam_score > 0):
note = NoteWithBadWords.create_original(
self.repo,
entry_date=get_utcnow(),
person_record_id=self.params.id,
author_name=self.params.author_name,
author_email=self.params.author_email,
author_phone=self.params.author_phone,
source_date=get_utcnow(),
author_made_contact=bool(self.params.author_made_contact),
status=self.params.status,
email_of_found_person=self.params.email_of_found_person,
phone_of_found_person=self.params.phone_of_found_person,
last_known_location=self.params.last_known_location,
text=self.params.text,
photo=photo,
photo_url=photo_url,
spam_score=spam_score,
confirmed=False)
# Write the new NoteWithBadWords to the datastore
note.put_new()
# When the note is detected as spam, we do not update person record
# or log action. We ask the note author for confirmation first.
return self.redirect('/post_flagged_note', id=note.get_record_id(),
author_email=note.author_email,
repo=self.repo)
else:
note = Note.create_original(
self.repo,
entry_date=get_utcnow(),
person_record_id=self.params.id,
author_name=self.params.author_name,
author_email=self.params.author_email,
author_phone=self.params.author_phone,
source_date=get_utcnow(),
author_made_contact=bool(self.params.author_made_contact),
status=self.params.status,
email_of_found_person=self.params.email_of_found_person,
phone_of_found_person=self.params.phone_of_found_person,
last_known_location=self.params.last_known_location,
text=self.params.text,
photo=photo,
photo_url=photo_url)
# Write the new regular Note to the datastore
note.put_new()
# Specially log 'believed_dead'.
if note.status == 'believed_dead':
UserActionLog.put_new(
'mark_dead', note, person.primary_full_name,
self.request.remote_addr)
# Specially log a switch to an alive status.
if (note.status in ['believed_alive', 'is_note_author'] and
person.latest_status not in ['believed_alive', 'is_note_author']):
UserActionLog.put_new('mark_alive', note, person.primary_full_name)
# Update the Person based on the Note.
if person:
person.update_from_note(note)
# Send notification to all people
# who subscribed to updates on this person
subscribe.send_notifications(self, person, [note])
# write the updated person record to datastore
db.put(person)
# If user wants to subscribe to updates, redirect to the subscribe page
if self.params.subscribe:
return self.redirect('/subscribe',
id=person.record_id,
subscribe_email=self.params.author_email,
context='add_note')
# Redirect to view page so the browser's back button works properly.
self.redirect('/view', id=self.params.id, query=self.params.query)
|
gimite/personfinder
|
app/add_note.py
|
Python
|
apache-2.0
| 7,635
|
SECRET_KEY = '\xef)*\xbc\xd7\xa9t\x7f\xbc3pH1o\xc1\xe2\xb0\x19\\L\xeb\xe3\x00\xa3'
|
ludovicchabant/Wikked
|
wikked/settings.py
|
Python
|
apache-2.0
| 85
|
"""
TLTK
Thai Language Toolkit
:See Also:
* \
https://pypi.org/project/tltk/
"""
from tltk.nlp import spell_candidates
from typing import List
def spell(text: str) -> List[str]:
return spell_candidates(text)
|
PyThaiNLP/pythainlp
|
pythainlp/spell/tltk.py
|
Python
|
apache-2.0
| 228
|
import sys
import requests
from time import time
from functools import wraps
from urlparse import urlparse
from os import unlink, makedirs
from os.path import isdir, exists
from optparse import OptionParser
from azure.storage import BlobService
VERSION = 'v1.0.0'
USAGE = 'usage: python %prog -u url -k account_key -p path -f filename\n' \
'*(Required field)'
def print_warning():
"""TODO: Docstring for print_warning.
:returns: TODO
"""
print 'Extension and Filename are mutually exclusive.'
return 1
def get_options():
"""TODO: Docstring for get_options.
:returns: TODO
"""
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option('-u', '--url', action='store', type='string',
help='Url of the vhd *', dest='url', default='')
parser.add_option('-k', '--key', action='store', type='string',
help='Account Key', dest='account_key', default='')
parser.add_option('-f', '--file', action='store', type='string',
help='File name', dest='filename', default='')
parser.add_option('-p', '--path', action='store', type='string',
help='Searching path *', dest='path', default='/')
parser.add_option('-e', '--extension', action='store', type='string',
help='Extension', dest='extension', default='')
parser.add_option('-t', '--type', action='store', type='int',
help='EXT2/3/4; 2,3,4', dest='type', default='4')
parser.add_option('--ls', action='store_true',
help='List the dir', dest='ls', default=False)
(options, args) = parser.parse_args()
len(sys.argv) == 1 and exit(parser.print_help())
options.extension and options.filename and exit(print_warning())
tmp = urlparse(options.url)
options.account_name = tmp.netloc.split('.')[0]
options.container = tmp.path.split('/')[1]
options.vhd = tmp.path.split('/')[2]
options.host_base = tmp.netloc[tmp.netloc.find('.'):]
if options.account_key:
options.blob_service = BlobService(options.account_name,
options.account_key,
host_base=options.host_base)
options.blob_service._httpclient.request_session = requests.Session()
else:
options.blob_service = None
options.path_list = split_path(options.path)
return (options, args)
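# Example of the URL decomposition above (illustrative values): for
#   https://myaccount.blob.core.windows.net/vhds/disk0.vhd
# urlparse yields netloc 'myaccount.blob.core.windows.net' and path
# '/vhds/disk0.vhd', so account_name='myaccount', container='vhds',
# vhd='disk0.vhd' and host_base='.blob.core.windows.net'.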
def log_time(fn):
"""TODO: Docstring for log_time.
:fn: TODO
:returns: TODO
"""
@wraps(fn)
def wrapper(*args, **kwargs):
start_time = time()
result = fn(*args, **kwargs)
print '%s -> Time used : %d\n' % (fn.__name__, time() - start_time)
return result
return wrapper
def embed_params(**kwargs):
"""TODO: Docstring for embed_params.
:**kwargs: TODO
:returns: TODO
"""
def decorator(fn):
@wraps(fn)
def wrapper(*arg):
return fn(*arg, **kwargs)
return wrapper
return decorator
def split_path(path):
"""TODO: Docstring for split_path.
:path: TODO
:returns: TODO
"""
item = [x for x in path.split('/') if x != '']
return item
def init_dir(path):
"""TODO: Docstring for init_dir.
:path: TODO
:returns: TODO
"""
if not isdir(path):
exists(path) and unlink(path)
makedirs(path)
|
shiehinms/vminspector
|
util.py
|
Python
|
apache-2.0
| 3,436
|
# Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase in the AdaNet workflow."""
from typing import Callable, Iterable, Iterator, Union
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class KerasTrainerPhase(DatasetProvider, ModelProvider):
"""Trains Keras models."""
def __init__(self,
models: Union[Iterable[tf.keras.Model],
Callable[[], Iterable[tf.keras.Model]]],
storage: Storage = InMemoryStorage()):
"""Initializes a KerasTrainerPhase.
Args:
models: A list of `tf.keras.Model` instances or a list of callables that
return `tf.keras.Model` instances.
storage: A `Storage` instance.
"""
    # TODO: Consume arbitrary fit inputs.
# Dataset should be wrapped inside a work unit.
# For instance when you create KerasTrainer work unit the dataset is
# encapsulated inside that work unit.
    # What if you want to run on different (parts of the) datasets?
    # What if a work unit consumes numpy arrays?
super().__init__(storage)
self._models = models
def work_units(self, previous_phase: DatasetProvider) -> Iterator[WorkUnit]:
self._train_dataset = previous_phase.get_train_dataset()
self._eval_dataset = previous_phase.get_eval_dataset()
models = self._models
if callable(models):
models = models()
for model in models:
yield KerasTrainerWorkUnit(model, self._train_dataset, self._eval_dataset,
self._storage)
def get_models(self) -> Iterable[tf.keras.Model]:
return self._storage.get_models()
def get_best_models(self, num_models) -> Iterable[tf.keras.Model]:
return self._storage.get_best_models(num_models)
def get_train_dataset(self) -> tf.data.Dataset:
return self._train_dataset
def get_eval_dataset(self) -> tf.data.Dataset:
return self._eval_dataset
|
tensorflow/adanet
|
adanet/experimental/phases/keras_trainer_phase.py
|
Python
|
apache-2.0
| 2,844
|
""" Cisco_IOS_XR_ipv6_ma_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv6\-ma package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-ifmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Ipv6DefaultPingEnum(Enum):
"""
Ipv6DefaultPingEnum
Ipv6 default ping
.. data:: DISABLED = 0
Default route is not allowed to match when
checking source address
.. data:: ENABLED = 1
Allow default route to match when checking
source address
"""
DISABLED = 0
ENABLED = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_ma_cfg as meta
return meta._meta_table['Ipv6DefaultPingEnum']
class Ipv6QppbEnum(Enum):
"""
Ipv6QppbEnum
Ipv6 qppb
.. data:: NONE = 0
No QPPB configuration
.. data:: IP_PRECEDENCE = 1
Enable ip-precedence based QPPB
.. data:: QOS_GROUP = 2
Enable qos-group based QPPB
.. data:: BOTH = 3
Enable both ip-precedence and qos-group based
QPPB
"""
NONE = 0
IP_PRECEDENCE = 1
QOS_GROUP = 2
BOTH = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_ma_cfg as meta
return meta._meta_table['Ipv6QppbEnum']
class Ipv6ReachableEnum(Enum):
"""
Ipv6ReachableEnum
Ipv6 reachable
.. data:: ANY = 0
Source is reachable via any interface
.. data:: RECEIVED = 1
Source is reachable via interface on which
packet was received
"""
ANY = 0
RECEIVED = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_ma_cfg as meta
return meta._meta_table['Ipv6ReachableEnum']
class Ipv6SelfPingEnum(Enum):
"""
Ipv6SelfPingEnum
Ipv6 self ping
.. data:: DISABLED = 0
Doesn't allow router to ping itself
.. data:: ENABLED = 1
Allow router to ping itself
"""
DISABLED = 0
ENABLED = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_ma_cfg as meta
return meta._meta_table['Ipv6SelfPingEnum']
|
abhikeshav/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv6_ma_cfg.py
|
Python
|
apache-2.0
| 2,508
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Author: Paul Belanger <paul.belanger@polybeacon.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import mock
from sarlacc.tests.asterisk.agi import test
class TestCase(test.TestCase):
@mock.patch('sys.stdin', cStringIO.StringIO("200 result=-1"))
def test_say_date_failure(self):
with mock.patch(
'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
res, dtmf = self.agi.say_date(epoch='1363725155')
self.assertEqual(
mock_stdout.getvalue(), 'SAY DATE 1363725155 ""\n'
)
self.assertFalse(res)
self.assertEqual(dtmf, '')
@mock.patch('sys.stdin', cStringIO.StringIO("200 result=0"))
def test_say_date_success(self):
with mock.patch(
'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
res, dtmf = self.agi.say_date(epoch='1363725172')
self.assertEqual(
mock_stdout.getvalue(), 'SAY DATE 1363725172 ""\n'
)
self.assertTrue(res)
self.assertEqual(dtmf, '')
@mock.patch('sys.stdin', cStringIO.StringIO("200 result=49"))
def test_say_date_digit_pressed(self):
with mock.patch(
'sys.stdout', new_callable=cStringIO.StringIO) as mock_stdout:
res, dtmf = self.agi.say_date(epoch='1363725192', digits='1234')
self.assertEqual(
mock_stdout.getvalue(), 'SAY DATE 1363725192 "1234"\n'
)
self.assertTrue(res)
self.assertEqual(dtmf, '1')
|
kickstandproject/sarlacc
|
sarlacc/tests/asterisk/agi/test_say_date.py
|
Python
|
apache-2.0
| 2,178
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "MasterNodeSettings",
"description": "Serialized MasterNodeSettings object",
"type": "object",
"properties": {
"settings": {"type": "object"}
}
}
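# Illustrative request body accepted by this schema (any JSON object is valid
# for "settings"): {"settings": {"ui_settings": {"view_mode": "advanced"}}}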
|
SmartInfrastructures/fuel-web-dev
|
nailgun/nailgun/api/v1/validators/json_schema/master_node_settings.py
|
Python
|
apache-2.0
| 887
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import ad_group as gagr_ad_group
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.services",
marshal="google.ads.googleads.v10",
manifest={
"MutateAdGroupsRequest",
"AdGroupOperation",
"MutateAdGroupsResponse",
"MutateAdGroupResult",
},
)
class MutateAdGroupsRequest(proto.Message):
r"""Request message for
[AdGroupService.MutateAdGroups][google.ads.googleads.v10.services.AdGroupService.MutateAdGroups].
Attributes:
customer_id (str):
Required. The ID of the customer whose ad
groups are being modified.
operations (Sequence[google.ads.googleads.v10.services.types.AdGroupOperation]):
Required. The list of operations to perform
on individual ad groups.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="AdGroupOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class AdGroupOperation(proto.Message):
r"""A single operation (create, update, remove) on an ad group.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v10.resources.types.AdGroup):
Create operation: No resource name is
expected for the new ad group.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v10.resources.types.AdGroup):
Update operation: The ad group is expected to
have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the removed ad group
is expected, in this format:
``customers/{customer_id}/adGroups/{ad_group_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_ad_group.AdGroup,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=gagr_ad_group.AdGroup,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
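# Illustrative use of the ``operation`` oneof (resource names are made up):
# populating one member clears the others, so an update operation looks like
#   op = AdGroupOperation()
#   op.update.resource_name = "customers/1234567890/adGroups/987654321"
#   op.update.name = "Renamed ad group"
#   op.update_mask.paths.append("name")
# and assigning to ``remove`` afterwards would discard the ``update`` payload.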
class MutateAdGroupsResponse(proto.Message):
r"""Response message for an ad group mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v10.services.types.MutateAdGroupResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateAdGroupResult",
)
class MutateAdGroupResult(proto.Message):
r"""The result for the ad group mutate.
Attributes:
resource_name (str):
Returned for successful operations.
ad_group (google.ads.googleads.v10.resources.types.AdGroup):
The mutated ad group with only mutable fields after mutate.
The field will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
ad_group = proto.Field(
proto.MESSAGE, number=2, message=gagr_ad_group.AdGroup,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/types/ad_group_service.py
|
Python
|
apache-2.0
| 6,155
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fluentcms_teaser', '0002_auto_20150903_0711'),
]
operations = [
migrations.AddField(
model_name='teaseritem',
name='url_title',
field=models.CharField(max_length=200, null=True, verbose_name='URL title', blank=True),
),
]
|
bashu/fluentcms-teaser
|
fluentcms_teaser/migrations/0003_teaseritem_url_title.py
|
Python
|
apache-2.0
| 466
|
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'qt': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'svg': 'svg',
'pdf': 'pdf',
'ps': 'ps',
'inline': 'module://ipykernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
'widget': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
    argument list contains references to invalid figures, a warning is printed
    and the function continues with the remaining figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = {
"format":fmt,
"facecolor":fig.get_facecolor(),
"edgecolor":fig.get_edgecolor(),
"dpi":dpi,
"bbox_inches":bbox_inches,
}
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
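# Usage sketch (assumes a working matplotlib install; not executed here):
#   import matplotlib
#   matplotlib.use('Agg')
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [1, 0])
#   png_bytes = print_figure(fig, fmt='png')   # bytes
#   svg_text = print_figure(fig, fmt='svg')    # unicode str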
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = {"width": w//2, "height":h//2}
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, str):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline','agg').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
if gui == 'agg':
gui = None
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
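# For example (illustrative): find_gui_and_backend('qt') returns
# ('qt', 'Qt5Agg'), while find_gui_and_backend('agg') returns (None, 'agg')
# because the Agg backend needs no GUI event loop.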
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = {}
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
    # Only enable the formats once -> don't change the enabled formats (which
    # the user may have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
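# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the helpers above directly, assuming a
# headless environment where the 'agg' backend is available and the rest of
# this module (figsize, getfigs, flag_calls, ...) has been imported normally.
def _demo_pylab_setup():
    user_ns = {}
    # Switch pyplot to the non-interactive Agg backend and mark it interactive.
    activate_matplotlib('agg')
    # Populate a plain dict namespace with numpy/pyplot under the usual aliases.
    import_pylab(user_ns, import_all=False)
    return user_ns['np'], user_ns['plt']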
|
Foxfanmedium/python_training
|
OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/IPython/core/pylabtools.py
|
Python
|
apache-2.0
| 14,262
|
from troposphere import (
Parameter,
Ref,
Output,
Tags,
GetAtt,
Base64,
Join,
Equals,
ec2,
elasticloadbalancing as elb,
autoscaling as asg
)
from cfn.utils.cfn import get_recent_ami
from cfn.utils.constants import (
ALLOW_ALL_CIDR,
EC2_INSTANCE_TYPES,
HTTP,
HTTPS,
POSTGRESQL,
REDIS,
SSH,
VPC_CIDR
)
from majorkirby import StackNode, MKUnresolvableInputError
class Application(StackNode):
INPUTS = {
'Tags': ['global:Tags'],
'Region': ['global:Region'],
'StackType': ['global:StackType'],
'StackColor': ['global:StackColor'],
'KeyName': ['global:KeyName'],
'AvailabilityZones': ['global:AvailabilityZones',
'VPC:AvailabilityZones'],
'RDSPassword': ['global:RDSPassword', 'DataPlane:RDSPassword'],
'AppServerInstanceType': ['global:AppServerInstanceType'],
'AppServerAMI': ['global:AppServerAMI'],
'AppServerInstanceProfile': ['global:AppServerInstanceProfile'],
'AppServerAutoScalingDesired': ['global:AppServerAutoScalingDesired'],
'AppServerAutoScalingMin': ['global:AppServerAutoScalingMin'],
'AppServerAutoScalingMax': ['global:AppServerAutoScalingMax'],
'AppServerAutoScalingScheduleStartCapacity': ['global:AppServerAutoScalingScheduleStartCapacity'], # NOQA
'AppServerAutoScalingScheduleStartRecurrence': ['global:AppServerAutoScalingScheduleStartRecurrence'], # NOQA
'AppServerAutoScalingScheduleEndCapacity': ['global:AppServerAutoScalingScheduleEndCapacity'], # NOQA
'AppServerAutoScalingScheduleEndRecurrence': ['global:AppServerAutoScalingScheduleEndRecurrence'], # NOQA
'SSLCertificateARN': ['global:SSLCertificateARN'],
'BackwardCompatSSLCertificateARN':
['global:BackwardCompatSSLCertificateARN'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'PublicHostedZoneName': ['global:PublicHostedZoneName'],
'VpcId': ['global:VpcId', 'VPC:VpcId'],
'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
'BlueTileServerDistributionEndpoint':
['global:BlueTileServerDistributionEndpoint',
'TileDeliveryNetwork:BlueTileServerDistributionEndpoint'],
'GreenTileServerDistributionEndpoint':
['global:GreenTileServerDistributionEndpoint',
'TileDeliveryNetwork:GreenTileServerDistributionEndpoint'],
'ITSIBaseURL': ['global:ITSIBaseURL'],
'ITSISecretKey': ['global:ITSISecretKey'],
'ConcordSecretKey': ['global:ConcordSecretKey'],
'HydroShareBaseURL': ['global:HydroShareBaseURL'],
'HydroShareSecretKey': ['global:HydroShareSecretKey'],
'SRATCatchmentAPIURL': ['global:SRATCatchmentAPIURL'],
'SRATCatchmentAPIKey': ['global:SRATCatchmentAPIKey'],
'RollbarServerSideAccessToken':
['global:RollbarServerSideAccessToken'],
'ClientAppUserPassword': ['global:ClientAppUserPassword'],
'PapertrailHost': ['global:PapertrailHost'],
'PapertrailPort': ['global:PapertrailPort'],
}
DEFAULTS = {
'Tags': {},
'Region': 'us-east-1',
'StackType': 'Staging',
'StackColor': 'Green',
'KeyName': 'mmw-stg',
'AppServerInstanceType': 't2.small',
'AppServerInstanceProfile': 'AppServerInstanceProfile',
'AppServerAutoScalingDesired': '1',
'AppServerAutoScalingMin': '1',
'AppServerAutoScalingMax': '1',
}
ATTRIBUTES = {
'StackType': 'StackType',
'StackColor': 'StackColor',
}
def set_up_stack(self):
super(Application, self).set_up_stack()
self.default_tags = self.get_input('Tags').copy()
self.region = self.get_input('Region')
self.add_description('Application server stack for MMW')
# Parameters
self.color = self.add_parameter(Parameter(
'StackColor', Type='String',
Description='Stack color', AllowedValues=['Blue', 'Green']
), 'StackColor')
self.keyname = self.add_parameter(Parameter(
'KeyName', Type='String',
Description='Name of an existing EC2 key pair'
), 'KeyName')
self.availability_zones = self.add_parameter(Parameter(
'AvailabilityZones', Type='CommaDelimitedList',
Description='Comma delimited list of availability zones'
), 'AvailabilityZones')
self.rds_password = self.add_parameter(Parameter(
'RDSPassword', Type='String', NoEcho=True,
Description='Database password',
), 'RDSPassword')
self.app_server_instance_type = self.add_parameter(Parameter(
'AppServerInstanceType', Type='String', Default='t2.small',
Description='Application server EC2 instance type',
AllowedValues=EC2_INSTANCE_TYPES,
ConstraintDescription='must be a valid EC2 instance type.'
), 'AppServerInstanceType')
self.app_server_ami = self.add_parameter(Parameter(
'AppServerAMI', Type='String',
Default=self.get_recent_app_server_ami(),
Description='Application server AMI'
), 'AppServerAMI')
self.app_server_instance_profile = self.add_parameter(Parameter(
'AppServerInstanceProfile', Type='String',
Default='AppServerInstanceProfile',
Description='Application server instance profile'
), 'AppServerInstanceProfile')
self.app_server_auto_scaling_desired = self.add_parameter(Parameter(
'AppServerAutoScalingDesired', Type='String', Default='1',
Description='Application server AutoScalingGroup desired'
), 'AppServerAutoScalingDesired')
self.app_server_auto_scaling_min = self.add_parameter(Parameter(
'AppServerAutoScalingMin', Type='String', Default='1',
Description='Application server AutoScalingGroup minimum'
), 'AppServerAutoScalingMin')
self.app_server_auto_scaling_max = self.add_parameter(Parameter(
'AppServerAutoScalingMax', Type='String', Default='1',
Description='Application server AutoScalingGroup maximum'
), 'AppServerAutoScalingMax')
self.app_server_auto_scaling_schedule_start_recurrence = self.add_parameter( # NOQA
Parameter(
'AppServerAutoScalingScheduleStartRecurrence', Type='String',
Default='0 12 * * 1-5',
Description='Application server ASG schedule start recurrence'
), 'AppServerAutoScalingScheduleStartRecurrence')
self.app_server_auto_scaling_schedule_start_capacity = self.add_parameter( # NOQA
Parameter(
'AppServerAutoScalingScheduleStartCapacity', Type='String',
Default='1',
Description='Application server ASG schedule start capacity'
), 'AppServerAutoScalingScheduleStartCapacity')
self.app_server_auto_scaling_schedule_end_recurrence = self.add_parameter( # NOQA
Parameter(
'AppServerAutoScalingScheduleEndRecurrence', Type='String',
Default='0 0 * * *',
Description='Application server ASG schedule end recurrence'
), 'AppServerAutoScalingScheduleEndRecurrence')
self.app_server_auto_scaling_schedule_end_capacity = self.add_parameter( # NOQA
Parameter(
'AppServerAutoScalingScheduleEndCapacity', Type='String',
Default='1',
Description='Application server ASG schedule end capacity'
), 'AppServerAutoScalingScheduleEndCapacity')
self.ssl_certificate_arn = self.add_parameter(Parameter(
'SSLCertificateARN', Type='String',
Description='ARN for a SSL certificate stored in IAM'
), 'SSLCertificateARN')
self.backward_compat_ssl_certificate_arn = self.add_parameter(
Parameter(
'BackwardCompatSSLCertificateARN', Type='String',
Description='ARN for a SSL certificate stored in IAM'
), 'BackwardCompatSSLCertificateARN')
self.public_subnets = self.add_parameter(Parameter(
'PublicSubnets', Type='CommaDelimitedList',
Description='A list of public subnets'
), 'PublicSubnets')
self.private_subnets = self.add_parameter(Parameter(
'PrivateSubnets', Type='CommaDelimitedList',
Description='A list of private subnets'
), 'PrivateSubnets')
self.public_hosted_zone_name = self.add_parameter(Parameter(
'PublicHostedZoneName', Type='String',
Description='Route 53 public hosted zone name'
), 'PublicHostedZoneName')
self.vpc_id = self.add_parameter(Parameter(
'VpcId', Type='String',
Description='VPC ID'
), 'VpcId')
self.notification_topic_arn = self.add_parameter(Parameter(
'GlobalNotificationsARN', Type='String',
Description='ARN for an SNS topic to broadcast notifications'
), 'GlobalNotificationsARN')
self.blue_tile_distribution_endpoint = self.add_parameter(Parameter(
'BlueTileServerDistributionEndpoint', Type='String',
Description='Endpoint for blue tile CloudFront distribution'
), 'BlueTileServerDistributionEndpoint')
self.green_tile_distribution_endpoint = self.add_parameter(Parameter(
'GreenTileServerDistributionEndpoint', Type='String',
Description='Endpoint for green tile CloudFront distribution'
), 'GreenTileServerDistributionEndpoint')
self.itsi_base_url = self.add_parameter(Parameter(
'ITSIBaseURL', Type='String',
Description='Base URL for ITSI portal'
), 'ITSIBaseURL')
self.itsi_secret_key = self.add_parameter(Parameter(
'ITSISecretKey', Type='String', NoEcho=True,
Description='Secret key for ITSI portal integration'
), 'ITSISecretKey')
self.concord_secret_key = self.add_parameter(Parameter(
'ConcordSecretKey', Type='String', NoEcho=True,
Description='Secret key for Concord OAuth integration'
), 'ConcordSecretKey')
self.hydroshare_base_url = self.add_parameter(Parameter(
'HydroShareBaseURL', Type='String',
Description='Base URL for HydroShare portal'
), 'HydroShareBaseURL')
self.hydroshare_secret_key = self.add_parameter(Parameter(
'HydroShareSecretKey', Type='String', NoEcho=True,
Description='Secret key for HydroShare portal integration'
), 'HydroShareSecretKey')
self.srat_catchment_api_url = self.add_parameter(Parameter(
'SRATCatchmentAPIURL', Type='String',
Description='URL for the SRAT Catchment API'
), 'SRATCatchmentAPIURL')
self.srat_catchment_api_key = self.add_parameter(Parameter(
'SRATCatchmentAPIKey', Type='String', NoEcho=True,
Description='API key for the SRAT Catchment API'
), 'SRATCatchmentAPIKey')
self.client_app_user_password = self.add_parameter(Parameter(
'ClientAppUserPassword', Type='String', NoEcho=True,
Description='Password for the client apps django account',
), 'ClientAppUserPassword')
self.papertrail_host = self.add_parameter(Parameter(
'PapertrailHost', Type='String',
Description='Hostname for Papertrail log destination',
), 'PapertrailHost')
self.papertrail_port = self.add_parameter(Parameter(
'PapertrailPort', Type='String',
Description='Port for Papertrail log destination',
), 'PapertrailPort')
app_server_lb_security_group, \
app_server_security_group = self.create_security_groups()
app_server_lb, \
backward_compat_app_server_lb = self.create_load_balancers(
app_server_lb_security_group)
self.create_auto_scaling_resources(app_server_security_group,
app_server_lb,
backward_compat_app_server_lb)
self.add_output(Output('AppServerLoadBalancerEndpoint',
Value=GetAtt(app_server_lb, 'DNSName')))
self.add_output(Output('AppServerLoadBalancerHostedZoneNameID',
Value=GetAtt(app_server_lb,
'CanonicalHostedZoneNameID')))
self.add_output(Output('BackwardCompatAppServerLoadBalancerEndpoint',
Value=GetAtt(backward_compat_app_server_lb,
'DNSName')))
self.add_output(
Output('BackwardCompatAppServerLoadBalancerHostedZoneNameID',
Value=GetAtt(backward_compat_app_server_lb,
'CanonicalHostedZoneNameID')))
def get_recent_app_server_ami(self):
try:
app_server_ami_id = self.get_input('AppServerAMI')
except MKUnresolvableInputError:
filters = {'name': 'mmw-app-*'}
app_server_ami_id = get_recent_ami(self.aws_profile, filters=filters,
region=self.region)
return app_server_ami_id
def create_security_groups(self):
app_server_lb_security_group_name = 'sgAppServerLoadBalancer'
app_server_lb_security_group = self.add_resource(ec2.SecurityGroup(
app_server_lb_security_group_name,
GroupDescription='Enables access to application servers via a '
'load balancer',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [HTTP]
],
Tags=self.get_tags(Name=app_server_lb_security_group_name)
))
app_server_security_group_name = 'sgAppServer'
app_server_security_group = self.add_resource(ec2.SecurityGroup(
app_server_security_group_name,
DependsOn='sgAppServerLoadBalancer',
GroupDescription='Enables access to application servers',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [SSH, HTTP]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
FromPort=HTTP, ToPort=HTTP
)
for sg in [app_server_lb_security_group]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [POSTGRESQL, REDIS]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS, self.get_input('PapertrailPort')]
],
Tags=self.get_tags(Name=app_server_security_group_name)
))
return app_server_lb_security_group, app_server_security_group
def create_load_balancers(self, app_server_lb_security_group):
app_server_lb_name = 'elbAppServer'
backward_compat_app_server_lb_name = 'elbBackwardCompatAppServer'
return [
self.add_resource(elb.LoadBalancer(
app_server_lb_name,
ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
Enabled=True,
Timeout=300,
),
CrossZone=True,
SecurityGroups=[Ref(app_server_lb_security_group)],
Listeners=[
elb.Listener(
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
),
elb.Listener(
LoadBalancerPort='443',
InstancePort='80',
Protocol='HTTPS',
SSLCertificateId=Ref(self.ssl_certificate_arn)
)
],
HealthCheck=elb.HealthCheck(
Target='HTTP:80/health-check/',
HealthyThreshold='3',
UnhealthyThreshold='2',
Interval='30',
Timeout='5',
),
Subnets=Ref(self.public_subnets),
Tags=self.get_tags(Name=app_server_lb_name)
)),
self.add_resource(elb.LoadBalancer(
backward_compat_app_server_lb_name,
ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
Enabled=True,
Timeout=300,
),
CrossZone=True,
SecurityGroups=[Ref(app_server_lb_security_group)],
Listeners=[
elb.Listener(
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
),
elb.Listener(
LoadBalancerPort='443',
InstancePort='80',
Protocol='HTTPS',
SSLCertificateId=Ref(
self.backward_compat_ssl_certificate_arn)
)
],
HealthCheck=elb.HealthCheck(
Target='HTTP:80/health-check/',
HealthyThreshold='3',
UnhealthyThreshold='2',
Interval='30',
Timeout='5',
),
Subnets=Ref(self.public_subnets),
Tags=self.get_tags(Name=backward_compat_app_server_lb_name)
))]
def create_auto_scaling_resources(self, app_server_security_group,
app_server_lb,
backward_compat_app_server_lb):
self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))
blue_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerBlue',
Condition='BlueCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config(
self.blue_tile_distribution_endpoint)))
))
blue_app_server_asg = self.add_resource(
asg.AutoScalingGroup(
'asgAppServerBlue',
AvailabilityZones=Ref(self.availability_zones),
Condition='BlueCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(blue_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb),
Ref(backward_compat_app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
self.add_resource(
asg.ScheduledAction(
'schedTileServerAutoScalingStartBlue',
AutoScalingGroupName=Ref(blue_app_server_asg),
Condition='BlueCondition',
DesiredCapacity=Ref(
self.app_server_auto_scaling_schedule_start_capacity),
Recurrence=Ref(
self.app_server_auto_scaling_schedule_start_recurrence)
)
)
self.add_resource(
asg.ScheduledAction(
'schedTileServerAutoScalingEndBlue',
AutoScalingGroupName=Ref(blue_app_server_asg),
Condition='BlueCondition',
DesiredCapacity=Ref(
self.app_server_auto_scaling_schedule_end_capacity),
Recurrence=Ref(
self.app_server_auto_scaling_schedule_end_recurrence)
)
)
green_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerGreen',
Condition='GreenCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config(
self.green_tile_distribution_endpoint)))
))
green_app_server_asg = self.add_resource(
asg.AutoScalingGroup(
'asgAppServerGreen',
AvailabilityZones=Ref(self.availability_zones),
Condition='GreenCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(green_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb),
Ref(backward_compat_app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
self.add_resource(
asg.ScheduledAction(
'schedTileServerAutoScalingStartGreen',
AutoScalingGroupName=Ref(green_app_server_asg),
Condition='GreenCondition',
DesiredCapacity=Ref(
self.app_server_auto_scaling_schedule_start_capacity),
Recurrence=Ref(
self.app_server_auto_scaling_schedule_start_recurrence)
)
)
self.add_resource(
asg.ScheduledAction(
'schedTileServerAutoScalingEndGreen',
AutoScalingGroupName=Ref(green_app_server_asg),
Condition='GreenCondition',
DesiredCapacity=Ref(
self.app_server_auto_scaling_schedule_end_capacity),
Recurrence=Ref(
self.app_server_auto_scaling_schedule_end_recurrence)
)
)
def get_cloud_config(self, tile_distribution_endpoint):
return ['#cloud-config\n',
'\n',
'write_files:\n',
' - path: /etc/mmw.d/env/MMW_STACK_COLOR\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.color), '\n',
' - path: /etc/mmw.d/env/MMW_STACK_TYPE\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', self.get_input('StackType'), '\n',
' - path: /etc/mmw.d/env/MMW_PUBLIC_HOSTED_ZONE_NAME\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.public_hosted_zone_name), '\n',
' - path: /etc/mmw.d/env/MMW_DB_PASSWORD\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.rds_password), '\n',
' - path: /etc/mmw.d/env/MMW_TILER_HOST\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(tile_distribution_endpoint), '\n',
' - path: /etc/mmw.d/env/ROLLBAR_SERVER_SIDE_ACCESS_TOKEN\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', self.get_input('RollbarServerSideAccessToken'), '\n', # NOQA
' - path: /etc/mmw.d/env/MMW_ITSI_BASE_URL\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.itsi_base_url), '\n',
' - path: /etc/mmw.d/env/MMW_ITSI_SECRET_KEY\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.itsi_secret_key), '\n',
' - path: /etc/mmw.d/env/MMW_CONCORD_SECRET_KEY\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.concord_secret_key), '\n',
' - path: /etc/mmw.d/env/MMW_HYDROSHARE_BASE_URL\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.hydroshare_base_url), '\n',
' - path: /etc/mmw.d/env/MMW_HYDROSHARE_SECRET_KEY\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.hydroshare_secret_key), '\n',
' - path: /etc/mmw.d/env/MMW_SRAT_CATCHMENT_API_URL\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.srat_catchment_api_url), '\n',
' - path: /etc/mmw.d/env/MMW_SRAT_CATCHMENT_API_KEY\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.srat_catchment_api_key), '\n',
' - path: /etc/mmw.d/env/MMW_CLIENT_APP_USER_PASSWORD\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.client_app_user_password), '\n',
'\n',
'rsyslog:\n',
' - $DefaultNetstreamDriverCAFile /etc/papertrail-bundle.pem # trust these CAs\n',
' - $PreserveFQDN off\n',
' - $ActionSendStreamDriver gtls # use gtls netstream driver\n',
' - $ActionSendStreamDriverMode 1 # require TLS\n',
' - $ActionSendStreamDriverAuthMode x509/name # authenticate by hostname\n',
' - $ActionSendStreamDriverPermittedPeer *.papertrailapp.com\n',
' - $ActionResumeInterval 10\n',
' - $ActionQueueSize 100000\n',
' - $ActionQueueDiscardMark 97500\n',
' - $ActionQueueHighWaterMark 80000\n',
' - $ActionQueueType LinkedList\n',
' - $ActionQueueFileName papertrailqueue\n',
' - $ActionQueueCheckpointInterval 100\n',
' - $ActionQueueMaxDiskSpace 2g\n',
' - $ActionResumeRetryCount -1\n',
' - $ActionQueueSaveOnShutdown on\n',
' - $ActionQueueTimeoutEnqueue 2\n',
' - $ActionQueueDiscardSeverity 0\n',
' - "*.* @@', Ref(self.papertrail_host), ':', Ref(
self.papertrail_port), '"\n',
'rsyslog_filename: 22-mmw-papertrail.conf\n']
def get_tags(self, **kwargs):
"""Helper method to return Troposphere tags + default tags
Args:
**kwargs: arbitrary keyword arguments to be used as tags
"""
kwargs.update(self.default_tags)
return Tags(**kwargs)
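# --- Illustrative sketch (not part of the original stack definition) ---
# get_tags() merges caller-supplied tags with the stack-wide defaults via
# kwargs.update(self.default_tags), so a default tag silently overrides a
# keyword argument with the same key. A standalone demonstration of that merge
# behaviour, using hypothetical tag values:
def _demo_tag_merge():
    default_tags = {'Environment': 'Staging'}
    kwargs = {'Name': 'AppServer', 'Environment': 'Production'}
    kwargs.update(default_tags)
    # The default wins: {'Name': 'AppServer', 'Environment': 'Staging'}
    return Tags(**kwargs)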
|
WikiWatershed/model-my-watershed
|
deployment/cfn/application.py
|
Python
|
apache-2.0
| 29,879
|
# encoding: utf-8
import os
import sys
sys.path.append( os.path.join(os.path.dirname(__file__), '..', '..') )
import airflow
from dcmp.models import DcmpDag
from dcmp.dag_converter import dag_converter
from airflow.utils.db import provide_session
@provide_session
def main(session=None):
dcmp_dags = session.query(DcmpDag).order_by(DcmpDag.dag_name).all()
for dcmp_dag in dcmp_dags:
print ("cleaning %s" % dcmp_dag)
dcmp_dag_conf = dcmp_dag.get_dcmp_dag_conf(session=session)
dcmp_dag_conf.conf = dag_converter.dict_to_json(dcmp_dag_conf.conf)
session.commit()
print ("%s cleaned" % dcmp_dag)
if __name__ == "__main__":
main()
|
lattebank/airflow-dag-creation-manager-plugin
|
plugins/dcmp/tools/clean_dag_conf.py
|
Python
|
apache-2.0
| 685
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# good to go!
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/eucherius.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.split(':')[0].strip()
colltitle = collSOUP.title.string.split(':')[1].strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Eucherius'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
title = colltitle
getp = textsoup.find_all('p')
verse = 0
chapter = -1
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
verses = []
text = p.get_text()
text = text.strip()
if p.find('b') is not None:
continue
                if re.match(r'2\.', text):
# handle 2 verses in same <p>
verse = 1
text = text.replace("2.", "").strip()
                    lines = re.split(r"3\.", text)
for l in lines:
if l is None or l == '' or l.isspace():
continue
if l.startswith('Christian'):
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
continue
                elif re.match(r'[0-9]+\.', text):
# get verse numbers
verse = text.split(".")[0].strip()
text = text.replace(verse + ".", "").strip()
verses.append(text)
for v in verses:
if v.startswith('Christian'):
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
if __name__ == '__main__':
main()
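# Illustrative note (not part of the original scraper), assuming a paragraph of
# the form "2. Prima pars ... 3. Secunda pars ...": the branch above removes the
# "2." marker, splits the remainder on "3.", and inserts each non-empty chunk as
# its own verse row; otherwise a leading "<n>." is taken as the verse number for
# the whole paragraph.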
|
oudalab/phyllo
|
phyllo/extractors/eucheriusDB.py
|
Python
|
apache-2.0
| 3,202
|
"""
Lvm - Combiner for lvm information
==================================
This shared combiner for LVM parsers consolidates all of the information for
the following commands:
* LVS
* PVS
* VGS
The parsers gather this information from multiple locations, such as Insights
data and SOS Report data, and this combiner merges it. Sample input data and examples
are shown for LVS, with PVS and VGS being similar.
Sample input data for LVS commands as parsed by the parsers::
# Output of the command:
# /sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert LV Tags Devices
root rhel -wi-ao---- 17.47g /dev/vda2(512)
swap rhel -wi-ao---- 2.00g /dev/vda2(0)
# Output of the command:
# /sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,vg_name,lv_size,region_size,mirror_log,lv_attr,devices,region_size --config="global{locking_type=0}"
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_LV_NAME='root'|LVM2_VG_NAME='rhel'|LVM2_LV_SIZE='17.47g'|LVM2_REGION_SIZE='0 '|LVM2_MIRROR_LOG=''|LVM2_LV_ATTR='-wi-ao----'|LVM2_DEVICES='/dev/vda2(512)'|LVM2_REGION_SIZE='0 '
LVM2_LV_NAME='swap'|LVM2_VG_NAME='rhel'|LVM2_LV_SIZE='2.00g'|LVM2_REGION_SIZE='0 '|LVM2_MIRROR_LOG=''|LVM2_LV_ATTR='-wi-ao----'|LVM2_DEVICES='/dev/vda2(0)'|LVM2_REGION_SIZE='0 '
Because logical volume names may be duplicated on different volume groups, the
key used for the logical volume information is a named tuple of type `LvVgName`.
Physical volumes and volume groups do not have the same limitation so the
key used for that information is simply the string name of the physical
device or volume group.
Examples:
>>> lvm_info = shared[Lvm]
>>> lvm_info.logical_volumes[LvVgName(LV='root', VG='rhel')]
{
'Log': '', 'LPerms': None, 'Health': None, 'MaxSync': None, 'Pool_UUID': None, 'DevOpen': None, 'SkipAct': None,
'Parent': None, 'Descendants': None, 'WhenFull': None, 'Lock_Args': None, 'CacheReadMisses': None, 'Host': None,
'CacheWriteHits': None, 'Active': None, 'Path': None, 'LV_UUID': None, 'Data': None, 'LV_Tags': None, 'Pool': None,
'CacheDirtyBlocks': None, 'InitImgSync': None, 'Region': '0', 'LiveTable': None, 'MinSync': None,
'Devices': '/dev/vda2(512)', 'ActLocal': None, 'Time': None, 'Cpy%Sync': None, 'Modules': None, 'Data_UUID': None, 'Origin': None,
'Move': None, 'Origin_UUID': None, 'Converting': None, 'LSize': '17.47g', '#Seg': None, 'Ancestors': None, 'Layout': None,
'Meta%': None, 'Min': None, 'Data%': None, 'AllocLock': None, 'CacheWriteMisses': None, 'AllocPol': None,
'CacheTotalBlocks': None, 'MergeFailed': None, 'Mismatches': None, 'WBehind': None, 'ActExcl': None, 'ActRemote': None,
'OSize': None, 'KMin': None, 'LV': 'root', 'InactiveTable': None, 'Move_UUID': None, 'Maj': None, 'Role': None, 'KMaj': None,
'Convert': None, 'LProfile': None, 'Attr': '-wi-ao----', 'VG': 'rhel', 'KRahead': None, 'Rahead': None, 'Log_UUID': None,
'MSize': None, 'Merging': None, 'DMPath': None, 'Meta_UUID': None, 'SnapInvalid': None, 'ImgSynced': None,
'CacheReadHits': None, 'Meta': None, 'Snap%': None, 'Suspended': None, 'FixMin': None, 'CacheUsedBlocks': None, 'SyncAction': None
}
>>> lvm_info.logical_volumes[LvVgName('root','rhel')]['LSize']
'17.47g'
>>> lvm_info.logical_volume_names
{LvVgName(LV='root', VG='rhel'), LvVgName(LV='swap', VG='rhel')}
>>> lvm_info.filter_logical_volumes(lv_filter='root')
{LvVgName(LV='root', VG='rhel'): {
'Log': '', 'LPerms': None, 'Health': None, 'MaxSync': None, 'Pool_UUID': None, 'DevOpen': None, 'SkipAct': None,
'Parent': None, 'Descendants': None, 'WhenFull': None, 'Lock_Args': None, 'CacheReadMisses': None, 'Host': None,
'CacheWriteHits': None, 'Active': None, 'Path': None, 'LV_UUID': None, 'Data': None, 'LV_Tags': None, 'Pool': None,
'CacheDirtyBlocks': None, 'InitImgSync': None, 'Region': '0', 'LiveTable': None, 'MinSync': None,
'Devices': '/dev/vda2(512)', 'ActLocal': None, 'Time': None, 'Cpy%Sync': None, 'Modules': None, 'Data_UUID': None, 'Origin': None,
'Move': None, 'Origin_UUID': None, 'Converting': None, 'LSize': '17.47g', '#Seg': None, 'Ancestors': None, 'Layout': None,
'Meta%': None, 'Min': None, 'Data%': None, 'AllocLock': None, 'CacheWriteMisses': None, 'AllocPol': None,
'CacheTotalBlocks': None, 'MergeFailed': None, 'Mismatches': None, 'WBehind': None, 'ActExcl': None, 'ActRemote': None,
'OSize': None, 'KMin': None, 'LV': 'root', 'InactiveTable': None, 'Move_UUID': None, 'Maj': None, 'Role': None, 'KMaj': None,
'Convert': None, 'LProfile': None, 'Attr': '-wi-ao----', 'VG': 'rhel', 'KRahead': None, 'Rahead': None, 'Log_UUID': None,
'MSize': None, 'Merging': None, 'DMPath': None, 'Meta_UUID': None, 'SnapInvalid': None, 'ImgSynced': None,
'CacheReadHits': None, 'Meta': None, 'Snap%': None, 'Suspended': None, 'FixMin': None, 'CacheUsedBlocks': None, 'SyncAction': None
}}
"""
import copy
from collections import namedtuple
from insights.core.plugins import combiner
from insights.parsers.lvm import Lvs, LvsHeadings, Pvs, PvsHeadings, Vgs, VgsHeadings
from insights.parsers.lvm import LvsAll, PvsAll, VgsAll
def get_shared_data(component):
"""
Returns the actual list of component data based on how data is
stored in component, either from the `data` attribute or from the
`data['content']` attribute.
Returns:
list: List of component data.
"""
if component:
return (copy.deepcopy(component.data)
if 'content' not in component.data
else copy.deepcopy(component.data['content']))
else:
return []
def to_name_key_dict(data, name_key):
"""
Iterates a list of dictionaries where each dictionary has a `name_key`
value that is used to return a single dictionary indexed by those
values.
Returns:
dict: Dictionary keyed by `name_key` values having the information
contained in the original input list `data`.
"""
return dict((row[name_key], row) for row in data)
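# Illustrative example (not part of the original combiner): assuming two parsed
# rows keyed by volume group name, to_name_key_dict simply re-indexes the list
# by that key:
#
#     rows = [{'VG': 'rhel', 'Attr': 'wz--n-'}, {'VG': 'data', 'Attr': 'wz--n-'}]
#     to_name_key_dict(rows, 'VG')
#     # -> {'rhel': {'VG': 'rhel', ...}, 'data': {'VG': 'data', ...}}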
def merge_lvm_data(primary, secondary, name_key):
"""
Returns a dictionary containing the set of data from primary and secondary
where values in primary will always be returned if present, and values in
secondary will only be returned if not present in primary, or if the value
in primary is `None`.
Sample input Data::
primary = [
{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'name_key': 'xyz'},
{'a': None, 'b': 12, 'c': 13, 'd': 14, 'name_key': 'qrs'},
{'a': None, 'b': 12, 'c': 13, 'd': 14, 'name_key': 'def'},
]
secondary = [
{'a': 31, 'e': 33, 'name_key': 'xyz'},
{'a': 11, 'e': 23, 'name_key': 'qrs'},
{'a': 1, 'e': 3, 'name_key': 'ghi'},
]
Returns:
        dict: Dictionary of key value pairs from primary and secondary::
{
'xyz': {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 33, 'name_key': 'xyz'},
            'qrs': {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 23, 'name_key': 'qrs'},
'def': {'a': None, 'b': 12, 'c': 13, 'd': 14, 'name_key': 'def'},
'ghi': {'a': 1, 'e': 3, 'name_key': 'ghi'}
}
"""
pri_data = to_name_key_dict(primary, name_key)
# Prime results with secondary data, to be updated with primary data
combined_data = to_name_key_dict(secondary, name_key)
for name in pri_data:
if name not in combined_data:
# Data only in primary
combined_data[name] = pri_data[name]
else:
            # Data in both primary and secondary; prefer the primary value
            # unless it is None or missing
combined_data[name].update(dict(
(k, v) for k, v in pri_data[name].items()
if v is not None or k not in combined_data[name]
))
return set_defaults(combined_data)
def set_defaults(lvm_data):
"""dict: Sets all existing null string values to None."""
for l in lvm_data:
for k, v in lvm_data[l].items():
if v == '':
lvm_data[l][k] = None
return lvm_data
@combiner([Lvs, LvsHeadings, Pvs, PvsHeadings, Vgs, VgsHeadings])
class Lvm(object):
"""Class implements shared combiner for LVM information."""
LvVgName = namedtuple('LvVgName', ['LV', 'VG'])
"""Named tuple used as key for logical volumes."""
def __init__(self, lvs, lvs_headings, pvs, pvs_headings, vgs, vgs_headings):
# Volume Groups information
self.volume_groups = merge_lvm_data(get_shared_data(vgs),
get_shared_data(vgs_headings),
'VG')
"""dict: Contains a dictionary of volume group data with keys
from the original output."""
# Physical Volumes information
self.physical_volumes = merge_lvm_data(get_shared_data(pvs),
get_shared_data(pvs_headings),
'PV_KEY')
"""dict: Contains a dictionary of physical volume data with keys
from the original output."""
# Logical Volumes information
# Since logical volume names can be duplicated across volume
# groups we use a new key that combines the logical volume
# name with the volume group name to ensure it is unique
pri_lvs_data = get_shared_data(lvs)
for l in pri_lvs_data:
l['LVVG'] = Lvm.LvVgName(LV=l['LV'], VG=l['VG'])
sec_lvs_data = get_shared_data(lvs_headings)
for l in sec_lvs_data:
l['LVVG'] = Lvm.LvVgName(LV=l['LV'], VG=l['VG'])
self.logical_volumes = merge_lvm_data(pri_lvs_data,
sec_lvs_data,
'LVVG')
"""dict: Contains a dictionary of logical volume data with keys
from the original output. The key is a tuple of the
logical volume name and the volume group name. This tuple
avoids the case where logical volume names are the same
across volume groups."""
self.logical_volumes = set_defaults(self.logical_volumes)
# Since name is not used as the key we need to create the name list
self.physical_volume_names = set([p['PV'] for p in self.physical_volumes.values()])
@property
def volume_group_names(self):
"""set: Returns a set of keys from the volume group information."""
return set(self.volume_groups.keys())
@property
def logical_volume_names(self):
"""set: Returns a set of tuple keys from the logical volume information."""
return set(self.logical_volumes.keys())
def filter_volume_groups(self, vg_filter):
"""dict: Returns dictionary of volume group information with keys
containing `vg_filter`."""
return dict((k, v) for k, v in self.volume_groups.items() if vg_filter in k)
def filter_physical_volumes(self, pv_filter):
"""dict: Returns dictionary of physical volume information with keys
containing `pv_filter`."""
return dict((k, v) for k, v in self.physical_volumes.items() if pv_filter in k)
def filter_logical_volumes(self, lv_filter, vg_filter=None):
"""dict: Returns dictionary of logical volume information having the
`lv_filter` in the logical volume and if specified `vg_filter` in the
volume group."""
if vg_filter is None:
return dict((k, v) for k, v in self.logical_volumes.items()
if lv_filter in k.LV)
else:
return dict((k, v) for k, v in self.logical_volumes.items()
if lv_filter in k.LV and vg_filter in k.VG)
@combiner([LvsAll, PvsAll, VgsAll])
class LvmAll(Lvm):
"""A Lvm like shared combiner for processing LVM information including all rejected
and accepted devices"""
def __init__(self, lvsall, pvsall, vgsall):
# Volume Groups information
self.volume_groups = merge_lvm_data(get_shared_data(vgsall), [], 'VG')
"""dict: Contains a dictionary of volume group data with keys
from the original output."""
# Physical Volumes information
self.physical_volumes = merge_lvm_data(get_shared_data(pvsall), [], 'PV_KEY')
"""dict: Contains a dictionary of physical volume data with keys
from the original output."""
pri_lvs_data = get_shared_data(lvsall)
for l in pri_lvs_data:
l['LVVG'] = Lvm.LvVgName(LV=l['LV'], VG=l['VG'])
self.logical_volumes = merge_lvm_data(pri_lvs_data, [], 'LVVG')
"""dict: Contains a dictionary of logical volume data with keys
from the original output. The key is a tuple of the
logical volume name and the volume group name. This tuple
avoids the case where logical volume names are the same
across volume groups."""
self.logical_volumes = set_defaults(self.logical_volumes)
# Since name is not used as the key we need to create the name list
self.physical_volume_names = set([p['PV'] for p in self.physical_volumes.values()])
|
RedHatInsights/insights-core
|
insights/combiners/lvm.py
|
Python
|
apache-2.0
| 13,715
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sqlite3
from builtins import object, str
from contextlib import contextmanager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir_for
class StatsDBError(Exception): pass
class StatsDBFactory(Subsystem):
options_scope = 'statsdb'
@classmethod
def register_options(cls, register):
super(StatsDBFactory, cls).register_options(register)
register('--path',
default=os.path.join(register.bootstrap.pants_bootstrapdir, 'stats', 'statsdb.sqlite'),
help='Location of statsdb file.')
def get_db(self):
"""Returns a StatsDB instance configured by this factory."""
ret = StatsDB(self.get_options().path)
ret.ensure_tables()
return ret
class StatsDB(object):
def __init__(self, path):
super(StatsDB, self).__init__()
self._path = path
def ensure_tables(self):
with self._cursor() as c:
def create_index(tab, col):
c.execute("""CREATE INDEX IF NOT EXISTS {tab}_{col}_idx ON {tab}({col})""".format(
tab=tab, col=col))
c.execute("""
CREATE TABLE IF NOT EXISTS run_info (
id TEXT PRIMARY KEY,
timestamp INTEGER, -- Seconds since the epoch.
machine TEXT,
user TEXT,
version TEXT,
buildroot TEXT,
outcome TEXT,
cmd_line TEXT
)
""")
create_index('run_info', 'cmd_line')
def create_timings_table(tab):
c.execute("""
CREATE TABLE IF NOT EXISTS {tab} (
run_info_id TEXT,
label TEXT,
timing INTEGER, -- Milliseconds
FOREIGN KEY (run_info_id) REFERENCES run_info(id)
)
""".format(tab=tab))
create_index(tab, 'label')
create_timings_table('cumulative_timings')
create_timings_table('self_timings')
def insert_stats(self, stats):
try:
with self._cursor() as c:
ri = stats['run_info']
try:
c.execute("""INSERT INTO run_info VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
[ri['id'], int(float(ri['timestamp'])), ri['machine'], ri['user'],
ri['version'], ri['buildroot'], ri['outcome'], ri['cmd_line']])
except KeyError as e:
raise StatsDBError('Failed to insert stats. Key {} not found in RunInfo: {}'.format(
e.args[0], str(ri)))
rid = ri['id']
for table in ['cumulative_timings', 'self_timings']:
timings = stats[table]
for timing in timings:
try:
c.execute("""INSERT INTO {} VALUES (?, ?, ?)""".format(table),
[rid, timing['label'], self._to_ms(timing['timing'])])
except KeyError as e:
raise StatsDBError('Failed to insert stats. Key {} not found in timing: {}'.format(
e.args[0], str(timing)))
except KeyError as e:
raise StatsDBError('Failed to insert stats. Key {} not found in stats object.'.format(
e.args[0]))
def get_stats_for_cmd_line(self, timing_table, cmd_line_like):
"""Returns a generator over all (label, timing) pairs for a given cmd line.
:param timing_table: One of 'cumulative_timings' or 'self_timings'.
:param cmd_line_like: Look at all cmd lines that are LIKE this string, in the sql sense.
"""
with self._cursor() as c:
for row in c.execute("""
SELECT t.label, t.timing
FROM {} AS t INNER JOIN run_info AS ri ON (t.run_info_id=ri.id)
WHERE ri.cmd_line LIKE ?
""".format(timing_table), [cmd_line_like]):
yield row
def get_aggregated_stats_for_cmd_line(self, timing_table, cmd_line_like):
"""Returns a generator over aggregated stats for a given cmd line.
:param timing_table: One of 'cumulative_timings' or 'self_timings'.
:param cmd_line_like: Look at all cmd lines that are LIKE this string, in the sql sense.
"""
with self._cursor() as c:
for row in c.execute("""
SELECT date(ri.timestamp, 'unixepoch') as dt, t.label as label, count(*), sum(t.timing)
FROM {} AS t INNER JOIN run_info AS ri ON (t.run_info_id=ri.id)
WHERE ri.cmd_line LIKE ?
GROUP BY dt, label
ORDER BY dt, label
""".format(timing_table), [cmd_line_like]):
yield row
@staticmethod
def _to_ms(timing_secs):
"""Convert a string representing a float of seconds to an int representing milliseconds."""
return int(float(timing_secs) * 1000 + 0.5)
@contextmanager
def _connection(self):
safe_mkdir_for(self._path)
conn = sqlite3.connect(self._path)
yield conn
conn.commit()
conn.close()
@contextmanager
def _cursor(self):
with self._connection() as conn:
yield conn.cursor()
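# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of exercising StatsDB directly against a throwaway file,
# assuming a stats payload with the minimal keys insert_stats() expects; the
# path and values below are hypothetical.
def _demo_statsdb(path='/tmp/statsdb-demo.sqlite'):
    db = StatsDB(path)
    db.ensure_tables()
    db.insert_stats({
        'run_info': {
            'id': 'run-1', 'timestamp': '1500000000.0', 'machine': 'host',
            'user': 'demo', 'version': '1.0.0', 'buildroot': '/src',
            'outcome': 'SUCCESS', 'cmd_line': 'pants test ::',
        },
        'cumulative_timings': [{'label': 'main', 'timing': '1.5'}],
        'self_timings': [{'label': 'main', 'timing': '0.5'}],
    })
    # Timings are stored in milliseconds, so 1.5s is persisted as 1500.
    return list(db.get_stats_for_cmd_line('cumulative_timings', 'pants test%'))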
|
foursquare/pants
|
src/python/pants/stats/statsdb.py
|
Python
|
apache-2.0
| 5,022
|
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
"""
import numpy as np
import numpy.ma as ma
import pandas as pd
from collections import OrderedDict
from itertools import count
from motmetrics.lap import linear_sum_assignment
class MOTAccumulator(object):
"""Manage tracking events.
This class computes per-frame tracking events from a given set of object / hypothesis
    ids and pairwise distances. Intended usage
import motmetrics as mm
acc = mm.MOTAccumulator()
acc.update(['a', 'b'], [0, 1, 2], dists, frameid=0)
...
acc.update(['d'], [6,10], other_dists, frameid=76)
summary = mm.metrics.summarize(acc)
print(mm.io.render_summary(summary))
Update is called once per frame and takes objects / hypothesis ids and a pairwise distance
matrix between those (see distances module for support). Per frame max(len(objects), len(hypothesis))
events are generated. Each event type is one of the following
- `'MATCH'` a match between a object and hypothesis was found
- `'SWITCH'` a match between a object and hypothesis was found but differs from previous assignment
- `'MISS'` no match for an object was found
    - `'FP'` no match for a hypothesis was found (spurious detections)
- `'RAW'` events corresponding to raw input
Events are tracked in a pandas Dataframe. The dataframe is hierarchically indexed by (`FrameId`, `EventId`),
where `FrameId` is either provided during the call to `update` or auto-incremented when `auto_id` is set
true during construction of MOTAccumulator. `EventId` is auto-incremented. The dataframe has the following
columns
- `Type` one of `('MATCH', 'SWITCH', 'MISS', 'FP', 'RAW')`
- `OId` object id or np.nan when `'FP'` or `'RAW'` and object is not present
- `HId` hypothesis id or np.nan when `'MISS'` or `'RAW'` and hypothesis is not present
- `D` distance or np.nan when `'FP'` or `'MISS'` or `'RAW'` and either object/hypothesis is absent
From the events and associated fields the entire tracking history can be recovered. Once the accumulator
has been populated with per-frame data use `metrics.summarize` to compute statistics. See `metrics.compute_metrics`
for a list of metrics computed.
References
----------
1. Bernardin, Keni, and Rainer Stiefelhagen. "Evaluating multiple object tracking performance: the CLEAR MOT metrics."
EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.
2. Milan, Anton, et al. "Mot16: A benchmark for multi-object tracking." arXiv preprint arXiv:1603.00831 (2016).
3. Li, Yuan, Chang Huang, and Ram Nevatia. "Learning to associate: Hybridboosted multi-target tracker for crowded scene."
Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on. IEEE, 2009.
"""
def __init__(self, auto_id=False, max_switch_time=float('inf')):
"""Create a MOTAccumulator.
Params
------
auto_id : bool, optional
Whether or not frame indices are auto-incremented or provided upon
            updating. Defaults to false. Specifying a frame-id when this value
            is true results in an error. Not specifying a frame-id when this
            value is false also results in an error.
max_switch_time : scalar, optional
Allows specifying an upper bound on the timespan an unobserved but
tracked object is allowed to generate track switch events. Useful if groundtruth
objects leaving the field of view keep their ID when they reappear,
but your tracker is not capable of recognizing this (resulting in
track switch events). The default is that there is no upper bound
on the timespan. In units of frame timestamps. When using auto_id
in units of count.
"""
self.auto_id = auto_id
self.max_switch_time = max_switch_time
self.reset()
def reset(self):
"""Reset the accumulator to empty state."""
self._events = []
self._indices = []
#self.events = MOTAccumulator.new_event_dataframe()
self.m = {} # Pairings up to current timestamp
        self.last_occurrence = {}  # Tracks most recent occurrence of an object
self.dirty_events = True
self.cached_events_df = None
def update(self, oids, hids, dists, frameid=None):
"""Updates the accumulator with frame specific objects/detections.
This method generates events based on the following algorithm [1]:
1. Try to carry forward already established tracks. If any paired object / hypothesis
from previous timestamps are still visible in the current frame, create a 'MATCH'
event between them.
2. For the remaining constellations minimize the total object / hypothesis distance
error (Kuhn-Munkres algorithm). If a correspondence made contradicts a previous
match create a 'SWITCH' else a 'MATCH' event.
3. Create 'MISS' events for all remaining unassigned objects.
4. Create 'FP' events for all remaining unassigned hypotheses.
Params
------
oids : N array
Array of object ids.
hids : M array
Array of hypothesis ids.
dists: NxM array
Distance matrix. np.nan values to signal do-not-pair constellations.
See `distances` module for support methods.
Kwargs
------
        frameid : id
Unique frame id. Optional when MOTAccumulator.auto_id is specified during
construction.
Returns
-------
frame_events : pd.DataFrame
Dataframe containing generated events
References
----------
1. Bernardin, Keni, and Rainer Stiefelhagen. "Evaluating multiple object tracking performance: the CLEAR MOT metrics."
EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.
"""
self.dirty_events = True
oids = ma.array(oids, mask=np.zeros(len(oids)))
hids = ma.array(hids, mask=np.zeros(len(hids)))
dists = np.atleast_2d(dists).astype(float).reshape(oids.shape[0], hids.shape[0]).copy()
if frameid is None:
assert self.auto_id, 'auto-id is not enabled'
if len(self._indices) > 0:
frameid = self._indices[-1][0] + 1
else:
frameid = 0
else:
assert not self.auto_id, 'Cannot provide frame id when auto-id is enabled'
eid = count()
# 0. Record raw events
no = len(oids)
nh = len(hids)
if no * nh > 0:
for i in range(no):
for j in range(nh):
self._indices.append((frameid, next(eid)))
self._events.append(['RAW', oids[i], hids[j], dists[i,j]])
elif no == 0:
for i in range(nh):
self._indices.append((frameid, next(eid)))
self._events.append(['RAW', np.nan, hids[i], np.nan])
elif nh == 0:
for i in range(no):
self._indices.append((frameid, next(eid)))
self._events.append(['RAW', oids[i], np.nan, np.nan])
if oids.size * hids.size > 0:
# 1. Try to re-establish tracks from previous correspondences
for i in range(oids.shape[0]):
if not oids[i] in self.m:
continue
hprev = self.m[oids[i]]
j, = np.where(hids==hprev)
if j.shape[0] == 0:
continue
j = j[0]
if np.isfinite(dists[i,j]):
oids[i] = ma.masked
hids[j] = ma.masked
self.m[oids.data[i]] = hids.data[j]
self._indices.append((frameid, next(eid)))
self._events.append(['MATCH', oids.data[i], hids.data[j], dists[i, j]])
            # 2. Try to match the remaining objects/hypotheses
dists[oids.mask, :] = np.nan
dists[:, hids.mask] = np.nan
rids, cids = linear_sum_assignment(dists)
for i, j in zip(rids, cids):
if not np.isfinite(dists[i,j]):
continue
o = oids[i]
h = hids.data[j]
is_switch = o in self.m and \
self.m[o] != h and \
abs(frameid - self.last_occurrence[o]) <= self.max_switch_time
cat = 'SWITCH' if is_switch else 'MATCH'
self._indices.append((frameid, next(eid)))
self._events.append([cat, oids.data[i], hids.data[j], dists[i, j]])
oids[i] = ma.masked
hids[j] = ma.masked
self.m[o] = h
# 3. All remaining objects are missed
for o in oids[~oids.mask]:
self._indices.append((frameid, next(eid)))
self._events.append(['MISS', o, np.nan, np.nan])
# 4. All remaining hypotheses are false alarms
for h in hids[~hids.mask]:
self._indices.append((frameid, next(eid)))
self._events.append(['FP', np.nan, h, np.nan])
        # 5. Update occurrence state
for o in oids.data:
self.last_occurrence[o] = frameid
return frameid
@property
def events(self):
if self.dirty_events:
self.cached_events_df = MOTAccumulator.new_event_dataframe_with_data(self._indices, self._events)
self.dirty_events = False
return self.cached_events_df
@property
def mot_events(self):
df = self.events
return df[df.Type != 'RAW']
@staticmethod
def new_event_dataframe():
"""Create a new DataFrame for event tracking."""
idx = pd.MultiIndex(levels=[[],[]], codes=[[],[]], names=['FrameId','Event'])
cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH'])
df = pd.DataFrame(
OrderedDict([
('Type', pd.Series(cats)), # Type of event. One of FP (false positive), MISS, SWITCH, MATCH
('OId', pd.Series(dtype=object)), # Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways.
('HId', pd.Series(dtype=object)), # Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways.
('D', pd.Series(dtype=float)), # Distance or NaN when FP or MISS
]),
index=idx
)
return df
@staticmethod
def new_event_dataframe_with_data(indices, events):
"""Create a new DataFrame filled with data.
Params
------
indices: list
list of tuples (frameid, eventid)
events: list
list of events where each event is a list containing
'Type', 'OId', HId', 'D'
"""
if events:
tevents = list(zip(*events))
else:
tevents = [[], [], [], []]
raw_type = pd.Categorical(tevents[0], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH'], ordered=False)
series = [
pd.Series(raw_type, name='Type'),
pd.Series(tevents[1], dtype=object, name='OId'),
pd.Series(tevents[2], dtype=object, name='HId'),
pd.Series(tevents[3], dtype=float, name='D')
]
idx = pd.MultiIndex.from_tuples(indices, names=['FrameId','Event'])
df = pd.concat(series, axis=1)
df.index = idx
return df
@staticmethod
def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True, return_mappings=False):
"""Merge dataframes.
Params
------
dfs : list of pandas.DataFrame or MotAccumulator
A list of event containers to merge
Kwargs
------
update_frame_indices : boolean, optional
Ensure that frame indices are unique in the merged container
        update_oids : boolean, optional
            Ensure that object ids are unique in the merged container
        update_hids : boolean, optional
            Ensure that hypothesis ids are unique in the merged container
        return_mappings : boolean, optional
Whether or not to return mapping information
Returns
-------
df : pandas.DataFrame
Merged event data frame
"""
mapping_infos = []
new_oid = count()
new_hid = count()
r = MOTAccumulator.new_event_dataframe()
for df in dfs:
if isinstance(df, MOTAccumulator):
df = df.events
copy = df.copy()
infos = {}
# Update index
if update_frame_indices:
next_frame_id = max(r.index.get_level_values(0).max()+1, r.index.get_level_values(0).unique().shape[0])
if np.isnan(next_frame_id):
next_frame_id = 0
copy.index = copy.index.map(lambda x: (x[0]+next_frame_id, x[1]))
infos['frame_offset'] = next_frame_id
# Update object / hypothesis ids
if update_oids:
oid_map = dict([oid, str(next(new_oid))] for oid in copy['OId'].dropna().unique())
copy['OId'] = copy['OId'].map(lambda x: oid_map[x], na_action='ignore')
infos['oid_map'] = oid_map
if update_hids:
hid_map = dict([hid, str(next(new_hid))] for hid in copy['HId'].dropna().unique())
copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')
infos['hid_map'] = hid_map
r = r.append(copy)
mapping_infos.append(infos)
if return_mappings:
return r, mapping_infos
else:
return r
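# --- Illustrative usage sketch (not part of the original module) ---
# A minimal worked example of the update() algorithm described above, assuming
# two ground-truth objects and two hypotheses; distances and ids are made up.
def _demo_accumulator():
    acc = MOTAccumulator(auto_id=True)
    # Frame 0: object 'a' can pair with hypothesis 1 at distance 0.2, object 'b'
    # with hypothesis 2 at 0.3; np.nan marks do-not-pair constellations.
    acc.update(['a', 'b'], [1, 2], [[0.2, np.nan], [np.nan, 0.3]])
    # Frame 1: 'a' is matched again, 'b' is missed, and hypothesis 3 is spurious.
    acc.update(['a', 'b'], [1, 3], [[0.1, np.nan], [np.nan, np.nan]])
    # mot_events contains the MATCH, MISS and FP rows (RAW rows are filtered out).
    return acc.mot_events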
|
e2crawfo/dps
|
motmetrics/mot.py
|
Python
|
apache-2.0
| 14,443
|
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as n_const
from neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc_base
class TestL2populationRpcCallBackTunnelMixin(
l2population_rpc_base.TestL2populationRpcCallBackTunnelMixinBase):
def test_get_agent_ports_no_data(self):
self.assertFalse(
list(self.fakeagent.get_agent_ports(self.fdb_entries1, {})))
def test_get_agent_ports_non_existence_key_in_lvm(self):
results = {}
del self.local_vlan_map1[self.lvms[1].net]
for lvm, agent_ports in self.fakeagent.get_agent_ports(
self.fdb_entries1, self.local_vlan_map1):
results[lvm] = agent_ports
expected = {
self.lvm1: {
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
self.local_ip: []},
self.lvm3: {
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
self.local_ip: []},
}
self.assertEqual(expected, results)
def test_get_agent_ports_no_agent_ports(self):
results = {}
self.fdb_entries1[self.lvms[1].net]['ports'] = {}
for lvm, agent_ports in self.fakeagent.get_agent_ports(
self.fdb_entries1, self.local_vlan_map1):
results[lvm] = agent_ports
expected = {
self.lvm1: {
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
self.local_ip: []},
self.lvm2: {},
self.lvm3: {
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
self.local_ip: []},
}
self.assertEqual(expected, results)
def test_fdb_add_tun(self):
with mock.patch.object(self.fakeagent, 'setup_tunnel_port'),\
mock.patch.object(self.fakeagent, 'add_fdb_flow'
) as mock_add_fdb_flow:
self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
self.ports[1].ip, self.lvm1, self.ports[1].ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_add_fdb_flow.call_args_list))
def test_fdb_add_tun_non_existence_key_in_ofports(self):
ofport = self.lvm1.network_type + '0a0a0a0a'
del self.ofports[self.type_gre][self.ports[1].ip]
with mock.patch.object(self.fakeagent, 'setup_tunnel_port',
return_value=ofport
) as mock_setup_tunnel_port,\
mock.patch.object(self.fakeagent, 'add_fdb_flow'
) as mock_add_fdb_flow:
self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
mock_setup_tunnel_port.assert_called_once_with(
self.fakebr, self.ports[1].ip, self.lvm1.network_type)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
self.ports[1].ip, self.lvm1, ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_add_fdb_flow.call_args_list))
def test_fdb_add_tun_unavailable_ofport(self):
del self.ofports[self.type_gre][self.ports[1].ip]
with mock.patch.object(self.fakeagent, 'setup_tunnel_port',
return_value=0
) as mock_setup_tunnel_port,\
mock.patch.object(self.fakeagent, 'add_fdb_flow'
) as mock_add_fdb_flow:
self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
mock_setup_tunnel_port.assert_called_once_with(
self.fakebr, self.ports[1].ip, self.lvm1.network_type)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_add_fdb_flow.call_args_list))
def test_fdb_remove_tun(self):
with mock.patch.object(
self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow:
self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip),
self.ports[1].ip, self.lvm1, self.ports[1].ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_del_fdb_flow.call_args_list))
def test_fdb_remove_tun_flooding_entry(self):
self.agent_ports[self.ports[1].ip] = [n_const.FLOODING_ENTRY]
with mock.patch.object(self.fakeagent, 'del_fdb_flow'
) as mock_del_fdb_flow,\
mock.patch.object(self.fakeagent, 'cleanup_tunnel_port'
) as mock_cleanup_tunnel_port:
self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr,
(n_const.FLOODING_ENTRY[0], n_const.FLOODING_ENTRY[1]),
self.ports[1].ip, self.lvm1, self.ports[1].ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_del_fdb_flow.call_args_list))
mock_cleanup_tunnel_port.assert_called_once_with(
self.fakebr, self.ports[1].ofport, self.lvm1.network_type)
def test_fdb_remove_tun_non_existence_key_in_ofports(self):
del self.ofports[self.type_gre][self.ports[1].ip]
with mock.patch.object(
self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow:
self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1,
self.agent_ports,
self._tunnel_port_lookup)
expected = [
mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip),
self.ports[0].ip, self.lvm1, self.ports[0].ofport),
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip),
self.ports[2].ip, self.lvm1, self.ports[2].ofport),
]
self.assertEqual(sorted(expected),
sorted(mock_del_fdb_flow.call_args_list))
def test_fdb_update(self):
fake__fdb_chg_ip = mock.Mock()
self.fakeagent._fdb_chg_ip = fake__fdb_chg_ip
self.fakeagent.fdb_update('context', self.upd_fdb_entry1)
fake__fdb_chg_ip.assert_called_once_with(
'context', self.upd_fdb_entry1_val)
def test_fdb_update_non_existence_method(self):
self.assertRaises(NotImplementedError,
self.fakeagent.fdb_update,
'context', self.upd_fdb_entry1)
def test__fdb_chg_ip(self):
m_setup_entry_for_arp_reply = mock.Mock()
self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
self.upd_fdb_entry1_val, self.local_ip,
self.local_vlan_map1)
expected = [
mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac,
self.lvms[0].ip),
mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac,
self.lvms[1].ip),
mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac,
self.lvms[0].ip),
mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac,
self.lvms[1].ip),
mock.call(self.fakebr, 'remove', self.lvm2.vlan, self.lvms[0].mac,
self.lvms[0].ip),
mock.call(self.fakebr, 'add', self.lvm2.vlan, self.lvms[2].mac,
self.lvms[2].ip),
]
m_setup_entry_for_arp_reply.assert_has_calls(expected, any_order=True)
def test__fdb_chg_ip_no_lvm(self):
m_setup_entry_for_arp_reply = mock.Mock()
self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
self.fakeagent.fdb_chg_ip_tun(
'context', self.fakebr, self.upd_fdb_entry1, self.local_ip, {})
self.assertFalse(m_setup_entry_for_arp_reply.call_count)
def test__fdb_chg_ip_ip_is_local_ip(self):
upd_fdb_entry_val = {
self.lvms[0].net: {
self.local_ip: {
'before': [(self.lvms[0].mac, self.lvms[0].ip)],
'after': [(self.lvms[1].mac, self.lvms[1].ip)],
},
},
}
m_setup_entry_for_arp_reply = mock.Mock()
self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
upd_fdb_entry_val, self.local_ip,
self.local_vlan_map1)
self.assertFalse(m_setup_entry_for_arp_reply.call_count)
def test_fdb_chg_ip_tun_empty_before_after(self):
upd_fdb_entry_val = {
self.lvms[0].net: {
self.local_ip: {},
},
}
m_setup_entry_for_arp_reply = mock.Mock()
self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply
# passing non-local ip
self.fakeagent.fdb_chg_ip_tun('context', self.fakebr,
upd_fdb_entry_val, "8.8.8.8",
self.local_vlan_map1)
self.assertFalse(m_setup_entry_for_arp_reply.call_count)
|
bigswitch/neutron
|
neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py
|
Python
|
apache-2.0
| 12,378
|
'''Slickbird base class for tests'''
import sys
import re
import os
import shutil
import tempfile
import json
import requests
from requests.utils import quote
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from tornado import gen
from tornado.testing import AsyncHTTPTestCase
try:
import slickbird.web
except ImportError:
osp = os.path
APP_ROOT = osp.abspath(osp.join(osp.dirname(__file__), '..'))
sys.path.append(osp.join(APP_ROOT, '..'))
import slickbird.web
class TestSlickbirdBase(AsyncHTTPTestCase):
executor = ThreadPoolExecutor(max_workers=1)
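    # requests-based uploads are blocking, so they run on this single-worker
    # pool (via @run_on_executor) instead of blocking the Tornado IOLoop.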
def setUp(self):
self.db = tempfile.NamedTemporaryFile(delete=False)
self.home = tempfile.mkdtemp()
self.scanningdir = tempfile.mkdtemp()
AsyncHTTPTestCase.setUp(self)
def tearDown(self):
os.unlink(self.db.name)
shutil.rmtree(self.home, ignore_errors=True)
        shutil.rmtree(self.scanningdir, ignore_errors=True)
def get_app(self):
return slickbird.web.make_app(xsrf_cookies=False,
database='sqlite:///' + self.db.name,
autoreload=False,
home=self.home,
)
@run_on_executor
def collectionadd_bg(self, name, filename):
files = {'datfile': open(filename)}
data = {'name': name, 'directory': name}
return requests.post(self.get_url('/collection/add'),
data=data,
files=files)
@gen.coroutine
def collectionadd(self, name, filename):
addresp = yield self.collectionadd_bg(
name,
filename)
self.assertEqual(addresp.status_code, 200)
name = re.sub(r'''.*/collection/([^/]+)/list''',
'\\1',
addresp.url)
c = yield self.collectionget(name)
raise gen.Return(c)
@gen.coroutine
def collectionget(self, name, hidemissing=False):
cstatus = None
while cstatus != 'ready':
resp = yield self.http_client \
.fetch(self.get_url('/api/collection/{}.json?hidemissing={}'
.format(
quote(name),
str(hidemissing).lower())))
self.assertEqual(resp.code, 200)
c = json.loads(resp.body.decode('utf-8'))
cstatus = c['collection']['status']
raise gen.Return(c)
|
lpenz/slickbird
|
tests/base.py
|
Python
|
apache-2.0
| 2,667
|
# stdlib
from datetime import timedelta
from typing import Any
# third party
from fastapi import APIRouter
from fastapi import Body
from fastapi import HTTPException
from fastapi.responses import JSONResponse
from loguru import logger
# syft absolute
from syft import serialize # type: ignore
from syft.core.node.common.exceptions import InvalidCredentialsError
# grid absolute
from grid.core import security
from grid.core.config import settings
from grid.core.node import node
router = APIRouter()
@router.post("/login", name="login", status_code=200, response_class=JSONResponse)
def login_access_token(
email: str = Body(..., example="info@openmined.org"),
password: str = Body(..., example="changethis"),
) -> Any:
"""
You must pass valid credentials to log in. An account in any of the network
domains is sufficient for logging in.
"""
try:
node.users.login(email=email, password=password)
except InvalidCredentialsError as err:
logger.bind(payload={"email": email}).error(err)
raise HTTPException(status_code=401, detail="Incorrect email or password")
user = node.users.first(email=email)
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = security.create_access_token(
user.id, expires_delta=access_token_expires
)
metadata = (
serialize(node.get_metadata_for_client())
.SerializeToString()
.decode("ISO-8859-1")
)
return {
"access_token": access_token,
"token_type": "bearer",
"metadata": metadata,
"key": user.private_key,
}
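# A minimal client-side sketch of exercising this endpoint. The host, port and
# "/api/v1" prefix below are illustrative assumptions, not taken from this
# module:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8081/api/v1/login",
#       json={"email": "info@openmined.org", "password": "changethis"},
#   )
#   token = resp.json()["access_token"]  # send as "Authorization: Bearer <token>"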
|
OpenMined/PySyft
|
packages/grid/backend/grid/api/auth/login.py
|
Python
|
apache-2.0
| 1,640
|
# Copyright 2013 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
from mock import MagicMock
from callsign.restapi import (
RootResource,
DomainResource,
RecordResource,
MissingDomainResource,
ForbiddenDomainResource,
)
import socket
class TestRootResource(unittest.TestCase):
def setUp(self):
self.config = MagicMock()
self.dnsserver = MagicMock()
self.resource = RootResource(self.config, self.dnsserver)
def test_get(self):
self.dnsserver.zones = MagicMock(return_value=["foo", "bar"])
rv = self.resource.render_GET(None)
self.assertEqual(rv, "\n".join(["foo", "bar"]))
def test_getChild_exists(self):
self.config.get = MagicMock(return_value="")
zone = MagicMock()
def get_zone(x):
if x == "foo":
return zone
raise KeyError
self.dnsserver.get_zone.side_effect = get_zone
rv = self.resource.getChild("foo", None)
self.assert_(isinstance(rv, DomainResource))
self.assertEqual(rv.zone, zone)
rv = self.resource.getChild("bar", None)
self.assert_(isinstance(rv, MissingDomainResource))
self.assertEqual(rv.name, "bar")
def test_getChild_exists_with_lockdown(self):
self.config.get = MagicMock(return_value="foo bar")
zone = MagicMock()
def get_zone(x):
if x == "foo":
return zone
raise KeyError
self.dnsserver.get_zone.side_effect = get_zone
rv = self.resource.getChild("foo", None)
self.assert_(isinstance(rv, DomainResource))
self.assertEqual(rv.zone, zone)
rv = self.resource.getChild("bar", None)
self.assert_(isinstance(rv, MissingDomainResource))
self.assertEqual(rv.name, "bar")
rv = self.resource.getChild("baz", None)
self.assert_(isinstance(rv, ForbiddenDomainResource))
class TestDomainResource(unittest.TestCase):
def setUp(self):
self.zone = MagicMock()
self.dnsserver = MagicMock()
self.resource = DomainResource(self.zone, self.dnsserver)
def test_GET(self):
data = [
("A", "www", "192.168.0.1"),
("A", "x", "192.168.0.2"),
]
self.zone.a_records = MagicMock(return_value=data)
rv = self.resource.render_GET(None)
self.assertEqual(rv, "\n".join(["%s %s %s" % (x, y, z) for (x, y, z) in data]))
class TestMissingDomainResource(unittest.TestCase):
def setUp(self):
self.name = "foo"
self.dnsserver = MagicMock()
self.resource = MissingDomainResource(self.name, self.dnsserver)
def test_GET(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
def test_PUT(self):
request = MagicMock()
self.resource.render_PUT(request)
self.dnsserver.add_zone.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(201)
def test_HEAD(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
def test_DELETE(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
class TestRecordResource(unittest.TestCase):
def setUp(self):
self.name = "foo"
self.zone = MagicMock()
self.resource = RecordResource(self.name, self.zone)
def test_PUT(self):
request = MagicMock()
request.content.read.return_value = "A 192.168.0.1"
self.resource.render_PUT(request)
self.zone.set_record.assert_called_once_with(self.name, "192.168.0.1")
request.setResponseCode.assert_called_once_with(201)
def test_PUT_invalid_body(self):
request = MagicMock()
request.content.read.return_value = "wrong"
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_invalid_body)
def test_PUT_wrong_record_type(self):
request = MagicMock()
request.content.read.return_value = "MX 192.168.0.1"
self.zone.set_record.return_value = (False, "foo")
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_wrong_record_type)
def test_PUT_malformed(self):
request = MagicMock()
request.content.read.return_value = "A foo"
self.zone.set_record.side_effect = socket.error()
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_malformed)
def test_DELETE(self):
request = MagicMock()
self.resource.render_DELETE(request)
self.zone.delete_record.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(204)
def test_DELETE_missing(self):
request = MagicMock()
self.zone.delete_record.side_effect = KeyError()
self.resource.render_DELETE(request)
self.zone.delete_record.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(404)
def test_GET(self):
self.zone.get_record.return_value = ("A", "192.168.0.1")
rv = self.resource.render_GET(None)
self.assertEqual(rv, "A 192.168.0.1")
|
yaybu/callsign
|
callsign/tests/test_restapi.py
|
Python
|
apache-2.0
| 6,051
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest
from unittest import mock
except ImportError:
import mock
import unittest2 as unittest
from acos_client.v30.slb import port
class TestPort(unittest.TestCase):
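    """Checks that Port.create/update/delete build the expected aXAPI v3
    slb/server/<name>/port URL, HTTP method and JSON payload."""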
def setUp(self):
self.client = mock.MagicMock()
self.port = port.Port(self.client)
# common test parameter(s) throughout all test-cases
self._server_name = 'test_server'
def test_create_port(self):
expected = {
'port': {
"stats-data-action": "stats-data-enable",
"weight": 1,
"port-number": 80,
"range": 0,
"action": "enable",
"protocol": 'tcp'
}
}
self.port.create('test_server', 80, 'tcp')
((method, url, params, header), kwargs) = self.client.http.request.call_args
self.assertEqual(method, 'POST')
self.assertEqual(url, '/axapi/v3/slb/server/%s/port/' % self._server_name)
self.assertEqual(params, expected)
def test_create_port_with_params(self):
expected = {
'port': {
"conn-resume": 500,
"conn-limit": 600,
"stats-data-action": "stats-data-disable",
"weight": 3,
"port-number": 80,
"range": 30,
"action": "disable-with-health-check",
"protocol": 'tcp'
}
}
self.port.create('test_server', 80, 'tcp', conn_resume=500, conn_limit=600,
stats_data_action="stats-data-disable", weight=3, range=30,
action="disable-with-health-check")
((method, url, params, header), kwargs) = self.client.http.request.call_args
self.assertEqual(method, 'POST')
self.assertEqual(url, '/axapi/v3/slb/server/%s/port/' % self._server_name)
self.assertEqual(params, expected)
def test_update_port(self):
expected = {
'port': {
"conn-resume": 500,
"conn-limit": 600,
"stats-data-action": "stats-data-disable",
"weight": 3,
"port-number": 80,
"range": 30,
"action": "disable-with-health-check",
"protocol": 'tcp'
}
}
self.port.update('test_server', 80, 'tcp', conn_resume=500, conn_limit=600,
stats_data_action="stats-data-disable", weight=3, range=30,
action="disable-with-health-check")
((method, url, params, header), kwargs) = self.client.http.request.call_args
self.assertEqual(method, 'PUT')
self.assertEqual(url, '/axapi/v3/slb/server/%s/port/%s+%s/' %
(self._server_name, 80, 'tcp'))
self.assertEqual(params, expected)
def test_delete_port(self):
self.port.delete('test_server', 80, 'tcp')
((method, url, params, header), kwargs) = self.client.http.request.call_args
self.assertEqual(method, 'DELETE')
self.assertEqual(url, '/axapi/v3/slb/server/%s/port/%s+%s/' %
(self._server_name, 80, 'tcp'))
|
a10networks/acos-client
|
acos_client/tests/unit/v30/test_port.py
|
Python
|
apache-2.0
| 3,881
|
# -*- encoding: utf-8 -*-
"""Pinpoints a file path given the __name__ of the calling module.
**Usage**
Example coded on home/repo/drawing_app/draw/smart_draw.py:
::
from pyghosts.ioioio.pinpoint import projectfile_fullpath
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
print(projectfile_fullpath(__file__))
>> home/repo/drawing_app/draw
"""
import os
def _is_yaml_folder(known_path):
"""Utility: Reports whether the known_path has a setup.yaml file."""
return os.path.exists(os.path.join(known_path, 'setup.yaml'))
def projectfile_fullpath(project_file):
"""A "projectfile" is the starting point. Expects the calling __file__.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/draw/smart_draw.py
"""
return os.path.realpath(project_file)
def projectfile_filename_ext(project_file):
"""The filename and file extension.
>> home/repo/drawing_app/draw/smart_draw.py
>> smart_draw.py
"""
_known_path = projectfile_fullpath(project_file)
return os.path.split(_known_path)[1]
def projectfile_filename(project_file):
"""The name of the file without extension.
>> home/repo/drawing_app/draw/smart_draw.py
>> smart_draw
"""
_filename_ext = projectfile_filename_ext(project_file)
return os.path.splitext(_filename_ext)[0]
def projectfile_ext(project_file):
"""The file extension.
>> home/repo/drawing_app/draw/smart_draw.py
>> .py
"""
_filename_ext = projectfile_filename_ext(project_file)
return os.path.splitext(_filename_ext)[1]
def projectfile_path(project_file):
"""Expects __file__.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/draw/
"""
_projectfile_filepath = projectfile_fullpath(project_file)
return os.path.split(_projectfile_filepath)[0]
def projectfile_folder(project_file):
"""The direct folder of __file__.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/draw/
"""
_projectfile_folder = projectfile_path(project_file)
return _projectfile_folder
def projectfile_apppath(project_file):
"""The python app folder of __file__.
IE: The one below yaml.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/draw/
"""
_known_path = projectfile_folder(project_file)
while not _is_yaml_folder(os.path.split(_known_path)[0]):
"""Keep climbing up the known_path until you find the "yaml" file.
Assumes your app folders are children of a folder with yaml.
"""
_known_path = os.path.split(_known_path)[0]
return _known_path
def projectfile_projectpath(project_file):
"""The python project folder of __file__.
IE: Parent of `projectfile_apppath`.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/
"""
_app_path = projectfile_apppath(project_file)
return os.path.split(_app_path)[0]
def project_path():
"""The python project folder, whatever you are using.
>> home/repo/drawing_app/draw/smart_draw.py
>> home/repo/drawing_app/
"""
return os.getcwd()
|
timitee/pyghosts
|
ghosts/ioioio/pinpoint.py
|
Python
|
apache-2.0
| 3,615
|
import json
class ConfigHelper(object):
@staticmethod
    def load_json_data(config_file):
        # The context manager closes the file; no explicit close() is needed.
        with open(config_file, 'r') as reader:
            json_data = json.loads(reader.read())
        return json_data
    @staticmethod
    def update_config(json_data, config_file):
        with open(config_file, "w") as writer:
            writer.write(json.dumps(json_data, indent=4, sort_keys=True))
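# A minimal usage sketch; "model_config.json" is a hypothetical file name:
#
#   config = ConfigHelper.load_json_data("model_config.json")
#   config["train_epochs"] = 10
#   ConfigHelper.update_config(config, "model_config.json")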
|
ShifuML/pyshifu
|
pyshifu/util/config_helper.py
|
Python
|
apache-2.0
| 452
|
import os
import time
import docker
import requests
from docker.models.containers import Container
from requests import Response
from ..utils import (
CONTAINER_NAME,
get_logs,
get_nginx_config,
get_response_text1,
remove_previous_container,
)
client = docker.from_env()
def verify_container(container: Container, response_text: str) -> None:
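    """Assert that the NGINX_*/UWSGI_* overrides show up in the generated nginx
    config and uWSGI logs, then check the app responds on port 8000."""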
nginx_config = get_nginx_config(container)
assert "client_max_body_size 1m;" in nginx_config
assert "worker_processes 2;" in nginx_config
assert "listen 80;" in nginx_config
assert "worker_connections 2048;" in nginx_config
assert "worker_rlimit_nofile 2048;" in nginx_config
assert "daemon off;" in nginx_config
assert "listen 80;" in nginx_config
assert "include uwsgi_params;" in nginx_config
assert "uwsgi_pass unix:///tmp/uwsgi.sock;" in nginx_config
assert "try_files $uri @app;" in nginx_config
assert "location @app {" in nginx_config
assert "include uwsgi_params;" in nginx_config
assert "location /static {" in nginx_config
assert "alias /app/static;" in nginx_config
# Nginx index.html specific
assert "location = / {" not in nginx_config
assert "index /static/index.html;" not in nginx_config
logs = get_logs(container)
assert "getting INI configuration from /app/uwsgi.ini" in logs
assert "getting INI configuration from /etc/uwsgi/uwsgi.ini" in logs
assert "ini = /app/uwsgi.ini" in logs
assert "ini = /etc/uwsgi/uwsgi.ini" in logs
assert "socket = /tmp/uwsgi.sock" in logs
assert "chown-socket = nginx:nginx" in logs
assert "chmod-socket = 664" in logs
assert "hook-master-start = unix_signal:15 gracefully_kill_them_all" in logs
assert "need-app = true" in logs
assert "die-on-term = true" in logs
assert "show-config = true" in logs
assert "module = main" in logs
assert "callable = app" in logs
assert "processes = 8" in logs
assert "cheaper = 3" in logs
assert "spawned uWSGI master process" in logs
assert "spawned uWSGI worker 1" in logs
assert "spawned uWSGI worker 2" in logs
assert "spawned uWSGI worker 3" in logs
assert "spawned uWSGI worker 4" not in logs
assert 'running "unix_signal:15 gracefully_kill_them_all" (master-start)' in logs
assert "success: nginx entered RUNNING state, process has stayed up for" in logs
assert "success: uwsgi entered RUNNING state, process has stayed up for" in logs
assert "Checking for script in /app/prestart.sh" in logs
assert "Running script /app/prestart.sh" in logs
assert (
"Running inside /app/prestart.sh, you could add migrations to this file" in logs
)
response: Response = requests.get("http://127.0.0.1:8000")
assert response.status_code == 200
assert response.text == response_text
def test_defaults() -> None:
name = os.getenv("NAME", "")
image = f"tiangolo/uwsgi-nginx-flask:{name}"
response_text = get_response_text1()
sleep_time = int(os.getenv("SLEEP_TIME", 3))
remove_previous_container(client)
container = client.containers.run(
image,
name=CONTAINER_NAME,
environment={
"UWSGI_CHEAPER": 3,
"UWSGI_PROCESSES": 8,
"NGINX_MAX_UPLOAD": "1m",
"NGINX_WORKER_PROCESSES": 2,
"NGINX_WORKER_CONNECTIONS": 2048,
"NGINX_WORKER_OPEN_FILES": 2048,
},
ports={"80": "8000"},
detach=True,
)
time.sleep(sleep_time)
verify_container(container, response_text)
container.stop()
# Test that everything works after restarting too
container.start()
time.sleep(sleep_time)
verify_container(container, response_text)
container.stop()
container.remove()
|
tiangolo/uwsgi-nginx-flask-docker
|
tests/test_01_main/test_env_vars_03.py
|
Python
|
apache-2.0
| 3,769
|
#!/usr/bin/env python2.7
#####################################################################
#
# Copyright 2015 Mayur Patel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#####################################################################
import dirb.ds as ds
import dirb.localclient as localclient
import dirb.sexpr as sexpr
import dirb.pathexpr as pathexpr
import unittest
import os
# ==========================================
class SimpleSexprTest(unittest.TestCase):
# ----------------------------------------
def test_identity( self ):
e = "( and (bookmark alpha) (parameter (key value) (key value) (key value)) )"
self.assertEqual( sexpr.loads( e ), sexpr.loads(sexpr.dumps( sexpr.loads( e ))) )
self.assertEqual( sexpr.loads( e ), ['and', ['bookmark', 'alpha'], ['parameter', ['key','value'], ['key','value'],['key','value']]] )
def test_escape_bracket( self ):
e = r'("(name)" in bracket)'
self.assertEqual( sexpr.loads( e ), ['(name)', 'in', 'bracket'] )
def test_bracket( self ):
e = r'(\(name\) in bracket)'
self.assertEqual( sexpr.loads( e ), ['\\', ['name\\'], 'in', 'bracket'] )
def test_quote( self ):
e = '("(name) (value)\"\"" token2)'
self.assertEqual( sexpr.loads( e ), ['(name) (value)\"\"', 'token2'] )
# ==========================================
# /<show>/sequence/<sequence>/<shot>/<dept>
# /<show>/asset/<assettype>/<asset>/<dept>
class SimpleLocalClientTest(unittest.TestCase):
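    """Builds a small show/sequence/asset tree under /tmp and exercises
    traversal, bookmark lookups, search_paths, depict_paths and path-context
    queries against it."""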
def setUp(self):
self.dirlist = (
'/tmp/dirbtest1/projects/',
'/tmp/dirbtest1/projects/show',
'/tmp/dirbtest1/projects/show/asset',
'/tmp/dirbtest1/projects/show/asset/vehicle',
'/tmp/dirbtest1/projects/show/asset/vehicle/car1',
'/tmp/dirbtest1/projects/show/asset/vehicle/car1/lighting',
'/tmp/dirbtest1/projects/show/asset/vehicle/car1/dontfind',
'/tmp/dirbtest1/projects/show/sequence',
'/tmp/dirbtest1/projects/show/sequence/aa',
'/tmp/dirbtest1/projects/show/sequence/aa/xx',
'/tmp/dirbtest1/projects/show/sequence/bb',
'/tmp/dirbtest1/projects/show/sequence/bb/xx',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/animation',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/lighting',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/dontfind',
'/tmp/dirbtest1/projects/show/sequence/bb/yy',
'/tmp/dirbtest1/projects/show/sequence/bb/zz',
'/tmp/dirbtest1/projects/show/sequence/cc'
)
self.doc = ds.compile_dir_structure( {
'collections' : {"department":["animation","lighting"], "app":['katana','maya']},
'rules' : {
'ROOT' : [
['ParameterizedLevel', { "bookmarks":["showroot"], "key":'show'}],
['BranchLevel', {"rules":["sequence","asset"]}],
],
'sequence' :[
['FixedLevel', {"name":'sequence'}],
['ParameterizedLevel', { "key":'sequence'}],
['ParameterizedLevel', { "key":'shot', "bookmarks":['shotroot']}],
['ParameterizedLevel', { "key":'dept', "collection":"department", 'bookmarks':['workarea']}]
],
'asset' : [
['FixedLevel', {"name":'asset'}],
['ParameterizedLevel', { "key":'assettype'}],
['ParameterizedLevel', { "key":'asset', 'bookmarks':['assetroot']}],
['ParameterizedLevel', { "key":'dept', "collection":"department", 'bookmarks':['workarea']}]
]
}
} )
self.d = localclient.LocalClient( self.doc, "/tmp/dirbtest1/projects" )
for d in self.dirlist:
if not os.path.isdir( d ):
os.makedirs( d )
# ----------------------------------------
def test_simple_search(self):
class ShotSearcher( object ) :
def __init__( self ) :
self.hold = []
def does_intersect_rule( self, rulectx ):
return 'shotroot' in rulectx.bookmarks
def does_intersect_path( self, pathctx ):
return True
def test( self, pathctx, levelctx ):
ret = 'shotroot' in levelctx.bookmarks
if ret :
self.hold.append( pathctx.path )
return ret
def do_existing_paths( self ):
return False
def get_parameters( self, key, levelctx, pathctxlist ) :
if key == "sequence" :
return ("SEQUENCE",)
if key == "shot" :
return ("SHOT",)
if key == "show" :
return ("SHOW",)
if key == 'dept':
return ( "animation","lighting" )
return []
s = ShotSearcher()
self.d.traverse( s )
self.assertEqual(s.hold, ['/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT'])
# ----------------------------------------
def test_bookmark_names(self):
bookmarks = set( self.d.get_bookmark_names() )
expected = set(('showroot','shotroot','assetroot','workarea'))
self.assertEqual(bookmarks, expected)
# ----------------------------------------
def test_bookmark_parameters(self):
found = self.d.get_bookmark_parameters('workarea')
found = sorted( [ sorted(x.items()) for x in found ] )
expected = [{'dept': 'department', 'show': None, 'shot': None, 'sequence': None}, {'dept': 'department', 'show': None, 'asset': None, 'assettype': None}]
expected = sorted( [ sorted( x.items() ) for x in expected ] )
self.assertEqual(found, expected)
# ----------------------------------------
def test_search_paths_and(self):
searchexpr = '(and (bookmark shotroot) (parameters (show show)(shot xx)(sequence bb)))'
foundlist = self.d.search_paths( searchexpr )
self.assertEqual( len(foundlist), 1 )
pathctx = foundlist[0]
self.assertEqual( pathctx.path, '/tmp/dirbtest1/projects/show/sequence/bb/xx' )
self.assertEqual( pathctx.parameters, {'show': 'show', 'shot': 'xx', 'sequence': 'bb'} )
self.assertEqual( pathctx.bookmarks, ['shotroot'] )
# ----------------------------------------
def test_search_paths_multifinder_parameters(self):
searchexpr = '(parameters (show show)(shot xx)(sequence bb))'
foundlist = self.d.search_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/show/sequence/bb/xx/animation',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/lighting',
'/tmp/dirbtest1/projects/show/sequence/bb/xx',
'/tmp/dirbtest1/projects/show/sequence/bb',
'/tmp/dirbtest1/projects/show/sequence',
'/tmp/dirbtest1/projects/show' ))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_search_paths_andor(self):
searchexpr = '(and (bookmark workarea) (or (parameters (sequence bb))(parameters (asset car1))))'
foundlist = self.d.search_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/show/asset/vehicle/car1/lighting',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/animation',
'/tmp/dirbtest1/projects/show/sequence/bb/xx/lighting'))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_search_paths_multifinder_bookmarks(self):
searchexpr = '(bookmark shotroot)'
foundlist = self.d.search_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/show/sequence/aa/xx',
'/tmp/dirbtest1/projects/show/sequence/bb/xx',
'/tmp/dirbtest1/projects/show/sequence/bb/yy',
'/tmp/dirbtest1/projects/show/sequence/bb/zz'))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_parameter_collect_parameter(self):
found = pathexpr.create_parameter_collect( sexpr.loads( "(parameters (key1 value1) (key2 value2))" ))
expected = {'key2': ('value2',), 'key1': ('value1',)}
self.assertEqual( found, expected )
# ----------------------------------------
def test_parameter_collect_and(self):
found = pathexpr.create_parameter_collect( sexpr.loads( "(and (parameters (key1 value1)) (parameters (key1 value1) (key2 value2)))" ))
self.assertEqual( set(found['key1']), set(('value1',)) )
self.assertEqual( set(found.keys()), set(('key1',)) )
# ----------------------------------------
def test_parameter_collect_or(self):
found = pathexpr.create_parameter_collect( sexpr.loads( "(or (parameters (key1 value1)) (parameters (key2 value2)))" ))
self.assertEqual( set(found['key1']), set(('value1',)) )
self.assertEqual( set(found['key2']), set(('value2',)) )
self.assertEqual( set(found.keys()), set(('key1','key2')) )
# ----------------------------------------
def test_depict_paths_rootonly(self):
searchexpr = '(parameters (show SHOW))'
foundlist = self.d.depict_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/SHOW',))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_depict_paths_collect_exception(self):
searchexpr = '(parameters (show SHOW) (sequence SEQUENCE) (shot SHOT) (dept DEPT))'
# this is not a valid path specification, because DEPT is not in the 'department' collection.
self.assertRaises( KeyError, self.d.depict_paths, searchexpr )
# ----------------------------------------
def test_depict_paths_multiparam_multidir(self):
searchexpr = '(parameters (show SHOW) (sequence SEQUENCE) (shot SHOT) (dept animation))'
# "open" parameterizations like this will build the entire ancestor hierarchy
foundlist = self.d.depict_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT',
'/tmp/dirbtest1/projects/SHOW/asset',
'/tmp/dirbtest1/projects/SHOW',
'/tmp/dirbtest1/projects/SHOW/sequence',
'/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE',
'/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT/animation'))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_depict_paths_multiparam_bookmark(self):
searchexpr = '(and (bookmark workarea) (parameters (show SHOW) (sequence SEQUENCE) (shot SHOT) (dept animation)))'
# the bookmark forces only workareas, not the entire hierarchy up to the parameterized leaf.
foundlist = self.d.depict_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT/animation',))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_depict_paths_andor(self):
searchexpr = "(and (bookmark workarea) (or (parameters (sequence SEQUENCE) (shot SHOT))(parameters (assettype TYPE) (asset ASSET))(parameters (show SHOW) (dept lighting))))"
# the bookmark forces only workareas, not the entire hierarchy up to the parameterized leaf.
foundlist = self.d.depict_paths( searchexpr )
foundlist = set( x.path for x in foundlist )
expected = set((
'/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT/lighting',
'/tmp/dirbtest1/projects/SHOW/asset/TYPE/ASSET/lighting'))
self.assertEqual( foundlist, expected )
# ----------------------------------------
def test_get_path_context_realpath( self ):
targetpath = '/tmp/dirbtest1/projects/show/asset/vehicle/car1/lighting'
found = self.d.get_path_context( targetpath )
expected = set( {'dept': 'lighting', 'assettype': 'vehicle', 'asset': 'car1', 'show': 'show'}.items() )
self.assertEqual( found.path, targetpath )
self.assertEqual( set(found.parameters.items()), expected )
# ----------------------------------------
def test_get_path_context_realpath2( self ):
targetpath = '/tmp/dirbtest1/projects/show/sequence/bb'
found = self.d.get_path_context( targetpath )
expected = set( {'sequence': 'bb', 'show': 'show'}.items() )
self.assertEqual( found.path, targetpath )
self.assertEqual( set(found.parameters.items()), expected )
# ----------------------------------------
def test_get_path_context_depictedpath( self ):
targetpath = '/tmp/dirbtest1/projects/newshow/asset/character/bigguy/animation'
# this targetpath does not actually exist on disk, but can still be interrogated
found = self.d.get_path_context( targetpath )
expected = set( {'dept': 'animation', 'assettype': 'character', 'asset': 'bigguy', 'show': 'newshow'}.items() )
self.assertEqual( found.path, targetpath )
self.assertEqual( set(found.parameters.items()), expected )
# ----------------------------------------
def test_get_path_context_depictedfilename( self ):
targetpath = '/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT/animation/application/scenes/filename.scene'
# it is okay to go deeper than the directory structure understands, it will return the deepest context it knows
found = self.d.get_path_context( targetpath )
expected = set( {'dept': 'animation', 'sequence': 'SEQUENCE', 'shot': 'SHOT', 'show': 'SHOW'}.items() )
self.assertEqual( found.path, '/tmp/dirbtest1/projects/SHOW/sequence/SEQUENCE/SHOT/animation' )
self.assertEqual( set(found.parameters.items()), expected )
# ----------------------------------------
def test_get_path_context_depictedpath_badcollection( self ):
targetpath = '/tmp/dirbtest1/projects/falseshow/asset/set/castle/infantry'
# department value in this targetpath is not a member of the department collection
self.assertRaises( KeyError, self.d.get_path_context, targetpath )
# ----------------------------------------
def test_get_path_context_shallow( self ):
targetpath = '/tmp/dirbtest1/projects/SHOW/editorial/workarea'
# targetpath is not compatible with this directory structure
found = self.d.get_path_context( targetpath )
self.assertEqual( found.path, '/tmp/dirbtest1/projects/SHOW' )
# ----------------------------------------
def test_get_path_context_notvalidpath( self ):
targetpath = '/tmp/dirbtest1/thing/SHOW'
# targetpath is not compatible with this directory structure
found = self.d.get_path_context( targetpath )
self.assertEqual( found, None )
# ----------------------------------------
def test_get_frontier_contexts_root( self ):
targetpath = '/tmp/dirbtest1/projects'
found = self.d.get_frontier_contexts( targetpath )
expected_keys = ["show"]
expected_parameters = {'show':'show'}
self.assertEqual( set(found.keys()), set(expected_keys) )
self.assertEqual( len( found['show'] ), 1 )
self.assertEqual( found['show'][0].parameters, expected_parameters )
# ----------------------------------------
def test_get_frontier_contexts_cluster( self ):
targetpath = '/tmp/dirbtest1/projects/show/sequence'
found = self.d.get_frontier_contexts( targetpath )
expected_keys = ["sequence"]
expected_parameters = set(['aa','bb','cc'])
self.assertEqual( set(found.keys()), set(expected_keys) )
self.assertEqual( len( found['sequence'] ), len(expected_parameters) )
found_parameters = set( i.parameters['sequence'] for i in found['sequence'] )
self.assertEqual( set(found_parameters), expected_parameters )
# ----------------------------------------
def test_get_frontier_contexts_branch( self ):
targetpath = '/tmp/dirbtest1/projects/show'
found = self.d.get_frontier_contexts( targetpath )
expected_keys = set(["sequence",'assettype'])
expected_parameters = set(['aa','bb','cc'])
self.assertEqual( set(found.keys()), expected_keys )
self.assertEqual( len( found['sequence'] ), len(expected_parameters) )
found_parameters = set( i.parameters['sequence'] for i in found['sequence'] )
self.assertEqual( set(found_parameters), expected_parameters )
expected_parameters = set(['vehicle'])
self.assertEqual( len( found['assettype'] ), len(expected_parameters) )
found_parameters = set( i.parameters['assettype'] for i in found['assettype'] )
self.assertEqual( set(found_parameters), expected_parameters )
# ----------------------------------------
def tearDown(self):
# TODO should we remove the directories we created?
pass
# ==========================================
class SimplePermissionsTest(unittest.TestCase):
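    """Checks that per-level user/group/permissions settings are resolved into
    the depicted path contexts (permissions are reported as octal ints,
    e.g. 0o750 == 488, 0o755 == 493)."""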
def setUp(self):
self.doc = ds.compile_dir_structure( {
'collections' : {"datatype":["caches","scenes","images"], 'assettype':['character','prop','vehicle','set']},
'rules' : {
'ROOT' : [
['BranchLevel', {'rules':['assets','shots']}],
],
'shots' : [
['ParameterizedLevel', { "key":'datatype', "collection":"datatype", 'user':'root', 'group':'root', 'permissions':'rwxr-xr-x'}],
['ParameterizedLevel', { "key":'show'}],
['ParameterizedLevel', { "key":'sequence'}],
['ParameterizedLevel', { "key":'shot', 'bookmarks':['shotroot']}],
['ParameterizedLevel', { "key":'user', 'bookmarks':['workarea'], 'user':'(parameter user)', 'group':'shotdept', 'permissions':'rwxr-x---' }]
],
'assets' :[
['FixedLevel', {"name":'assets', 'user':'root', 'group':'root', 'permissions':'rwxr-xr-x'}],
['ParameterizedLevel', { "key":'show', 'group':'assetdept'}],
['ParameterizedLevel', { "key":'assettype', 'collection':'assettype'}],
['ParameterizedLevel', { "key":'assetname', 'bookmarks':['assetroot'] }],
['ParameterizedLevel', { "key":'user', 'bookmarks':['workarea'], 'user':'(parameter user)', 'permissions':'rwxr-x---' }]
]
}
} )
self.d = localclient.LocalClient( self.doc, "/tmp/dirbtest1/projects" )
# ----------------------------------------
def test_simple_depict1(self):
createexpr = '(and (bookmark workarea) (parameters (show diehard)(assettype vehicle)(assetname gunshipA)(user bwillis)))'
foundlist = self.d.depict_paths( createexpr )
self.assertEqual( 1, len(foundlist) )
expected = { 'attributes':{}, 'parameters':{'assetname': 'gunshipA', 'assettype': 'vehicle', 'user': 'bwillis', 'show': 'diehard'}, 'path':'/tmp/dirbtest1/projects/assets/diehard/vehicle/gunshipA/bwillis', 'collections':{'assettype': 'assettype'}, 'user':'bwillis', 'group':'assetdept', 'permissions':488 }
found = foundlist[0]
self.assertEqual( found.attributes, expected['attributes'] )
self.assertEqual( found.parameters, expected['parameters'] )
self.assertEqual( found.path, expected['path'] )
self.assertEqual( found.collections, expected['collections'] )
self.assertEqual( found.user, expected['user'] )
self.assertEqual( found.group, expected['group'] )
self.assertEqual( found.permissions, expected['permissions'] )
# ----------------------------------------
def test_simple_depict2(self):
createexpr = '(and (bookmark workarea) (parameters (datatype caches)(show diehard)(sequence QQQ)(shot TTT)(user bwillis)))'
foundlist = self.d.depict_paths( createexpr )
self.assertEqual( 1, len(foundlist) )
expected = { 'attributes':{}, 'parameters':{'datatype': 'caches', 'sequence': 'QQQ', 'shot': 'TTT', 'user': 'bwillis', 'show': 'diehard'}, 'path':'/tmp/dirbtest1/projects/caches/diehard/QQQ/TTT/bwillis', 'collections':{'datatype': 'datatype'}, 'user':'bwillis', 'group':'shotdept', 'permissions':488 }
found = foundlist[0]
self.assertEqual( found.attributes, expected['attributes'] )
self.assertEqual( found.parameters, expected['parameters'] )
self.assertEqual( found.path, expected['path'] )
self.assertEqual( found.collections, expected['collections'] )
self.assertEqual( found.user, expected['user'] )
self.assertEqual( found.group, expected['group'] )
self.assertEqual( found.permissions, expected['permissions'] )
# ----------------------------------------
def test_simple_depict3(self):
createexpr = '(and (bookmark shotroot) (parameters (datatype images)(show dh2)(sequence qqq)(shot ttt)(user arickman)))'
foundlist = self.d.depict_paths( createexpr )
self.assertEqual( 1, len(foundlist) )
expected = { 'attributes':{}, 'parameters':{'datatype': 'images', 'show': 'dh2', 'shot': 'ttt', 'sequence': 'qqq'}, 'path':'/tmp/dirbtest1/projects/images/dh2/qqq/ttt', 'collections':{'datatype': 'datatype'}, 'user':'root', 'group':'root', 'permissions':493 }
found = foundlist[0]
self.assertEqual( found.attributes, expected['attributes'] )
self.assertEqual( found.parameters, expected['parameters'] )
self.assertEqual( found.path, expected['path'] )
self.assertEqual( found.collections, expected['collections'] )
self.assertEqual( found.user, expected['user'] )
self.assertEqual( found.group, expected['group'] )
self.assertEqual( found.permissions, expected['permissions'] )
# ----------------------------------------
def tearDown(self):
pass
# ==========================================
class SimpleFormattedLevelTest(unittest.TestCase):
def setUp(self):
self.doc = ds.compile_dir_structure( {
'collections' : {"datatype":["caches","scenes","images"], 'assettype':['chr','prp','veh','set']},
'rules' : {
'ROOT' : [
['ParameterizedLevel', { "key":'show'}],
['BranchLevel', {'rules':['assets','shots']}],
],
'shots' : [
['ParameterizedLevel', { "key":'datatype', "collection":"datatype"}],
['FormattedLevel', { 'format': "seq_{}", "keys":['sequence'], 'bookmarks':['sequenceroot']}],
['FormattedLevel', { 'format': "shot_{}", "keys":['shot'] , 'bookmarks':['shotroot']}],
['ParameterizedLevel', { "key":'user', 'bookmarks':['shotarea'] }]
],
'assets' :[
['FixedLevel', {"name":'assets'}],
['FormattedLevel', { 'format':'{}_{}', 'keys':['assettype','assetname'], 'bookmarks':['assetroot'], 'collections':{ 'assettype':'assettype'} } ],
['ParameterizedLevel', { "key":'user', 'bookmarks':['assetarea'], }]
]
}
} )
self.rootdir = "/tmp/dirbtest2/projects"
self.d = localclient.LocalClient( self.doc, self.rootdir )
self.dirlist = (
"diehard/caches/seq_0001/shot_0003/johnm/",
"diehard/caches/seq_0001/shot_0007/johnm/",
"diehard/scenes/seq_0001/shot_0003/johnm/",
"diehard/scenes/seq_0002/shot_0012/hansg/",
"diehard/images/seq_0001/shot_0003/johnm/",
"diehard/dontfind/seq_0001/shot_0003/johnm/",
"diehard/assets/chr_partypal/johnm/",
"diehard/assets/chr_eurotrash/hansg/",
"diehard/assets/prp_ducttape/johnm",
"diehard/assets/veh_gunship/johnson",
"diehard/assets/dont_find/johnm"
)
for d in self.dirlist:
if not os.path.isdir( os.path.join( self.rootdir, d) ):
os.makedirs( os.path.join( self.rootdir, d) )
# ----------------------------------------
def test_simple_search1(self):
searchexpr = '(and (bookmark shotarea) (parameters (show diehard)(datatype caches)(sequence 0001)(shot 0007)(user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = ( '/tmp/dirbtest2/projects/diehard/caches/seq_0001/shot_0007/johnm',)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_search2(self):
searchexpr = '(and (bookmark assetarea) (parameters (user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = ( '/tmp/dirbtest2/projects/diehard/assets/chr_partypal/johnm','/tmp/dirbtest2/projects/diehard/assets/prp_ducttape/johnm')
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def tearDown(self):
pass
# ==========================================
class SimpleNegativeOpsTest(unittest.TestCase):
def setUp(self):
self.doc = ds.compile_dir_structure( {
'collections' : {"datatype":["caches","scenes","images"], 'assettype':['chr','prp','veh','set']},
'rules' : {
'ROOT' : [
['ParameterizedLevel', { "key":'show'}],
['BranchLevel', {'rules':['assets','shots']}],
],
'shots' : [
['ParameterizedLevel', { "key":'datatype', "collection":"datatype", "treeattributes":{'subtree':'shots'}}],
['FormattedLevel', { 'format': "seq_{}", "keys":['sequence'], 'bookmarks':['sequenceroot']}],
['FormattedLevel', { 'format': "shot_{}", "keys":['shot'] , 'bookmarks':['shotroot']}],
['ParameterizedLevel', { "key":'user', 'bookmarks':['workarea'] }]
],
'assets' :[
['FixedLevel', {"name":'assets', 'treeattributes':{'subtree':'assets'}}],
['FormattedLevel', { 'format':'{}_{}', 'keys':['assettype','assetname'], 'bookmarks':['assetroot'], 'collections':{ 'assettype':'assettype'} } ],
['ParameterizedLevel', { "key":'user', 'bookmarks':['workarea'], }]
]
}
} )
self.rootdir = "/tmp/dirbtest3/projects"
self.d = localclient.LocalClient( self.doc, self.rootdir )
self.dirlist = (
"diehard/caches/seq_0001/shot_0003/johnm/",
"diehard/caches/seq_0001/shot_0007/johnm/",
"diehard/scenes/seq_0001/shot_0003/johnm/",
"diehard/scenes/seq_0002/shot_0012/hansg/",
"diehard/images/seq_0001/shot_0003/johnm/",
"diehard/dontfind/seq_0001/shot_0003/johnm/",
"diehard/assets/chr_partypal/johnm/",
"diehard/assets/chr_eurotrash/hansg/",
"diehard/assets/prp_ducttape/johnm",
"diehard/assets/veh_gunship/johnson",
"diehard/assets/dont_find/johnm"
)
for d in self.dirlist:
if not os.path.isdir( os.path.join( self.rootdir, d) ):
os.makedirs( os.path.join( self.rootdir, d) )
# ----------------------------------------
def test_simple_search1(self):
searchexpr = '(and (bookmark workarea)(parameters (user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest3/projects/diehard/assets/chr_partypal/johnm',
'/tmp/dirbtest3/projects/diehard/assets/prp_ducttape/johnm',
'/tmp/dirbtest3/projects/diehard/caches/seq_0001/shot_0007/johnm',
'/tmp/dirbtest3/projects/diehard/caches/seq_0001/shot_0003/johnm',
'/tmp/dirbtest3/projects/diehard/scenes/seq_0001/shot_0003/johnm',
'/tmp/dirbtest3/projects/diehard/images/seq_0001/shot_0003/johnm'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notparameter1(self):
searchexpr = '(and (bookmark workarea)(-parameters (user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
"/tmp/dirbtest3/projects/diehard/scenes/seq_0002/shot_0012/hansg",
"/tmp/dirbtest3/projects/diehard/assets/chr_eurotrash/hansg",
"/tmp/dirbtest3/projects/diehard/assets/veh_gunship/johnson"
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notparameter2(self):
searchexpr = '(and (bookmark workarea) (-parameters (sequence 0001)) (parameters (user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest3/projects/diehard/assets/chr_partypal/johnm',
'/tmp/dirbtest3/projects/diehard/assets/prp_ducttape/johnm'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notattribute1(self):
searchexpr = '(and (bookmark workarea)(-attributes (subtree shots)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
"/tmp/dirbtest3/projects/diehard/assets/chr_eurotrash/hansg",
"/tmp/dirbtest3/projects/diehard/assets/chr_partypal/johnm",
"/tmp/dirbtest3/projects/diehard/assets/prp_ducttape/johnm",
"/tmp/dirbtest3/projects/diehard/assets/veh_gunship/johnson"
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notattribute2(self):
searchexpr = '(and (bookmark workarea)(-parameters (user johnm))(-attributes (subtree assets)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
"/tmp/dirbtest3/projects/diehard/scenes/seq_0002/shot_0012/hansg",
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notattribute3(self):
searchexpr = '(and (bookmark workarea)(-parameters (assettype chr)(user johnm))(-attributes (subtree shots)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
"/tmp/dirbtest3/projects/diehard/assets/veh_gunship/johnson",
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_notbookmark1(self):
searchexpr = '(and (-bookmark workarea)(attributes (subtree shots))(parameters (datatype caches)(user johnm)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest3/projects/diehard/caches',
'/tmp/dirbtest3/projects/diehard/caches/seq_0001/shot_0007',
'/tmp/dirbtest3/projects/diehard',
'/tmp/dirbtest3/projects/diehard/caches/seq_0001/shot_0003',
'/tmp/dirbtest3/projects/diehard/caches/seq_0001'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def tearDown(self):
pass
# ==========================================
class SimpleGlobTest1(unittest.TestCase):
def setUp(self):
self.dirlist = (
'show/veh_car1/lighting',
'show/chr_bob/lighting',
'show/dontfind_blank/lighting',
'show/100x140/animation',
'show/100x140/lighting',
'show/100x140/dontfind',
'show/110x360/lighting',
'show/110x360/animation',
'show/110x570/animation',
'show/350x220/animation'
)
self.doc = ds.compile_dir_structure( {
'collections' : {"department":["animation","lighting"], 'assettype':['veh','chr','set','prp']},
'rules' : {
'ROOT' : [
['ParameterizedLevel', { "bookmarks":["showroot"], "key":'show'}],
['BranchLevel', {"rules":["sequence","asset"]}],
],
'sequence' :[
['FormattedLevel', { 'format': "{}x{}", "keys":['sequence','shot'] , 'treeattributes':{'areatype':'shots'}}],
['ParameterizedLevel', { "key":'dept', "collection":"department", 'bookmarks':['workarea']}]
],
'asset' : [
['FormattedLevel', { 'format': "{}_{}", "keys":['assettype','assetname'] , 'collections':{'assettype':'assettype'}, 'bookmarks':['assetroot'], 'treeattributes':{'areatype':'assets'}}],
['ParameterizedLevel', { "key":'dept', "collection":"department", 'bookmarks':['workarea']}]
]
}
} )
self.rootdir = "/tmp/dirbtest4/projects"
self.d = localclient.LocalClient( self.doc, self.rootdir )
for d in self.dirlist:
fulldir = os.path.join( self.rootdir, d )
if not os.path.isdir( fulldir ):
os.makedirs( fulldir )
# ----------------------------------------
def test_simple_globtest1(self):
searchexpr = '(and (bookmark workarea)(attributes (areatype shots))(parameters (sequence 1*)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest4/projects/show/100x140/animation',
'/tmp/dirbtest4/projects/show/100x140/lighting',
'/tmp/dirbtest4/projects/show/110x360/lighting',
'/tmp/dirbtest4/projects/show/110x360/animation',
'/tmp/dirbtest4/projects/show/110x570/animation'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_globtest2(self):
searchexpr = '(and (bookmark workarea)(-attributes (areatype s????)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest4/projects/show/veh_car1/lighting',
'/tmp/dirbtest4/projects/show/chr_bob/lighting'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_globtest3(self):
searchexpr = '(and (bookmark *area)(attributes (areatype assets)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest4/projects/show/veh_car1/lighting',
'/tmp/dirbtest4/projects/show/chr_bob/lighting'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_globtest4(self):
searchexpr = '(and (bookmark workarea)(attributes (areatype shots))(-parameters (sequence 1*)))'
foundlist = self.d.search_paths( searchexpr )
expected = (
'/tmp/dirbtest4/projects/show/350x220/animation',
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_depict1(self):
createexpr = '(parameters (show diehard)(sequence 999)(shot 888)(dept lighting))'
foundlist = self.d.depict_paths( createexpr )
expected = (
'/tmp/dirbtest4/projects/diehard',
'/tmp/dirbtest4/projects/diehard/999x888',
'/tmp/dirbtest4/projects/diehard/999x888/lighting'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_depict2(self):
createexpr = '(parameters (show diehard)(sequence 777)(dept lighting))'
foundlist = self.d.depict_paths( createexpr )
expected = (
'/tmp/dirbtest4/projects/diehard',
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_depict3(self):
createexpr = '(and (attributes (areatype shots))(parameters (show diehard)(sequence 666)(shot 010)(dept lighting)))'
foundlist = self.d.depict_paths( createexpr )
expected = (
'/tmp/dirbtest4/projects/diehard',
'/tmp/dirbtest4/projects/diehard/666x010',
'/tmp/dirbtest4/projects/diehard/666x010/lighting'
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_depict4(self):
createexpr = '(and (attributes (areatype assets))(parameters (show diehard)(sequence 666)(shot 010)(dept lighting)))'
foundlist = self.d.depict_paths( createexpr )
expected = (
'/tmp/dirbtest4/projects/diehard',
)
self.assertEqual( set(expected), set( x.path for x in foundlist ) )
# ----------------------------------------
def test_simple_depict5(self):
createexpr = '(attributes (areatype assets))'
foundlist = self.d.depict_paths( createexpr )
self.assertEqual( foundlist, [] )
# ----------------------------------------
def test_simple_depict6(self):
createexpr = '(bookmark workarea)'
foundlist = self.d.depict_paths( createexpr )
self.assertEqual( foundlist, [] )
# ----------------------------------------
def tearDown(self):
pass
#####################################################################
if __name__ == '__main__':
unittest.main()
|
drone115b/dirb
|
test.py
|
Python
|
apache-2.0
| 36,632
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Defines some constants for the logger handler
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
HANDLER_LOGGER = "sample.handler.logger"
""" Logger handler ID """
|
isandlaTech/cohorte-3rdparty
|
pelix/src/test/resources/samples/handler/constants.py
|
Python
|
apache-2.0
| 1,152
|
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 4,
"num_nodes": 3,
"do_init": True
}],
indirect=True)
def test_actor_creation_node_failure(ray_start_cluster):
# TODO(swang): Refactor test_raylet_failed, etc to reuse the below code.
cluster = ray_start_cluster
@ray.remote
class Child:
def __init__(self, death_probability):
self.death_probability = death_probability
def get_probability(self):
return self.death_probability
def ping(self):
# Exit process with some probability.
exit_chance = np.random.rand()
if exit_chance < self.death_probability:
sys.exit(-1)
num_children = 25
# Children actors will die about half the time.
death_probability = 0.5
children = [Child.remote(death_probability) for _ in range(num_children)]
while len(cluster.list_all_nodes()) > 1:
for j in range(2):
# Submit some tasks on the actors. About half of the actors will
# fail.
children_out = [child.ping.remote() for child in children]
# Wait a while for all the tasks to complete. This should trigger
# reconstruction for any actor creation tasks that were forwarded
# to nodes that then failed.
ready, _ = ray.wait(
children_out, num_returns=len(children_out), timeout=5 * 60.0)
assert len(ready) == len(children_out)
# Replace any actors that died.
for i, out in enumerate(children_out):
try:
ray.get(out)
except ray.exceptions.RayActorError:
children[i] = Child.remote(death_probability)
children_out = [
child.get_probability.remote() for child in children
]
# Wait for new created actors to finish creation before
# removing a node. This is needed because right now we don't
# support reconstructing actors that died in the process of
# being created.
ready, _ = ray.wait(
children_out, num_returns=len(children_out), timeout=5 * 60.0)
assert len(ready) == len(children_out)
# Remove a node. Any actor creation tasks that were forwarded to this
# node must be reconstructed.
cluster.remove_node(cluster.list_all_nodes()[-1])
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_sequential(ray_start_regular):
ray.worker._global_node.kill_raylet()
ray.worker._global_node.kill_plasma_store()
ray.worker._global_node.kill_log_monitor()
ray.worker._global_node.kill_monitor()
ray.worker._global_node.kill_raylet_monitor()
# If the driver can reach the tearDown method, then it is still alive.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_parallel(ray_start_regular):
all_processes = ray.worker._global_node.all_processes
process_infos = (all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET] +
all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR])
assert len(process_infos) == 5
# Kill all the components in parallel.
for process_info in process_infos:
process_info.process.terminate()
time.sleep(0.1)
for process_info in process_infos:
process_info.process.kill()
for process_info in process_infos:
process_info.process.wait()
# If the driver can reach the tearDown method, then it is still alive.
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
stephanie-wang/ray
|
python/ray/tests/test_component_failures_3.py
|
Python
|
apache-2.0
| 4,164
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Gumbel distribution class."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import gumbel_cdf as gumbel_cdf_bijector
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.bijectors import invert as invert_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.distributions import uniform
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
class Gumbel(transformed_distribution.TransformedDistribution):
"""The scalar Gumbel distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma)) / sigma
```
where `loc = mu` and `scale = sigma`.
  The cumulative distribution function of this distribution is,
```none
cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))
```
The Gumbel distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Gumbel(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
tfd = tfp.distributions
# Define a single scalar Gumbel distribution.
dist = tfd.Gumbel(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Gumbels.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tfd.Gumbel(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name='Gumbel'):
"""Construct Gumbel distributions with location and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s).
scale must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value `NaN` to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
Default value: `True`.
name: Python `str` name prefixed to Ops created by this class.
Default value: `'Gumbel'`.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32)
loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
dtype_util.assert_same_float_dtype([loc, scale])
# Positive scale is asserted by the incorporated Gumbel bijector.
self._gumbel_bijector = gumbel_cdf_bijector.GumbelCDF(
loc=loc, scale=scale, validate_args=validate_args)
      # Because the uniform sampler generates samples in `[0, 1)` this would
      # cause samples to lie in `[-inf, inf)` instead of `(-inf, inf)`. To fix
      # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny`
      # because it is the smallest, positive, 'normal' number.
super(Gumbel, self).__init__(
distribution=uniform.Uniform(
low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny,
high=tf.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats),
# The Gumbel bijector encodes the CDF function as the forward,
# and hence needs to be inverted.
bijector=invert_bijector.Invert(
self._gumbel_bijector, validate_args=validate_args),
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
@property
def loc(self):
"""Distribution parameter for the location."""
return self._gumbel_bijector.loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._gumbel_bijector.scale
experimental_is_sharded = False
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * tf.ones_like(self.loc)
return 1. + tf.math.log(scale) + np.euler_gamma
def _log_prob(self, x):
scale = tf.convert_to_tensor(self.scale)
z = (x - self.loc) / scale
return -(z + tf.exp(-z)) - tf.math.log(scale)
def _mean(self):
return self.loc + self.scale * np.euler_gamma
def _stddev(self):
return self.scale * tf.ones_like(self.loc) * np.pi / np.sqrt(6)
def _mode(self):
return self.loc * tf.ones_like(self.scale)
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector. Consider switching to
# Chain([Softplus(), Log()]) to lighten the doubly-exponential right tail.
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
return self._gumbel_bijector._parameter_control_dependencies(is_init) # pylint: disable=protected-access
@kullback_leibler.RegisterKL(Gumbel, Gumbel)
def _kl_gumbel_gumbel(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Gumbel.
Args:
a: instance of a Gumbel distribution object.
b: instance of a Gumbel distribution object.
name: (optional) Name to use for created operations.
default is 'kl_gumbel_gumbel'.
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or 'kl_gumbel_gumbel'):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 64
# The paper uses beta to refer to scale and mu to refer to loc.
# There is actually an error in the solution as printed; this is based on
# the second-to-last step of the derivation. The value as printed would be
# off by (a.loc - b.loc) / b.scale.
a_loc = tf.convert_to_tensor(a.loc)
b_loc = tf.convert_to_tensor(b.loc)
a_scale = tf.convert_to_tensor(a.scale)
b_scale = tf.convert_to_tensor(b.scale)
return (tf.math.log(b_scale) - tf.math.log(a_scale) + np.euler_gamma *
(a_scale / b_scale - 1.) +
tf.math.expm1((b_loc - a_loc) / b_scale +
tf.math.lgamma(a_scale / b_scale + 1.)) +
(a_loc - b_loc) / b_scale)
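# ------------------------------------------------------------------------------
# A minimal self-check sketch (illustrative only; the helpers below are
# hypothetical and not part of this module). In plain NumPy it mirrors the pdf
# formula from the class docstring, the inverse-CDF sampler implied by the
# Uniform/GumbelCDF construction, and the closed-form KL from
# _kl_gumbel_gumbel, comparing the latter against a Monte Carlo estimate.
if __name__ == '__main__':
  import math
  def _pdf(x, mu, sigma):
    # pdf(x; mu, sigma) = exp(-z - exp(-z)) / sigma with z = (x - mu) / sigma.
    z = (x - mu) / sigma
    return np.exp(-(z + np.exp(-z))) / sigma
  def _kl(mu_a, sig_a, mu_b, sig_b):
    # Same expression as _kl_gumbel_gumbel above, written with NumPy ops.
    return (np.log(sig_b) - np.log(sig_a)
            + np.euler_gamma * (sig_a / sig_b - 1.)
            + np.expm1((mu_b - mu_a) / sig_b + math.lgamma(sig_a / sig_b + 1.))
            + (mu_a - mu_b) / sig_b)
  mu_a, sig_a, mu_b, sig_b = 0.5, 1., 0., 2.
  u = np.random.default_rng(0).uniform(size=200000)
  # Inverse CDF of Gumbel(mu, sigma): x = mu - sigma * log(-log(u)).
  x = mu_a - sig_a * np.log(-np.log(u))
  mc = np.mean(np.log(_pdf(x, mu_a, sig_a)) - np.log(_pdf(x, mu_b, sig_b)))
  print('KL(a||b): monte carlo %.4f vs closed form %.4f'
        % (mc, _kl(mu_a, sig_a, mu_b, sig_b)))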
|
tensorflow/probability
|
tensorflow_probability/python/distributions/gumbel.py
|
Python
|
apache-2.0
| 8,933
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import falcon
import sys
from drydock_provisioner.control.base import DrydockRequest
from drydock_provisioner.control.middleware import AuthMiddleware
class TestAuthMiddleware():
# the WSGI env for a request processed by keystone middleware
# with user token
ks_user_env = {
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '/foo',
'PATH_INFO': '',
'QUERY_STRING': '',
'CONTENT_TYPE': '',
'CONTENT_LENGTH': 0,
'SERVER_NAME': 'localhost',
'SERVER_PORT': '9000',
'SERVER_PROTOCOL': 'HTTP/1.1',
'HTTP_X_IDENTITY_STATUS': 'Confirmed',
'HTTP_X_PROJECT_ID': '',
'HTTP_X_USER_ID': '',
'HTTP_X_AUTH_TOKEN': '',
'HTTP_X_ROLES': '',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': sys.stdin,
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
}
# the WSGI env for a request processed by keystone middleware
# with service token
ks_service_env = {
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '/foo',
'PATH_INFO': '',
'QUERY_STRING': '',
'CONTENT_TYPE': '',
'CONTENT_LENGTH': 0,
'SERVER_NAME': 'localhost',
'SERVER_PORT': '9000',
'SERVER_PROTOCOL': 'HTTP/1.1',
'HTTP_X_SERVICE_IDENTITY_STATUS': 'Confirmed',
'HTTP_X_SERVICE_PROJECT_ID': '',
'HTTP_X_SERVICE_USER_ID': '',
'HTTP_X_SERVICE_TOKEN': '',
'HTTP_X_ROLES': '',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': sys.stdin,
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
}
def test_process_request_user(self):
''' AuthMiddleware is expected to correctly identify the headers
added to an authenticated request by keystonemiddleware in a
PasteDeploy configuration
'''
req_env = TestAuthMiddleware.ks_user_env
project_id = str(uuid.uuid4().hex)
req_env['HTTP_X_PROJECT_ID'] = project_id
user_id = str(uuid.uuid4().hex)
req_env['HTTP_X_USER_ID'] = user_id
token = str(uuid.uuid4().hex)
req_env['HTTP_X_AUTH_TOKEN'] = token
middleware = AuthMiddleware()
request = DrydockRequest(req_env)
response = falcon.Response()
middleware.process_request(request, response)
assert request.context.authenticated
assert request.context.user_id == user_id
def test_process_request_user_noauth(self):
''' AuthMiddleware is expected to correctly identify the headers
added to an unauthenticated (no token, bad token) request by
keystonemiddleware in a PasteDeploy configuration
'''
req_env = TestAuthMiddleware.ks_user_env
req_env['HTTP_X_IDENTITY_STATUS'] = 'Invalid'
middleware = AuthMiddleware()
request = DrydockRequest(req_env)
response = falcon.Response()
middleware.process_request(request, response)
assert request.context.authenticated is False
|
att-comdev/drydock
|
tests/unit/test_auth_middleware.py
|
Python
|
apache-2.0
| 3,874
|
from distutils.core import setup, Extension
import numpy
# define the extension module
chain_forwards_backwards_native = Extension('chain_forwards_backwards_native', sources=['chain_forwards_backwards_native.c'],
include_dirs=[numpy.get_include()])
# run the setup
setup(ext_modules=[chain_forwards_backwards_native])
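# Hedged usage note (assumed workflow, not part of the original script): the
# extension is typically compiled in place with
#     python setup.py build_ext --inplace
# after which `import chain_forwards_backwards_native` loads the compiled
# module from the build output alongside this file.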
|
sebastien-bratieres/pygpstruct
|
src/chain_forwards_backwards_native/setup.py
|
Python
|
apache-2.0
| 355
|
""" Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import cPickle as pickle
logger = logging.getLogger(__name__)
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax out, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', use_symbolic_softmax=False):
self.input = input
self.activation = activation
self.output_type = output_type
# when using HF, SoftmaxGrad.grad is not implemented
# use a symbolic softmax which is slightly slower than T.nnet.softmax
# See: http://groups.google.com/group/theano-dev/browse_thread/
# thread/3930bd5a6a67d27a
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# recurrent weights as a shared variable
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W = theano.shared(value=W_init, name='W')
# input to hidden layer weights
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_in = theano.shared(value=W_in_init, name='W_in')
# hidden to output layer weights
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_out_init, name='W_out')
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.h0 = theano.shared(value=h0_init, name='h0')
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh')
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by')
self.params = [self.W, self.W_in, self.W_out, self.h0,
self.bh, self.by]
        # for every parameter, we maintain its last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using tanh activation function) and linear output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.p_y_given_x = self.softmax(self.y_pred)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, L1_reg=0.00, L2_reg=0.00, learning_rate_decay=1,
activation='tanh', output_type='real',
final_momentum=0.9, initial_momentum=0.5,
momentum_switchover=5,
use_symbolic_softmax=False):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
self.use_symbolic_softmax = use_symbolic_softmax
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.matrix()
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.matrix(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.matrix(name='y', dtype='int32')
elif self.output_type == 'softmax': # only vector labels supported
self.y = T.vector(name='y', dtype='int32')
else:
raise NotImplementedError
# initial hidden state of the RNN
self.h0 = T.vector()
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type,
use_symbolic_softmax=self.use_symbolic_softmax)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
weights = [p.get_value() for p in self.rnn.params]
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
for param in self.rnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validation_frequency=100):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (n_seq x n_steps x n_in)
Y_train : ndarray (n_seq x n_steps x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
"""
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
n_train = train_set_x.get_value(borrow=True).shape[0]
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[0]
######################
# BUILD ACTUAL MODEL #
######################
logger.info('... building the model')
index = T.lscalar('index') # index to a case
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
compute_train_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: test_set_x[index],
self.y: test_set_y[index]},
mode=mode)
# compute the gradient of cost with respect to theta = (W, W_in, W_out)
# gradients on the weights using BPTT
gparams = []
for param in self.rnn.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = {}
for param, gparam in zip(self.rnn.params, gparams):
weight_update = self.rnn.updates[param]
upd = mom * weight_update - l_r * gparam
updates[weight_update] = upd
updates[param] = param + upd
# compiling a Theano function `train_model` that returns the
# cost, but in the same time updates the parameter of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, l_r, mom],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
###############
# TRAIN MODEL #
###############
logger.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
for idx in xrange(n_train):
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
example_cost = train_model(idx, self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train + idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i)
for i in xrange(n_train)]
this_train_loss = np.mean(train_losses)
if self.interactive:
test_losses = [compute_test_error(i)
for i in xrange(n_test)]
this_test_loss = np.mean(test_losses)
logger.info('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
else:
logger.info('epoch %i, seq %i/%i, train loss %f '
'lr: %f' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
self.learning_rate *= self.learning_rate_decay
def test_real():
""" Test RNN with real-valued outputs. """
n_hidden = 10
n_in = 5
n_out = 3
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=400, activation='tanh')
model.fit(seq, targets, validation_frequency=1000)
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
def test_binary(multiple_out=False, n_epochs=250):
""" Test RNN with binary outputs. """
n_hidden = 10
n_in = 5
if multiple_out:
n_out = 2
else:
n_out = 1
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
# whether lag 1 (dim 3) is greater than lag 2 (dim 0)
targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
if multiple_out:
# whether product of lag 1 (dim 4) and lag 1 (dim 2)
# is less than lag 2 (dim 0)
targets[:, 2:, 1] = np.cast[np.int](
(seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh', output_type='binary')
model.fit(seq, targets, validation_frequency=1000)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
guess = model.predict_proba(seq[seq_num])
guessed_targets = plt.step(xrange(n_steps), guess)
plt.setp(guessed_targets, linestyle='--', marker='d')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_ylim((-0.1, 1.1))
ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_epochs=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh',
output_type='softmax', use_symbolic_softmax=False)
model.fit(seq, targets, validation_frequency=1000)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
t0 = time.time()
test_real()
# problem takes more epochs to solve
#test_binary(multiple_out=True, n_epochs=2400)
#test_softmax(n_epochs=250)
print "Elapsed time: %f" % (time.time() - t0)
|
CDSFinance/zipline
|
theano_rnn/rnn.py
|
Python
|
apache-2.0
| 23,219
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import tempfile
import time
from urllib.parse import urlparse
import yaml
import zipfile
from novaclient import client as nova_client
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from tacker.objects import fields
from tacker.tests.functional import base
from tacker.tests.functional.common.fake_server import FakeServerManager
from tacker.tests import utils
from tacker.vnfm.infra_drivers.openstack import constants as infra_cnst
VNF_PACKAGE_UPLOAD_TIMEOUT = 60
VNF_INSTANTIATE_TIMEOUT = 60
VNF_TERMINATE_TIMEOUT = 60
VNF_SUBSCRIPTION_TIMEOUT = 60
VNF_INSTANTIATE_ERROR_WAIT = 80
VNF_DELETE_COMPLETION_WAIT = 60
VNF_HEAL_TIMEOUT = 600
VNF_LCM_DONE_TIMEOUT = 1200
RETRY_WAIT_TIME = 5
FAKE_SERVER_MANAGER = FakeServerManager.get_instance()
FAKE_SERVER_PORT = 9990
MOCK_NOTIFY_CALLBACK_URL = '/notification/callback'
UUID_RE = r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'
def _get_external_virtual_links(net0_id):
return [
{
"id": "net0",
"resourceId": net0_id,
"extCps": [{
"cpdId": "CP1",
"cpConfig": [{
"cpProtocolData": [{
"layerProtocol": "IP_OVER_ETHERNET",
}]
}]
}]
}
]
def _create_csar_user_data_common(csar_dir):
ud_common_dir = os.path.join(csar_dir, "../user_data_common/")
return _create_csar_with_unique_vnfd_id(
csar_dir, ud_common_dir)
def _create_csar_with_unique_vnfd_id(csar_dir, *include_dirs):
tempfd, tempname = tempfile.mkstemp(suffix=".zip",
dir=os.path.dirname(csar_dir))
os.close(tempfd)
common_dir = os.path.join(csar_dir, "../common/")
target_dirs = [csar_dir, common_dir]
target_dirs.extend(include_dirs)
unique_id = uuidutils.generate_uuid()
with zipfile.ZipFile(tempname, 'w') as zcsar:
_write_zipfile(zcsar, unique_id, target_dirs)
return tempname, unique_id
def _write_zipfile(zcsar, unique_id, target_dir_list):
for target_dir in target_dir_list:
for (dpath, _, fnames) in os.walk(target_dir):
if not fnames:
continue
for fname in fnames:
src_file = os.path.join(dpath, fname)
dst_file = os.path.relpath(
os.path.join(dpath, fname), target_dir)
if fname.endswith('.yaml') or fname.endswith('.yml'):
with open(src_file, 'rb') as yfile:
data = yaml.safe_load(yfile)
utils._update_unique_id_in_yaml(data, unique_id)
zcsar.writestr(dst_file, yaml.dump(
data, default_flow_style=False,
allow_unicode=True))
else:
zcsar.write(src_file, dst_file)
def _create_and_upload_vnf_package(
tacker_client,
user_defined_data,
temp_csar_path):
# create vnf package
body = jsonutils.dumps({"userDefinedData": user_defined_data})
resp, vnf_package = tacker_client.do_request(
'/vnfpkgm/v1/vnf_packages', "POST", body=body)
with open(temp_csar_path, 'rb') as file_object:
resp, resp_body = tacker_client.do_request(
'/vnfpkgm/v1/vnf_packages/{id}/package_content'.format(
id=vnf_package['id']),
"PUT", body=file_object, content_type='application/zip')
# wait for onboard
timeout = VNF_PACKAGE_UPLOAD_TIMEOUT
start_time = int(time.time())
show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id'])
vnfd_id = None
while True:
resp, body = tacker_client.do_request(show_url, "GET")
if (200 <= resp.status_code < 300) and (
body['onboardingState'] == "ONBOARDED"):
vnfd_id = body['vnfdId']
break
if ((int(time.time()) - start_time) > timeout):
raise TimeoutError("Failed to onboard vnf package")
time.sleep(1)
# remove temporarily created CSAR file
os.remove(temp_csar_path)
return vnf_package['id'], vnfd_id
def _delete_vnf_package(tacker_client, vnf_package_id):
url = '/vnfpkgm/v1/vnf_packages/%s' % vnf_package_id
# Update vnf package before delete
req_body = jsonutils.dumps({"operationalState": "DISABLED"})
tacker_client.do_request(url, "PATCH", body=req_body)
# Delete vnf package before delete
tacker_client.do_request(url, "DELETE")
def _show_vnf_package(tacker_client, vnf_package_id):
# wait for onboard
timeout = VNF_PACKAGE_UPLOAD_TIMEOUT
start_time = int(time.time())
show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package_id)
while True:
resp, body = tacker_client.do_request(show_url, "GET")
if resp.ok:
return resp, body
if ((int(time.time()) - start_time) > timeout):
raise TimeoutError("Failed to onboard vnf package")
time.sleep(1)
def _list_vnf_package(tacker_client, **kwargs):
# wait for onboard
timeout = VNF_PACKAGE_UPLOAD_TIMEOUT
start_time = int(time.time())
while True:
resp, body = tacker_client.do_request(
'/vnfpkgm/v1/vnf_packages', "GET", **kwargs)
if resp.ok:
return resp, body
if ((int(time.time()) - start_time) > timeout):
raise TimeoutError("Failed to onboard vnf package")
time.sleep(1)
def _create_instantiate_vnf_request_body(flavour_id,
instantiation_level_id=None, vim_id=None, ext_vl=None,
add_params=None):
request_body = {"flavourId": flavour_id}
if instantiation_level_id:
request_body["instantiationLevelId"] = instantiation_level_id
if ext_vl:
request_body["extVirtualLinks"] = ext_vl
if vim_id:
request_body["vimConnectionInfo"] = [
{"id": uuidutils.generate_uuid(),
"vimId": vim_id,
"vimType": "ETSINFV.OPENSTACK_KEYSTONE.v_2"}]
if add_params:
request_body["additionalParams"] = add_params
return request_body
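# A hedged illustration (hypothetical flavour and instantiation-level names)
# of the request body assembled by _create_instantiate_vnf_request_body above:
#   _create_instantiate_vnf_request_body(
#       'simple', instantiation_level_id='instantiation_level_1',
#       vim_id='<vim uuid>')
#   => {"flavourId": "simple",
#       "instantiationLevelId": "instantiation_level_1",
#       "vimConnectionInfo": [{"id": "<generated uuid>",
#                              "vimId": "<vim uuid>",
#                              "vimType": "ETSINFV.OPENSTACK_KEYSTONE.v_2"}]}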
class BaseVnfLcmTest(base.BaseTackerTest):
is_setup_error = False
@classmethod
def setUpClass(cls):
'''Set up test class.
        We set up a fake NFVO server for tests here.
'''
super(BaseVnfLcmTest, cls).setUpClass()
cls._prepare_start_fake_server(FAKE_SERVER_MANAGER,
FAKE_SERVER_PORT)
@classmethod
def tearDownClass(cls):
super(BaseVnfLcmTest, cls).tearDownClass()
FAKE_SERVER_MANAGER.stop_server()
def setUp(self):
super(BaseVnfLcmTest, self).setUp()
if self.is_setup_error:
self.fail("Faild, not exists pre-registered image.")
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
self._clear_history_and_set_callback(FAKE_SERVER_MANAGER,
callback_url)
self.tacker_client = base.BaseTackerTest.tacker_http_client()
self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances"
self.base_subscriptions_url = "/vnflcm/v1/subscriptions"
self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs"
vim_list = self.client.list_vims()
self.vim = self.get_vim(vim_list, 'VIM0')
if not self.vim:
assert False, "vim_list is Empty: Default VIM is missing"
result = self._create_network_settings()
self.ext_networks = result.get('ext_networks')
self.ext_vl = result.get('ext_vl')
self.ext_mngd_networks = result.get('ext_mngd_networks')
self.ext_link_ports = result.get('ext_link_ports')
self.ext_subnets = result.get('ext_subnets')
self.changed_ext_networks = result.get('changed_ext_networks')
self.changed_ext_subnets = result.get('changed_ext_subnets')
@classmethod
def _prepare_start_fake_server(cls, fake_server,
fake_server_port):
fake_server.prepare_http_server(address='localhost',
port=fake_server_port)
fake_server.start_server()
def _clear_history_and_set_callback(self,
fake_server_manager,
callback_url):
fake_server_manager.clear_history(callback_url)
fake_server_manager.set_callback(
'POST',
callback_url,
status_code=204
)
fake_server_manager.set_callback(
'GET',
callback_url,
status_code=204
)
def _create_network_settings(self, neutron_client=None):
if neutron_client is None:
neutron_client = self.neutronclient()
        # Create external networks.
ext_networks = list()
# Create external managed networks
ext_mngd_networks = list() # Store ids for cleaning.
# Create external link ports in net0
ext_link_ports = list()
# Create external subnet in net1
ext_subnets = list() # Store ids for cleaning.
# Create external networks to change.
changed_ext_networks = list()
changed_ext_subnets = list() # Store ids for cleaning.
networks = neutron_client.list_networks()
for nw in networks.get('networks'):
if nw['name'] == 'net0':
ext_networks.append(nw['id'])
ext_vl = _get_external_virtual_links(nw['id'])
ext_subnets.append(nw['subnets'][0])
ext_link_ports.append(self._create_port(nw['id'],
neutron_client))
ext_link_ports.append(self._create_port(nw['id'],
neutron_client))
elif nw['name'] == 'net1':
ext_mngd_networks.append(nw['id'])
# create new network.
ext_net_id, ext_net_name = \
self._create_network("external_net", neutron_client)
ext_networks.append(ext_net_id)
ext_mngd_net_id, _ = \
self._create_network("external_managed_internal_net",
neutron_client)
ext_mngd_networks.append(ext_mngd_net_id)
changed_ext_net_id, changed_ext_net_name = \
self._create_network("changed_external_net", neutron_client)
changed_ext_networks.append(changed_ext_net_id)
        # Check how many networks are created.
networks = neutron_client.list_networks()
for nw in networks.get('networks'):
if nw['name'] not in [ext_net_name, changed_ext_net_name]:
continue
elif nw['name'] == ext_net_name:
ext_subnets.append(
self._create_subnet(nw,
cidr="22.22.1.0/24",
gateway="22.22.1.1",
neutron_client=neutron_client))
elif nw['name'] == changed_ext_net_name:
changed_ext_subnets.append(
self._create_subnet(nw,
cidr="22.22.2.0/24",
gateway="22.22.2.1",
neutron_client=neutron_client))
return {'ext_networks': ext_networks,
'ext_vl': ext_vl,
'ext_mngd_networks': ext_mngd_networks,
'ext_link_ports': ext_link_ports,
'ext_subnets': ext_subnets,
'changed_ext_networks': changed_ext_networks,
'changed_ext_subnets': changed_ext_subnets}
@classmethod
def _list_glance_image(cls, filter_name='cirros-0.5.2-x86_64-disk',
glance_client=None):
if glance_client is None:
glance_client = cls.glance_client
try:
images = glance_client.images.list()
except Exception:
print("glance-image does not exists.", flush=True)
return []
if filter_name is None:
return images
return list(filter(lambda image: image.name == filter_name, images))
@classmethod
def _get_glance_image(cls, image_id, glance_client=None):
if glance_client is None:
glance_client = cls.glance_client
try:
image = glance_client.images.get(image_id)
except Exception:
print("glance-image does not exists.", image_id, flush=True)
return None
return image
@classmethod
def _create_glance_image(cls, image_data, file_url, glance_client=None):
if glance_client is None:
glance_client = cls.glance_client
image = glance_client.images.create(**image_data)
glance_client.images.upload(image.id, file_url)
return image.id
def _get_glance_image_list_from_stack_resource(
self, stack_id, stack_resource_name, h_client=None):
if h_client is None:
h_client = self.h_client
image_id_list = []
for resource_name in stack_resource_name:
resource_details = self._get_heat_resource(stack_id,
resource_name,
h_client)
image = self._get_image_id_from_resource_attributes(
resource_details)
if image:
image_id_list.append(image.id)
return image_id_list
@classmethod
def _list_zone(cls):
try:
zone = cls.nova_client.services.list()
except nova_client.exceptions.ClientException:
print("availability zone does not exists.", flush=True)
return []
return zone
def _register_subscription(self, request_body, http_client=None):
if http_client is None:
http_client = self.http_client
resp, response_body = http_client.do_request(
self.base_subscriptions_url,
"POST",
body=jsonutils.dumps(request_body))
return resp, response_body
def _delete_subscription(self, subscription_id, tacker_client=None):
if tacker_client is None:
tacker_client = self.tacker_client
delete_url = os.path.join(self.base_subscriptions_url, subscription_id)
resp, body = tacker_client.do_request(delete_url, "DELETE")
return resp, body
def _show_subscription(self, subscription_id, tacker_client=None):
if tacker_client is None:
tacker_client = self.tacker_client
show_url = os.path.join(self.base_subscriptions_url, subscription_id)
resp, body = tacker_client.do_request(show_url, "GET")
return resp, body
def _list_subscription(self, tacker_client=None):
if tacker_client is None:
tacker_client = self.tacker_client
resp, body = tacker_client.do_request(
self.base_subscriptions_url, "GET")
return resp, body
def _list_subscription_filter(self, http_client=None, **kwargs):
if http_client is None:
http_client = self.http_client
params = kwargs.get('params', {})
filter_variable = params['filter']
subscriptions_list_filter_url = "%s?%s" % (
self.base_subscriptions_url, filter_variable)
resp, subscription_body = http_client.do_request(
subscriptions_list_filter_url, "GET")
return resp, subscription_body
def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None,
vnf_instance_description=None, http_client=None):
if http_client is None:
http_client = self.http_client
request_body = {'vnfdId': vnfd_id}
if vnf_instance_name:
request_body['vnfInstanceName'] = vnf_instance_name
if vnf_instance_description:
request_body['vnfInstanceDescription'] = vnf_instance_description
return self._create_vnf_instance_from_body(request_body, http_client)
def _create_vnf_instance_from_body(self, request_body, http_client=None):
if http_client is None:
http_client = self.http_client
request_body['vnfInstanceName'] = self._testMethodName
resp, response_body = http_client.do_request(
self.base_vnf_instances_url,
"POST",
body=jsonutils.dumps(request_body))
return resp, response_body
def _delete_vnf_instance(self, id, http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(self.base_vnf_instances_url, id)
resp, body = http_client.do_request(url, "DELETE")
return resp, body
def _show_vnf_instance(self, id, http_client=None):
if http_client is None:
http_client = self.http_client
show_url = os.path.join(self.base_vnf_instances_url, id)
resp, vnf_instance = http_client.do_request(show_url, "GET")
return resp, vnf_instance
def _list_vnf_instance(self, http_client=None, **kwargs):
if http_client is None:
http_client = self.http_client
resp, vnf_instances = http_client.do_request(
self.base_vnf_instances_url, "GET", **kwargs)
return resp, vnf_instances
def _wait_vnf_instance(self, id,
http_client=None,
instantiation_state=fields.VnfInstanceState.INSTANTIATED,
timeout=VNF_INSTANTIATE_TIMEOUT):
if http_client is None:
http_client = self.http_client
start_time = int(time.time())
while True:
resp, body = self._show_vnf_instance(id, http_client)
if body['instantiationState'] == instantiation_state:
break
if ((int(time.time()) - start_time) > timeout):
error = ("Vnf instance %(id)s status is %(current)s, "
"expected status should be %(expected)s")
self.fail(error % {"id": id,
"current": body['instantiationState'],
"expected": instantiation_state})
time.sleep(5)
def _instantiate_vnf_instance(self, id, request_body, http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(self.base_vnf_instances_url, id, "instantiate")
resp, body = http_client.do_request(url, "POST",
body=jsonutils.dumps(request_body))
return resp, body
def _heal_vnf_instance(self, vnf_instance_id, request_body,
http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(
self.base_vnf_instances_url,
vnf_instance_id,
"heal")
resp, body = http_client.do_request(url, "POST",
body=jsonutils.dumps(request_body))
return resp, body
def _scale_vnf_instance(self, vnf_instance_id, request_body,
http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(
self.base_vnf_instances_url,
vnf_instance_id,
"scale")
resp, body = http_client.do_request(url, "POST",
body=jsonutils.dumps(request_body))
return resp, body
def _terminate_vnf_instance(self, id, request_body, http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(self.base_vnf_instances_url, id, "terminate")
resp, body = http_client.do_request(url, "POST",
body=jsonutils.dumps(request_body))
return resp, body
def _update_vnf_instance(self, vnf_instance_id, request_body,
http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(self.base_vnf_instances_url, vnf_instance_id)
resp, body = http_client.do_request(url, "PATCH",
body=jsonutils.dumps(request_body))
return resp, body
def _change_ext_conn_vnf_instance(self, vnf_instance_id, request_body,
http_client=None):
if http_client is None:
http_client = self.http_client
url = os.path.join(
self.base_vnf_instances_url,
vnf_instance_id,
"change_ext_conn")
resp, body = http_client.do_request(url, "POST",
body=jsonutils.dumps(request_body))
return resp, body
def _rollback_op_occs(self, vnf_lcm_op_occs_id, http_client=None):
if http_client is None:
http_client = self.http_client
rollback_url = os.path.join(
self.base_vnf_lcm_op_occs_url,
vnf_lcm_op_occs_id, 'rollback')
resp, response_body = http_client.do_request(
rollback_url, "POST")
return resp, response_body
def _fail_op_occs(self, vnf_lcm_op_occs_id, http_client=None):
if http_client is None:
http_client = self.http_client
fail_url = os.path.join(
self.base_vnf_lcm_op_occs_url,
vnf_lcm_op_occs_id, 'fail')
resp, response_body = http_client.do_request(
fail_url, "POST")
return resp, response_body
def _retry_op_occs(self, vnf_lcm_op_occs_id, http_client=None):
if http_client is None:
http_client = self.http_client
retry_url = os.path.join(
self.base_vnf_lcm_op_occs_url,
vnf_lcm_op_occs_id, 'retry')
resp, response_body = http_client.do_request(
retry_url, "POST")
return resp, response_body
def _show_op_occs(self, vnf_lcm_op_occs_id, http_client=None):
if http_client is None:
http_client = self.http_client
show_url = os.path.join(
self.base_vnf_lcm_op_occs_url,
vnf_lcm_op_occs_id)
resp, response_body = http_client.do_request(
show_url, "GET")
return resp, response_body
def _wait_terminate_vnf_instance(self, id, timeout=None, http_client=None):
if http_client is None:
http_client = self.http_client
start_time = int(time.time())
self._wait_vnf_instance(id,
http_client,
instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED,
timeout=timeout)
# If gracefulTerminationTimeout is set, check whether vnf
# instantiation_state is set to NOT_INSTANTIATED after
# gracefulTerminationTimeout seconds.
        if timeout and int(time.time()) - start_time < timeout:
            self.fail("Vnf is terminated before graceful termination "
                      "timeout period")
        # wait for status completion
        time.sleep(VNF_DELETE_COMPLETION_WAIT)
def _get_heat_stack(self, vnf_instance_id, h_client=None,
prefix_id='vnflcm_'):
if h_client is None:
h_client = self.h_client
try:
stacks = h_client.stacks.list()
except Exception:
return None
target_stack_name = prefix_id + vnf_instance_id
        target_stacks = list(
            filter(
                lambda x: x.stack_name == target_stack_name,
                stacks))
        if len(target_stacks) == 0:
            return None
        return target_stacks[0]
def _delete_heat_stack(self, stack_id, h_client=None):
if h_client is None:
h_client = self.h_client
h_client.stacks.delete(stack_id)
def _wait_until_stack_ready(self, stack_id, expected_status,
h_client=None):
if h_client is None:
h_client = self.h_client
start_time = time.time()
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
while True:
stack = h_client.stacks.get(stack_id)
actual_status = stack.stack_status
print(
("Wait:callback_url=<%s>, " +
"wait_status=<%s> ") %
(callback_url, actual_status),
flush=True)
if actual_status == expected_status:
return None
if time.time() - start_time > VNF_LCM_DONE_TIMEOUT:
if actual_status:
error = (
"LCM incomplete timeout, " +
" stack %(stack_id)s" +
" is %(actual)s," +
"expected status should be %(expected)s")
self.fail(
error % {
"stack_id": stack_id,
"expected": expected_status,
"actual": actual_status})
else:
self.fail("LCM incomplete timeout")
time.sleep(RETRY_WAIT_TIME)
def _get_heat_resource_list(self, stack_id, nested_depth=0,
h_client=None):
if h_client is None:
h_client = self.h_client
try:
resources = h_client.resources.list(
stack_id, nested_depth=nested_depth)
except Exception:
return None
return resources
def _get_heat_resource(self, stack_id, resource_name, h_client=None):
if h_client is None:
h_client = self.h_client
try:
resource = h_client.resources.get(
stack_id, resource_name)
except Exception:
return None
return resource
def _get_heat_stack_template(self, stack_id, nested_depth=0,
h_client=None):
if h_client is None:
h_client = self.h_client
try:
template = h_client.stacks.template(stack_id)
except Exception:
return None
return template
def _get_image_id_from_resource_attributes(self, stack_resource_details):
if stack_resource_details is None:
return None
if not hasattr(stack_resource_details, 'attributes'):
return None
return stack_resource_details.attributes.get('image', {}).get('id')
def _get_vnfc_instance_id_list(
self,
stack_id,
resource_type='OS::Nova::Server',
nested_depth=2,
limit=2,
h_client=None):
if h_client is None:
h_client = self.h_client
resources = self._get_heat_resource_list(
stack_id, nested_depth=nested_depth, h_client=h_client)
if resources is None:
return None
return [r.physical_resource_id for r in resources[:limit]
if r.resource_type == resource_type]
def assert_http_header_location_for_create(self, response_header):
"""Validate URI in location header for CreateVNF
{apiRoot}/vnflcm/v1/vnf_instances/{vnfInstanceId}/instantiate
"""
location = response_header.get(
"Location") or response_header.get("location")
self.assertIsNotNone(location)
uri = urlparse(location)
self.assertIn(uri.scheme, ['http', 'https'])
self.assertRegex(
uri.path,
r'^/(?P<apiRoot>[^/]*?/)?vnflcm/v1/vnf_instances/' +
UUID_RE)
def assert_http_header_location_for_lcm_op_occs(self, response_header):
"""Validate URI in location header for various LCMs
{apiRoot}/vnflcm/v1/vnf_lcm_op_occs/{vnfLcmOpOccId}
"""
location = response_header.get(
"Location") or response_header.get("location")
self.assertIsNotNone(location)
uri = urlparse(location)
self.assertIn(uri.scheme, ['http', 'https'])
self.assertRegex(
uri.path,
r'^/(?P<apiRoot>[^/]*?/)?vnflcm/v1/vnf_lcm_op_occs/' +
UUID_RE)
def assert_http_header_location_for_subscription(self, response_header):
"""Validate URI in location header for Subscription
{apiRoot}/vnflcm/v1/subscriptions/{subscriptionId}
"""
location = response_header.get(
"Location") or response_header.get("location")
self.assertIsNotNone(location)
uri = urlparse(location)
self.assertIn(uri.scheme, ['http', 'https'])
self.assertRegex(
uri.path,
r'^/(?P<apiRoot>[^/]*?/)?vnflcm/v1/subscriptions/' +
UUID_RE)
def assert_instantiation_state(
self,
vnf_instance_body,
expected_instantiation_state=fields.VnfInstanceState.INSTANTIATED):
# FT-checkpoint: Instantiation state(VNF instance)
self.assertEqual(
expected_instantiation_state,
vnf_instance_body['instantiationState'])
def assert_vnf_state(
self,
vnf_instance_body,
expected_vnf_state=fields.VnfOperationalStateType.STARTED):
# FT-checkpoint: vnf_state
self.assertEqual(
expected_vnf_state,
vnf_instance_body['instantiatedVnfInfo']['vnfState'])
def assert_heat_stack_status(
self,
vnf_instance_id,
h_client=None,
expected_stack_status=infra_cnst.STACK_CREATE_COMPLETE):
if h_client is None:
h_client = self.h_client
stack = self._get_heat_stack(vnf_instance_id, h_client)
self.assertEqual(
expected_stack_status,
stack.stack_status)
def assert_heat_resource_status(
self,
vnf_instance,
h_client=None,
expected_glance_image=None,
expected_resource_status=None):
if h_client is None:
h_client = self.h_client
def assert_glance_image(stack_id, resource_name, h_client):
resource_details = self._get_heat_resource(stack_id,
resource_name,
h_client)
image = self._get_image_id_from_resource_attributes(
resource_details)
if image:
self.assertEqual(expected_glance_image, image.status)
stack = self._get_heat_stack(vnf_instance['id'], h_client)
resources = self._get_heat_resource_list(stack.id,
nested_depth=2, h_client=h_client)
self.assertIsNotNone(resources)
for resource in resources:
# FT-checkpoint: resource status
self.assertEqual(expected_resource_status,
resource.resource_status)
# FT-checkpoint: Glance-image
if expected_glance_image:
assert_glance_image(stack.id, resource.resource_name,
h_client)
def assert_heat_resource_status_is_none(
self,
stack_id,
h_client=None,
glance_client=None,
resources_name_list=None,
glance_image_id_list=None):
if h_client is None:
h_client = self.h_client
if glance_client is None:
glance_client = self.glance_client
resources_name_list = resources_name_list or []
for resource_name in resources_name_list:
resource = self._get_heat_resource(stack_id,
resource_name,
h_client)
self.assertIsNone(resource)
glance_image_id_list = glance_image_id_list or []
for glance_image_id in glance_image_id_list:
image = self._get_glance_image(glance_image_id,
glance_client)
self.assertIsNone(image)
def _wait_lcm_done(self,
expected_operation_status=None,
vnf_instance_id=None,
fake_server_manager=None):
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
start_time = int(time.time())
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
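        # Poll the fake notification server until a notification for the target
        # VNF instance reaches the expected operation state, failing the test
        # once VNF_LCM_DONE_TIMEOUT is exceeded.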
while True:
actual_status = None
vnf_lcm_op_occ_id = None
notify_mock_responses = fake_server_manager.get_history(
callback_url)
print(
("Wait:callback_url=<%s>, " +
"wait_status=<%s>, " +
"vnf_instance_id=<%s>") %
(callback_url, expected_operation_status, vnf_instance_id),
flush=True)
for res in notify_mock_responses:
if vnf_instance_id != res.request_body.get('vnfInstanceId'):
continue
if expected_operation_status is None:
return
actual_status = res.request_body.get('operationState', '')
vnf_lcm_op_occ_id = res.request_body.get('vnfLcmOpOccId', '')
if actual_status == expected_operation_status:
return
if ((int(time.time()) - start_time) > VNF_LCM_DONE_TIMEOUT):
if actual_status:
error = (
"LCM incomplete timeout, %(vnf_lcm_op_occ_id)s" +
" is %(actual)s," +
"expected status should be %(expected)s")
self.fail(
error % {
"vnf_lcm_op_occ_id": vnf_lcm_op_occ_id,
"expected": expected_operation_status,
"actual": actual_status})
else:
self.fail("LCM incomplete timeout")
time.sleep(RETRY_WAIT_TIME)
def _wait_stack_update(self, vnf_instance_id, expected_status,
h_client=None):
if h_client is None:
h_client = self.h_client
timeout = VNF_HEAL_TIMEOUT
start_time = int(time.time())
while True:
stack = self._get_heat_stack(vnf_instance_id, h_client)
if stack.stack_status == expected_status:
break
if ((int(time.time()) - start_time) > timeout):
error = ("Stack %(id)s status is %(current)s, expected status "
"should be %(expected)s")
                self.fail(error % {"id": vnf_instance_id,
                                   "current": stack.stack_status,
                                   "expected": expected_status})
time.sleep(RETRY_WAIT_TIME)
def assert_create_vnf(self, resp, vnf_instance,
fake_server_manager=None):
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(201, resp.status_code)
self.assert_http_header_location_for_create(resp.headers)
self.assert_instantiation_state(
vnf_instance,
fields.VnfInstanceState.NOT_INSTANTIATED)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance.get('id'),
fake_server_manager=fake_server_manager)
self.assertEqual(1, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfIdentifierCreationNotification')
def assert_delete_vnf(self, resp, vnf_instance_id, http_client=None,
fake_server_manager=None):
self.assertEqual(204, resp.status_code)
if http_client is None:
http_client = self.http_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
resp, _ = self._show_vnf_instance(vnf_instance_id, http_client)
self.assertEqual(404, resp.status_code)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(1, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfIdentifierDeletionNotification')
def assert_instantiate_vnf(
self,
resp,
vnf_instance_id,
http_client=None,
h_client=None,
fake_server_manager=None):
if http_client is None:
http_client = self.http_client
if h_client is None:
h_client = self.h_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(202, resp.status_code)
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id,
http_client)
self.assert_vnf_state(vnf_instance)
self.assert_heat_stack_status(vnf_instance['id'], h_client)
self.assert_heat_resource_status(
vnf_instance,
h_client,
expected_glance_image='active',
expected_resource_status='CREATE_COMPLETE')
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(3, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'STARTING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[2],
'VnfLcmOperationOccurrenceNotification',
'COMPLETED')
def assert_heal_vnf(
self,
resp,
vnf_instance_id,
expected_stack_status='UPDATE_COMPLETE',
http_client=None,
h_client=None,
fake_server_manager=None):
if http_client is None:
http_client = self.http_client
if h_client is None:
h_client = self.h_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(202, resp.status_code)
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id,
http_client)
self.assert_vnf_state(vnf_instance)
self.assert_instantiation_state(vnf_instance)
self.assert_heat_stack_status(
vnf_instance['id'],
h_client,
expected_stack_status=expected_stack_status)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(3, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'STARTING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[2],
'VnfLcmOperationOccurrenceNotification',
'COMPLETED')
def assert_terminate_vnf(
self,
resp,
vnf_instance_id,
stack_id,
resource_name_list,
glance_image_id_list,
http_client=None,
h_client=None,
glance_client=None,
fake_server_manager=None):
if http_client is None:
http_client = self.http_client
if h_client is None:
h_client = self.h_client
if glance_client is None:
glance_client = self.glance_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(202, resp.status_code)
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id,
http_client)
self.assert_instantiation_state(
vnf_instance,
fields.VnfInstanceState.NOT_INSTANTIATED)
# FT-checkpoint: Heat stack status.
stack = self._get_heat_stack(vnf_instance_id, h_client)
self.assertIsNone(stack)
self.assert_heat_resource_status_is_none(
stack_id,
h_client,
glance_client,
resources_name_list=resource_name_list,
glance_image_id_list=glance_image_id_list)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(3, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'STARTING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[2],
'VnfLcmOperationOccurrenceNotification',
'COMPLETED')
def assert_scale_vnf(
self,
resp,
vnf_instance_id,
pre_stack_resource_list,
post_stack_resource_list,
scale_type='SCALE_OUT',
expected_stack_status='CREATE_COMPLETE',
http_client=None,
h_client=None,
fake_server_manager=None):
if http_client is None:
http_client = self.http_client
if h_client is None:
h_client = self.h_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(202, resp.status_code)
self.assert_http_header_location_for_lcm_op_occs(resp.headers)
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id,
http_client)
self.assert_vnf_state(vnf_instance)
self.assert_instantiation_state(vnf_instance)
# check: scaling stack resource count
if scale_type == 'SCALE_OUT':
self.assertTrue(len(pre_stack_resource_list) <
len(post_stack_resource_list))
else:
self.assertTrue(len(pre_stack_resource_list) >
len(post_stack_resource_list))
# check scaleStatus
scale_status = vnf_instance['instantiatedVnfInfo']['scaleStatus']
self.assertTrue(len(scale_status) > 0)
for status in scale_status:
self.assertIsNotNone(status.get('aspectId'))
self.assertIsNotNone(status.get('scaleLevel'))
self.assert_heat_stack_status(
vnf_instance['id'],
h_client,
expected_stack_status=expected_stack_status)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(3, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'STARTING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[2],
'VnfLcmOperationOccurrenceNotification',
'COMPLETED')
def assert_rollback_vnf(self, resp, vnf_instance_id,
fake_server_manager=None):
self.assertEqual(202, resp.status_code)
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(2, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'ROLLING_BACK')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'ROLLED_BACK')
def assert_fail_vnf(self, resp, vnf_instance_id,
fake_server_manager=None):
self.assertEqual(200, resp.status_code)
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(1, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'FAILED')
def assert_retry_vnf(self, resp, vnf_instance_id,
fake_server_manager=None):
self.assertEqual(202, resp.status_code)
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(2, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'FAILED_TEMP')
def assert_update_vnf(
self,
resp,
vnf_instance_id,
expected_stack_status='CREATE_COMPLETE',
http_client=None,
h_client=None,
fake_server_manager=None):
if http_client is None:
http_client = self.http_client
if h_client is None:
h_client = self.h_client
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
self.assertEqual(202, resp.status_code)
self.assert_http_header_location_for_lcm_op_occs(resp.headers)
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id,
http_client)
self.assertEqual(200, resp.status_code)
self.assert_vnf_state(vnf_instance)
self.assert_instantiation_state(vnf_instance)
self.assert_heat_stack_status(
vnf_instance['id'],
h_client,
expected_stack_status=expected_stack_status)
# FT-checkpoint: Notification
callback_url = os.path.join(
MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = self._filter_notify_history(callback_url,
vnf_instance_id, fake_server_manager=fake_server_manager)
self.assertEqual(2, len(notify_mock_responses))
self.assert_notification_mock_response(
notify_mock_responses[0],
'VnfLcmOperationOccurrenceNotification',
'PROCESSING')
self.assert_notification_mock_response(
notify_mock_responses[1],
'VnfLcmOperationOccurrenceNotification',
'COMPLETED')
def assert_notification_get(self, callback_url,
fake_server_manager=None):
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
notify_mock_responses = fake_server_manager.get_history(
callback_url)
fake_server_manager.clear_history(
callback_url)
self.assertEqual(1, len(notify_mock_responses))
self.assertEqual(204, notify_mock_responses[0].status_code)
def assert_notification_mock_response(
self,
notify_mock_response,
expected_notify_types,
expected_operation_status=None):
self.assertEqual(204, notify_mock_response.status_code)
self.assertEqual(
expected_notify_types,
notify_mock_response.request_body['notificationType'])
if expected_operation_status:
self.assertEqual(
expected_operation_status,
notify_mock_response.request_body['operationState'])
def _create_network(self, name, neutron_client=None):
        # Append a UUID to the caller-supplied network name so the created
        # network does not clash with any existing one.
if neutron_client is None:
neutron_client = self.neutronclient()
try:
uniq_name = name + '-' + uuidutils.generate_uuid()
            net = neutron_client.create_network(
                {'network': {'name': uniq_name}})
net_id = net['network']['id']
self.addCleanup(self._delete_network, net_id)
print("Create network success, %s" % uniq_name, flush=True)
return net_id, uniq_name
except Exception as e:
self.fail("Failed, create network=<%s>, %s" %
(uniq_name, e))
def _create_subnet(self, network, cidr, gateway, neutron_client=None):
if neutron_client is None:
neutron_client = self.neutronclient()
body = {'subnet': {'network_id': network['id'],
'name': "subnet-%s" % uuidutils.generate_uuid(),
'cidr': "{}".format(cidr),
'ip_version': 4,
'gateway_ip': "{}".format(gateway),
"enable_dhcp": True}}
try:
subnet = neutron_client.create_subnet(body=body)["subnet"]
self.addCleanup(self._delete_subnet, subnet['id'], neutron_client)
print("Create subnet success, %s" % subnet['id'], flush=True)
return subnet['id']
except Exception as e:
self.fail("Failed, create subnet for net_id=<%s>, %s" %
(network['id'], e))
def _create_port(self, network_id, neutron_client=None):
if neutron_client is None:
neutron_client = self.neutronclient()
body = {'port': {'network_id': network_id}}
try:
port = neutron_client.create_port(body=body)["port"]
self.addCleanup(self._delete_port, port['id'], neutron_client)
return port['id']
except Exception as e:
self.fail("Failed, create port for net_id=<%s>, %s" %
(network_id, e))
def _delete_network(self, network_id, neutron_client=None):
if neutron_client is None:
neutron_client = self.neutronclient()
try:
neutron_client.delete_network(network_id)
except Exception:
print("Failed, delete network.", network_id, flush=True)
def _delete_subnet(self, subnet_id, neutron_client=None):
if neutron_client is None:
neutron_client = self.neutronclient()
try:
neutron_client.delete_subnet(subnet_id)
except Exception:
print("Failed, delete subnet.", subnet_id, flush=True)
def _delete_port(self, port_id, neutron_client):
if neutron_client is None:
neutron_client = self.neutronclient()
try:
neutron_client.delete_port(port_id)
except Exception:
print("Failed, delete port.", port_id, flush=True)
def assert_subscription_show(self, resp, response_body):
"""Assert that subscription informations has mandatory keys."""
self.assertEqual(200, resp.status_code)
self.assertIsNotNone(response_body.get('id'))
_filter = response_body.get('filter')
self.assertIsNotNone(_filter)
self.assertIsNotNone(_filter.get('notificationTypes'))
self.assertIsNotNone(_filter.get('operationTypes'))
self.assertIsNotNone(response_body.get('callbackUri'))
_links = response_body.get('_links')
self.assertIsNotNone(_links)
self.assertIsNotNone(_links.get('self'))
self.assertIsNotNone(_links.get('self').get('href'))
def _filter_notify_history(self, callback_url, vnf_instance_id,
fake_server_manager=None):
if fake_server_manager is None:
fake_server_manager = FAKE_SERVER_MANAGER
notify_histories = fake_server_manager.get_history(
callback_url)
fake_server_manager.clear_history(callback_url)
return [
h for h in notify_histories
if h.request_body.get('vnfInstanceId') == vnf_instance_id]
def _get_heat_stack_show(self, vnf_instance_id, resource_name=None,
h_client=None):
"""Retrieve image name of the resource from stack"""
if h_client is None:
h_client = self.h_client
try:
stack = self._get_heat_stack(vnf_instance_id, h_client)
stack_info = h_client.stacks.get(stack.id)
stack_dict = stack_info.to_dict()
resource_dict = json.loads(stack_dict['parameters']['nfv'])
except Exception:
return None
if resource_name is None:
return resource_dict
return resource_dict['VDU'][resource_name]['image']
|
openstack/tacker
|
tacker/tests/functional/sol/vnflcm/base.py
|
Python
|
apache-2.0
| 55,167
|
# Create your views here.
import datetime
from django.shortcuts import (render_to_response, HttpResponseRedirect, RequestContext, Http404)
from django.contrib.auth.decorators import login_required
import stripe
from profiles.models import Profile
from profiles.forms import AddressForm
from products.models import Product
from orders.models import Order, ShippingStatus
from .models import Cart, CartItem
from .forms import ProductQtyForm
from orders.customs import id_generator
stripe.api_key = 'sk_test_VlexvRxhTsrRNNM01vDSuFzI'
def add_to_cart(request):
request.session.set_expiry(0)
try:
cart_id = request.session['cart_id']
except Exception:
cart = Cart()
cart.save()
request.session['cart_id'] = cart.id
cart_id = cart.id
if request.method == "POST":
form = ProductQtyForm(request.POST)
if form.is_valid():
product_slug = form.cleaned_data['slug']
product_quantity = form.cleaned_data['quantity']
try:
product = Product.objects.get(slug=product_slug)
except Exception:
product = None
try:
cart = Cart.objects.get(id=cart_id)
except Exception:
cart = None
new_cart, created = CartItem.objects.get_or_create(
cart=cart,
product=product
)
if product_quantity > 0:
new_cart.quantity = product_quantity
new_cart.total = int(new_cart.quantity) * new_cart.product.price
new_cart.save()
else:
pass
if created:
                print('created!')
return HttpResponseRedirect('/cart/')
return HttpResponseRedirect('/contact/')
else:
raise Http404
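# Ensure the user's Profile carries a Stripe customer id: create a new Stripe
# customer when no usable id is stored, then return the id.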
def add_stripe(user):
profile, created = Profile.objects.get_or_create(user=user)
if len(profile.stripe_id) > 2:
print "Exists"
pass
else:
new_customer = stripe.Customer.create(
email=user.email,
description="Added to Stripe on %s" % (datetime.datetime.now())
)
profile.stripe_id = new_customer.id
profile.save()
return profile.stripe_id
def view(request):
try:
cart_id = request.session['cart_id']
cart = Cart.objects.get(id=cart_id)
request.session['cart_items'] = len(cart.cartitem_set.all())
except:
cart = False
if not cart or not cart.active:
message = 'Your cart is empty!'
if cart and cart.active:
cart = cart
cart.total = 0
for item in cart.cartitem_set.all():
cart.total += item.total
cart.save()
try:
stripe_id = add_stripe(request.user)
except:
pass
return render_to_response('cart/view.html', locals(), context_instance=RequestContext(request))
@login_required
def checkout(request):
try:
cart_id = request.session['cart_id']
cart = Cart.objects.get(id=cart_id)
except:
cart = False
return HttpResponseRedirect('/cart')
amount = int(cart.total * 100)
try:
stripe_id = add_stripe(request.user)
except:
pass
new_number = id_generator()
new_order, created = Order.objects.get_or_create(cart=cart, user=request.user)
if created:
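        # Build the order id from the generated token and the cart id: the first
        # two characters of the token, then the cart id, then the token from
        # index 3 onward (the character at index 2 is dropped).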
new_order.order_id = str(new_number[:2]) + str(new_order.cart.id) + str(new_number[3:])
new_order.status = "Started"
new_order.save()
address_form = AddressForm(request.POST or None)
if request.method == "POST":
address_form = AddressForm(request.POST)
token = request.POST['stripeToken']
profile = request.user.get_profile()
customer = stripe.Customer.retrieve(stripe_id)
new_card = customer.cards.create(card=token)
if address_form.is_valid():
form = address_form.save(commit=False)
            print(form)
if address_form.cleaned_data['save_card']:
new_card.address_line1 = form.address1
if len(form.address2) > 1:
new_card.address_line2 = form.address2
new_card.address_city = form.city
new_card.address_zip = form.postal_code
new_card.address_country = form.country
new_card.save()
try:
form.user = request.user
form.save()
print "form saved!"
except:
pass
else:
                print('did not save')
charge = stripe.Charge.create(
amount=amount,
currency='usd',
customer=customer.id,
description='Payment for order %s' % (new_order.order_id)
)
if charge:
                print('charged')
new_order.status = 'Collected'
new_order.cc_four = new_card.last4
new_order.address = form
new_order.save()
add_shipping = ShippingStatus(order=new_order)
add_shipping.save()
cart.user = request.user
cart.active = False
cart.save()
del request.session['cart_id']
del request.session['cart_items']
return HttpResponseRedirect('/orders/')
return render_to_response('cart/checkout.html', locals(), context_instance=RequestContext(request))
|
ferdyrod/basic-ecommerce
|
cart/views.py
|
Python
|
apache-2.0
| 5,568
|
import sys
import os
# To run this on your local machine, you need to setup kafka and create a producer first:
# http://kafka.apache.org/documentation.html#quickstart
# Path for spark source folder
os.environ['SPARK_HOME'] = "/path/to/spark"
# Append pyspark to Python Path
sys.path.append("/path/to/spark/python")
try:
from pyspark import SparkContext, SparkConf
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
print ("Successfully imported Spark Modules")
except ImportError as e:
print ("Can not import Spark Modules", e)
sys.exit(1)
import functools
import itertools
from kafka import KafkaConsumer
def parseData(line):
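    # Each record is assumed to be a comma-separated line whose first four
    # fields are metadata, whose last field is the class label, and whose
    # remaining fields are numeric features (layout inferred from the parsing
    # below).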
splittedLine = line.split(",")
values = [float(s) for s in splittedLine[4:-1]]
label = splittedLine[-1]
featuresVector = Vectors.dense(values)
return LabeledPoint(label, featuresVector)
if __name__ == "__main__":
conf = SparkConf().setAppName("RandomForest_Anomaly_Detection_Kafka_Consumer")
sc = SparkContext(conf=conf)
savedModel = RandomForestModel.load(sc, "../train_model/model")
consumer = KafkaConsumer('test', group_id='my_group', bootstrap_servers=['localhost:9092'])
print("Waiting for messages...")
for message in consumer:
print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, message.offset, message.key, message.value))
data = sc.parallelize([message.value])
testData = data.map(parseData)
predictions = savedModel.predict(testData.map(lambda x: x.features))
print("Prediction: ")
print(predictions.first())
|
MarioPerezEsteso/Network-Anomaly-Detection-Apache-Spark-Kafka
|
src/main/python/kafka_consumer/kafkaconsumerrandomforest.py
|
Python
|
apache-2.0
| 1,737
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Django
from django.conf import settings # noqa
from django.db import connection
from django.db.models.signals import pre_delete # noqa
# AWX
from awx.main.models.base import ( # noqa
BaseModel, PrimordialModel, prevent_search, accepts_json,
CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES
)
from awx.main.models.unified_jobs import ( # noqa
UnifiedJob, UnifiedJobTemplate, StdoutMaxBytesExceeded
)
from awx.main.models.organization import ( # noqa
Organization, Profile, Team, UserSessionMembership
)
from awx.main.models.credential import ( # noqa
Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env
)
from awx.main.models.projects import Project, ProjectUpdate # noqa
from awx.main.models.inventory import ( # noqa
CustomInventoryScript, Group, Host, Inventory, InventorySource,
InventoryUpdate, SmartInventoryMembership
)
from awx.main.models.jobs import ( # noqa
Job, JobHostSummary, JobLaunchConfig, JobTemplate, SystemJob,
SystemJobTemplate,
)
from awx.main.models.events import ( # noqa
AdHocCommandEvent, InventoryUpdateEvent, JobEvent, ProjectUpdateEvent,
SystemJobEvent,
)
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
from awx.main.models.schedules import Schedule # noqa
from awx.main.models.activity_stream import ActivityStream # noqa
from awx.main.models.ha import ( # noqa
Instance, InstanceGroup, TowerScheduleState,
)
from awx.main.models.rbac import ( # noqa
Role, batch_role_ancestor_rebuilding, get_roles_on_resource,
role_summary_fields_generator, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models.mixins import ( # noqa
CustomVirtualEnvMixin, ResourceMixin, SurveyJobMixin,
SurveyJobTemplateMixin, TaskManagerInventoryUpdateMixin,
TaskManagerJobMixin, TaskManagerProjectUpdateMixin,
TaskManagerUnifiedJobMixin,
)
from awx.main.models.notifications import ( # noqa
Notification, NotificationTemplate,
JobNotificationMixin
)
from awx.main.models.label import Label # noqa
from awx.main.models.workflow import ( # noqa
WorkflowJob, WorkflowJobNode, WorkflowJobOptions, WorkflowJobTemplate,
WorkflowJobTemplateNode, WorkflowApproval, WorkflowApprovalTemplate,
)
from awx.api.versioning import reverse
from awx.main.models.oauth import ( # noqa
OAuth2AccessToken, OAuth2Application
)
from oauth2_provider.models import Grant, RefreshToken # noqa -- needed django-oauth-toolkit model migrations
# Add custom methods to User model for permissions checks.
from django.contrib.auth.models import User # noqa
from awx.main.access import ( # noqa
get_user_queryset, check_user_access, check_user_access_with_errors,
user_accessible_objects
)
User.add_to_class('get_queryset', get_user_queryset)
User.add_to_class('can_access', check_user_access)
User.add_to_class('can_access_with_errors', check_user_access_with_errors)
User.add_to_class('accessible_objects', user_accessible_objects)
def enforce_bigint_pk_migration():
# see: https://github.com/ansible/awx/issues/6010
# look at all the event tables and verify that they have been fully migrated
# from the *old* int primary key table to the replacement bigint table
# if not, attempt to migrate them in the background
for tblname in (
'main_jobevent', 'main_inventoryupdateevent',
'main_projectupdateevent', 'main_adhoccommandevent',
'main_systemjobevent'
):
with connection.cursor() as cursor:
cursor.execute(
'SELECT 1 FROM information_schema.tables WHERE table_name=%s',
(f'_old_{tblname}',)
)
if bool(cursor.rowcount):
from awx.main.tasks import migrate_legacy_event_data
migrate_legacy_event_data.apply_async([tblname])
def cleanup_created_modified_by(sender, **kwargs):
# work around a bug in django-polymorphic that doesn't properly
# handle cascades for reverse foreign keys on the polymorphic base model
# https://github.com/django-polymorphic/django-polymorphic/issues/229
for cls in (UnifiedJobTemplate, UnifiedJob):
cls.objects.filter(created_by=kwargs['instance']).update(created_by=None)
cls.objects.filter(modified_by=kwargs['instance']).update(modified_by=None)
pre_delete.connect(cleanup_created_modified_by, sender=User)
@property
def user_get_organizations(user):
return Organization.objects.filter(member_role__members=user)
@property
def user_get_admin_of_organizations(user):
return Organization.objects.filter(admin_role__members=user)
@property
def user_get_auditor_of_organizations(user):
return Organization.objects.filter(auditor_role__members=user)
@property
def created(user):
return user.date_joined
User.add_to_class('organizations', user_get_organizations)
User.add_to_class('admin_of_organizations', user_get_admin_of_organizations)
User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations)
User.add_to_class('created', created)
@property
def user_is_system_auditor(user):
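    # Cache the role lookup on the user object so repeated permission checks
    # do not query the roles table more than once for the same user instance.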
if not hasattr(user, '_is_system_auditor'):
if user.pk:
user._is_system_auditor = user.roles.filter(
singleton_name='system_auditor', role_field='system_auditor').exists()
else:
# Odd case where user is unsaved, this should never be relied on
return False
return user._is_system_auditor
@user_is_system_auditor.setter
def user_is_system_auditor(user, tf):
if not user.id:
# If the user doesn't have a primary key yet (i.e., this is the *first*
# time they've logged in, and we've just created the new User in this
# request), we need one to set up the system auditor role
user.save()
if tf:
role = Role.singleton('system_auditor')
# must check if member to not duplicate activity stream
if user not in role.members.all():
role.members.add(user)
user._is_system_auditor = True
else:
role = Role.singleton('system_auditor')
if user in role.members.all():
role.members.remove(user)
user._is_system_auditor = False
User.add_to_class('is_system_auditor', user_is_system_auditor)
def user_is_in_enterprise_category(user, category):
ret = (category,) in user.enterprise_auth.values_list('provider') and not user.has_usable_password()
# NOTE: this if-else block ensures existing enterprise users are still able to
# log in. Remove it in a future release
if category == 'radius':
ret = ret or not user.has_usable_password()
elif category == 'saml':
ret = ret or user.social_auth.all()
return ret
User.add_to_class('is_in_enterprise_category', user_is_in_enterprise_category)
def o_auth2_application_get_absolute_url(self, request=None):
return reverse('api:o_auth2_application_detail', kwargs={'pk': self.pk}, request=request)
OAuth2Application.add_to_class('get_absolute_url', o_auth2_application_get_absolute_url)
def o_auth2_token_get_absolute_url(self, request=None):
return reverse('api:o_auth2_token_detail', kwargs={'pk': self.pk}, request=request)
OAuth2AccessToken.add_to_class('get_absolute_url', o_auth2_token_get_absolute_url)
from awx.main.registrar import activity_stream_registrar # noqa
activity_stream_registrar.connect(Organization)
activity_stream_registrar.connect(Inventory)
activity_stream_registrar.connect(Host)
activity_stream_registrar.connect(Group)
activity_stream_registrar.connect(InventorySource)
#activity_stream_registrar.connect(InventoryUpdate)
activity_stream_registrar.connect(Credential)
activity_stream_registrar.connect(CredentialType)
activity_stream_registrar.connect(Team)
activity_stream_registrar.connect(Project)
#activity_stream_registrar.connect(ProjectUpdate)
activity_stream_registrar.connect(JobTemplate)
activity_stream_registrar.connect(Job)
activity_stream_registrar.connect(AdHocCommand)
# activity_stream_registrar.connect(JobHostSummary)
# activity_stream_registrar.connect(JobEvent)
# activity_stream_registrar.connect(Profile)
activity_stream_registrar.connect(Schedule)
activity_stream_registrar.connect(CustomInventoryScript)
activity_stream_registrar.connect(NotificationTemplate)
activity_stream_registrar.connect(Notification)
activity_stream_registrar.connect(Label)
activity_stream_registrar.connect(User)
activity_stream_registrar.connect(WorkflowJobTemplate)
activity_stream_registrar.connect(WorkflowJobTemplateNode)
activity_stream_registrar.connect(WorkflowJob)
activity_stream_registrar.connect(WorkflowApproval)
activity_stream_registrar.connect(WorkflowApprovalTemplate)
activity_stream_registrar.connect(OAuth2Application)
activity_stream_registrar.connect(OAuth2AccessToken)
# prevent API filtering on certain Django-supplied sensitive fields
prevent_search(User._meta.get_field('password'))
prevent_search(OAuth2AccessToken._meta.get_field('token'))
prevent_search(RefreshToken._meta.get_field('token'))
prevent_search(OAuth2Application._meta.get_field('client_secret'))
prevent_search(OAuth2Application._meta.get_field('client_id'))
prevent_search(Grant._meta.get_field('code'))
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/models/__init__.py
|
Python
|
apache-2.0
| 9,309
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.common.types import tag_snippet
__protobuf__ = proto.module(
package="google.ads.googleads.v10.resources",
marshal="google.ads.googleads.v10",
manifest={"RemarketingAction",},
)
class RemarketingAction(proto.Message):
r"""A remarketing action. A snippet of JavaScript code that will
collect the product id and the type of page people visited
(product page, shopping cart page, purchase page, general site
visit) on an advertiser's website.
Attributes:
resource_name (str):
Immutable. The resource name of the remarketing action.
Remarketing action resource names have the form:
``customers/{customer_id}/remarketingActions/{remarketing_action_id}``
id (int):
Output only. Id of the remarketing action.
This field is a member of `oneof`_ ``_id``.
name (str):
The name of the remarketing action.
This field is required and should not be empty
when creating new remarketing actions.
This field is a member of `oneof`_ ``_name``.
tag_snippets (Sequence[google.ads.googleads.v10.common.types.TagSnippet]):
Output only. The snippets used for tracking
remarketing actions.
"""
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=5, optional=True,)
name = proto.Field(proto.STRING, number=6, optional=True,)
tag_snippets = proto.RepeatedField(
proto.MESSAGE, number=4, message=tag_snippet.TagSnippet,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v10/resources/types/remarketing_action.py
|
Python
|
apache-2.0
| 2,279
|
import asyncio
from unittest import mock
import pytest
from aiohttp import errors, web
@asyncio.coroutine
def test_simple_server(raw_test_server, test_client):
@asyncio.coroutine
def handler(request):
return web.Response(text=str(request.rel_url))
server = yield from raw_test_server(handler)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 200
txt = yield from resp.text()
assert txt == '/path/to'
@asyncio.coroutine
def test_raw_server_not_http_exception(raw_test_server, test_client):
exc = RuntimeError("custom runtime error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 500
txt = yield from resp.text()
assert "<h1>500 Internal Server Error</h1>" in txt
logger.exception.assert_called_with(
"Error handling request",
exc_info=exc)
@asyncio.coroutine
def test_raw_server_handler_timeout(raw_test_server, test_client):
exc = asyncio.TimeoutError("error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 504
txt = yield from resp.text()
assert "<h1>504 Gateway Timeout</h1>" in txt
logger.debug.assert_called_with("Request handler timed out.")
@asyncio.coroutine
def test_raw_server_do_not_swallow_exceptions(raw_test_server, test_client):
exc = None
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
for _exc, msg in (
(asyncio.CancelledError("error"), 'Request handler cancelled.'),
(errors.ClientDisconnectedError("error"),
'Ignored premature client disconnection #1.')):
exc = _exc
with pytest.raises(errors.ClientResponseError):
yield from client.get('/path/to')
logger.debug.assert_called_with(msg)
@asyncio.coroutine
def test_raw_server_not_http_exception_debug(raw_test_server, test_client):
exc = RuntimeError("custom runtime error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger, debug=True)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 500
txt = yield from resp.text()
assert "<h2>Traceback:</h2>" in txt
logger.exception.assert_called_with(
"Error handling request",
exc_info=exc)
def test_create_web_server_with_implicit_loop(loop):
asyncio.set_event_loop(loop)
@asyncio.coroutine
def handler(request):
return web.Response() # pragma: no cover
srv = web.Server(handler)
assert srv._loop is loop
|
z2v/aiohttp
|
tests/test_web_server.py
|
Python
|
apache-2.0
| 3,207
|
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for altitude_safety."""
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env.balloon import altitude_safety
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import units
import jax
class AltitudeSafetyTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
very_low_altitude = (
altitude_safety.MIN_ALTITUDE - units.Distance(feet=100.0))
low_altitude = (altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER / 2.0)
low_nominal_altitude = (
altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER +
altitude_safety.RESTART_HYSTERESIS / 2.0)
nominal_altitude = (
altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER +
altitude_safety.RESTART_HYSTERESIS + units.Distance(feet=100.0))
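    # Altitude bands used by the pressures below, lowest to highest:
    #   very_low < MIN_ALTITUDE < low < MIN_ALTITUDE + BUFFER
    #   < low_nominal < MIN_ALTITUDE + BUFFER + RESTART_HYSTERESIS < nominal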
self.very_low_altitude_pressure = self.atmosphere.at_height(
very_low_altitude).pressure
self.low_altitude_pressure = self.atmosphere.at_height(
low_altitude).pressure
self.low_nominal_altitude_pressure = self.atmosphere.at_height(
low_nominal_altitude).pressure
self.nominal_altitude_pressure = self.atmosphere.at_height(
nominal_altitude).pressure
self.pressures = {
'very_low_altitude_pressure': self.very_low_altitude_pressure,
'low_altitude_pressure': self.low_altitude_pressure,
'low_nominal_altitude_pressure': self.low_nominal_altitude_pressure,
'nominal_altitude_pressure': self.nominal_altitude_pressure
}
@parameterized.named_parameters(
dict(
          testcase_name='very_low_altitude_advises_up',
pressure='very_low_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='low_altitude_advises_stay',
pressure='low_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='nominal_altitude_allows_action',
pressure='nominal_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.DOWN),
dict(
testcase_name='low_altitude_allows_up_action',
pressure='low_altitude_pressure',
action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP))
def test_safety_layer_gives_correct_action(
self, pressure: str, action: control.AltitudeControlCommand,
expected_action: control.AltitudeControlCommand):
asl = altitude_safety.AltitudeSafetyLayer()
pressure = self.pressures[pressure]
action = asl.get_action(action, self.atmosphere, pressure)
self.assertEqual(action, expected_action)
@parameterized.named_parameters(
dict(
testcase_name='very_low_altitude_is_paused',
pressure='very_low_altitude_pressure',
expected=True),
dict(
testcase_name='low_altitude_is_paused',
pressure='low_altitude_pressure',
expected=True),
dict(
testcase_name='nominal_altitude_is_not_paused',
pressure='nominal_altitude_pressure',
expected=False))
def test_navigation_is_paused_is_calculated_correctly(self, pressure: str,
expected: bool):
asl = altitude_safety.AltitudeSafetyLayer()
pressure = self.pressures[pressure]
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
pressure)
self.assertEqual(asl.navigation_is_paused, expected)
def test_increasing_altitude_below_hysteresis_does_not_resume_control(self):
asl = altitude_safety.AltitudeSafetyLayer()
# Sets state to LOW.
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_altitude_pressure)
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_nominal_altitude_pressure)
self.assertTrue(asl.navigation_is_paused)
def test_increasing_altitude_above_hysteresis_resumes_control(self):
asl = altitude_safety.AltitudeSafetyLayer()
# Sets state to LOW.
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_altitude_pressure)
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.nominal_altitude_pressure)
self.assertFalse(asl.navigation_is_paused)
if __name__ == '__main__':
absltest.main()
|
google/balloon-learning-environment
|
balloon_learning_environment/env/balloon/altitude_safety_test.py
|
Python
|
apache-2.0
| 5,465
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
TensorLayer provides rich layer implementations tailored for
various benchmarks and domain-specific problems. In addition, we also
support transparent access to native TensorFlow parameters.
For example, we provide not only layers for local response normalization, but also
layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``.
More functions can be found in `TensorFlow API <https://www.tensorflow.org/versions/master/api_docs/index.html>`__.
"""
from .hyperdash import *
|
zsdonghao/tensorlayer
|
tensorlayer/logging/contrib/__init__.py
|
Python
|
apache-2.0
| 536
|
# coding=utf-8
import pytz
import datetime
"""
Copyright 2015 Samuel Góngora García
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 's.gongoragarcia@gmail.com'
def localize_datetime_utc(date_time):
"""
Localizes in the UTC timezone a given Datetime object.
:param date_time: The object to be localized.
:return: Localized Datetime object in the UTC timezone.
"""
return pytz.utc.localize(date_time)
def get_now_utc(no_microseconds=True):
"""
This method returns now's datetime object UTC localized.
:param no_microseconds: sets whether microseconds should be cleared.
:return: the just created datetime object with today's date.
"""
if no_microseconds:
return pytz.utc.localize(datetime.datetime.utcnow()).replace(
microsecond=0
)
else:
return pytz.utc.localize(datetime.datetime.utcnow())
def get_now_hour_utc(no_microseconds=True):
"""
This method returns now's hour in the UTC timezone.
:param no_microseconds: sets whether microseconds should be cleared.
:return: The time object within the UTC timezone.
"""
    if no_microseconds:
        return datetime.datetime.utcnow().replace(microsecond=0).time()
    else:
        return datetime.datetime.utcnow().time()
def get_today_utc():
"""
This method returns today's date localized with the microseconds set to
zero.
:return: the just created datetime object with today's date.
"""
return pytz.utc.localize(datetime.datetime.utcnow()).replace(
hour=0, minute=0, second=0, microsecond=0
)
def get_next_midnight():
"""
This method returns today's datetime 00am.
:return: the just created datetime object with today's datetime 00am.
"""
return pytz.utc.localize(datetime.datetime.today()).replace(
hour=0, minute=0, second=0, microsecond=0
) + datetime.timedelta(days=1)
def localize_date_utc(date):
"""
Localizes in the UTC timezone the given date object.
:param date: The date object to be localized.
:return: A localized datetime object in the UTC timezone.
"""
return pytz.utc.localize(
datetime.datetime.combine(
date, datetime.time(hour=0, minute=0, second=0)
)
)
def localize_time_utc(non_utc_time):
"""
Localizes in the UTC timezone the given time object.
:param non_utc_time: The time object to be localized.
:return: A localized time object in the UTC timezone.
"""
return pytz.utc.localize(non_utc_time)
TIMESTAMP_0 = localize_date_utc(datetime.datetime(year=1970, month=1, day=1))
def get_utc_timestamp(utc_datetime=None):
"""
    Returns a timestamp with the number of microseconds elapsed since January
    1st of 1970 for the given datetime object, UTC localized.
    :param utc_datetime: The datetime whose timestamp is to be calculated.
    :return: The number of microseconds since 1.1.1970 (integer)
"""
if utc_datetime is None:
utc_datetime = get_now_utc()
diff = utc_datetime - TIMESTAMP_0
return int(diff.total_seconds() * 10**6)
|
satnet-project/protocol
|
ampauth/misc.py
|
Python
|
apache-2.0
| 3,639
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
class Config():
def __init__(self, file):
        self.config = configparser.ConfigParser()
self.file = file
self.dict = {}
def get(self):
try:
self.config.read(self.file)
for section in self.config.sections():
for options in self.config.options(section):
self.dict[options] = self.config.get(section, options)
return self.dict
except Exception:
print('File not found or wrong format')
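# Minimal usage sketch (assuming a "config.ini" with [section] key = value pairs):
#   cfg = Config('config.ini')
#   settings = cfg.get()  # -> {'key': 'value', ...}, flattened across sections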
|
Dirilean/Road
|
Server/Config.py
|
Python
|
apache-2.0
| 583
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import copy
import logging
import os
import time
import urllib
import pytest
import requests
from generic_test_code.common import (
generic_upstream_headers_verify_test,
generic_verify_response_test,
overridden_file_content,
verify_header,
)
from util import LineBufferFilter, SearchCriteria
log = logging.getLogger(__name__)
class TestServiceEndpoint:
# Majority of /service endpoint tests are done with generic tests framework
def test_if_accept_encoding_header_is_in_upstream_request(
self, master_ar_process_perclass, mocker, valid_user_header):
headers = copy.deepcopy(valid_user_header)
headers['Accept-Encoding'] = 'gzip'
generic_upstream_headers_verify_test(master_ar_process_perclass,
headers,
'/service/scheduler-alwaysthere/foo/bar/',
assert_headers={'Accept-Encoding': 'gzip'},
)
def test_escapes_are_in_upstream_request(
self, master_ar_process_perclass, mocker, valid_user_header
):
"""
Any space, question mark, or hash escaped in a path element of the
`/service` endpoint gets passed through to the service unchanged.
"""
path = urllib.parse.quote('/foo/a ?#z/')
url = master_ar_process_perclass.make_url_from_path(
'/service/scheduler-alwaysthere/{}'.format(path)
)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['method'] == 'GET'
assert req_data['path'] == path
class TestAgentEndpoint:
# Tests for /agent endpoint routing are done in test_cache.py
def test_if_accept_encoding_header_is_removed_from_upstream_request(
self, master_ar_process_perclass, mocker, valid_user_header):
headers = copy.deepcopy(valid_user_header)
headers['Accept-Encoding'] = 'gzip'
generic_upstream_headers_verify_test(master_ar_process_perclass,
headers,
'/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/',
assert_headers_absent=["Accept-Encoding"],
)
class TestSystemAgentEndpoint:
# Tests for /agent endpoint routing are done in test_cache.py
def test_if_accept_encoding_header_is_removed_from_upstream_request(
self, master_ar_process_perclass, mocker, valid_user_header):
headers = copy.deepcopy(valid_user_header)
headers['Accept-Encoding'] = 'gzip'
generic_upstream_headers_verify_test(
master_ar_process_perclass,
headers,
'/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S0/logs',
assert_headers_absent=["Accept-Encoding"],
)
class TestMetadata:
@pytest.mark.parametrize("public_ip", ['1.2.3.4', "10.20.20.30"])
def test_if_public_ip_detection_works(
self, master_ar_process_perclass, valid_user_header, public_ip):
url = master_ar_process_perclass.make_url_from_path('/metadata')
with overridden_file_content(
'/usr/local/detect_ip_public_data.txt',
"return ip {}".format(public_ip)):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp_data = resp.json()
assert resp_data['PUBLIC_IPV4'] == public_ip
def test_if_clusterid_is_returned(
self, master_ar_process_perclass, valid_user_header):
url = master_ar_process_perclass.make_url_from_path('/metadata')
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp_data = resp.json()
assert resp_data['CLUSTER_ID'] == 'fdb1d7c0-06cf-4d65-bb9b-a8920bb854ef'
with overridden_file_content(
'/var/lib/dcos/cluster-id',
"fd21689b-4fe2-4779-8c30-9125149eef11"):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp_data = resp.json()
assert resp_data['CLUSTER_ID'] == "fd21689b-4fe2-4779-8c30-9125149eef11"
def test_if_missing_clusterid_file_is_handled(
self, master_ar_process_perclass, valid_user_header):
url = master_ar_process_perclass.make_url_from_path('/metadata')
with overridden_file_content('/var/lib/dcos/cluster-id'):
os.unlink('/var/lib/dcos/cluster-id')
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp_data = resp.json()
assert 'CLUSTER_ID' not in resp_data
def test_if_public_ip_detect_script_failue_is_handled(
self, master_ar_process_perclass, valid_user_header):
url = master_ar_process_perclass.make_url_from_path('/metadata')
filter_regexp = {
r'Traceback \(most recent call last\):': SearchCriteria(1, True),
(r"FileNotFoundError: \[Errno 2\] No such file or directory:"
" '/usr/local/detect_ip_public_data.txt'"): SearchCriteria(1, True),
}
lbf = LineBufferFilter(filter_regexp,
line_buffer=master_ar_process_perclass.stderr_line_buffer)
with lbf, overridden_file_content('/usr/local/detect_ip_public_data.txt'):
os.unlink('/usr/local/detect_ip_public_data.txt')
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
assert lbf.extra_matches == {}
resp_data = resp.json()
assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"
@pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007")
def test_if_public_ip_detect_script_execution_is_timed_out(
self, master_ar_process_perclass, valid_user_header):
url = master_ar_process_perclass.make_url_from_path('/metadata')
ts_start = time.time()
with overridden_file_content('/usr/local/detect_ip_public_data.txt',
"timeout 10"):
requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
ts_total = time.time() - ts_start
assert ts_total < 10
# TODO (prozlach): tune it a bit
# assert resp.status_code == 200
# resp_data = resp.json()
# assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"
@pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007")
def test_if_public_ip_detect_script_nonzero_exit_status_is_handled(
self, master_ar_process_perclass, valid_user_header):
url = master_ar_process_perclass.make_url_from_path('/metadata')
with overridden_file_content(
'/usr/local/detect_ip_public_data.txt',
"break with 1"):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp_data = resp.json()
assert resp_data['PUBLIC_IPV4'] == "127.0.0.1"
class TestUiRoot:
@pytest.mark.parametrize("uniq_content", ["(。◕‿‿◕。)", "plain text 1234"])
@pytest.mark.parametrize("path", ["plain-ui-testfile.html",
"nest1/nested-ui-testfile.html"])
def test_if_ui_files_are_handled(
self,
master_ar_process_perclass,
valid_user_header,
uniq_content,
path):
url = master_ar_process_perclass.make_url_from_path('/{}'.format(path))
with overridden_file_content(
'/var/lib/dcos/dcos-ui-update-service/dist/ui/{}'.format(path),
uniq_content):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
resp.encoding = 'utf-8'
assert resp.text == uniq_content
verify_header(resp.headers.items(), 'X-Frame-Options', 'DENY')
class TestMisc:
@pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"])
def test_if_buildinfo_is_served(
self, master_ar_process_perclass, valid_user_header, content):
url = master_ar_process_perclass.make_url_from_path(
'/pkgpanda/active.buildinfo.full.json')
with overridden_file_content(
'/opt/mesosphere/active.buildinfo.full.json',
content):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header
)
assert resp.status_code == 200
assert resp.text == content
@pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"])
def test_if_dcos_metadata_is_served(
self, master_ar_process_perclass, valid_user_header, content):
url = master_ar_process_perclass.make_url_from_path(
'/dcos-metadata/dcos-version.json')
with overridden_file_content(
'/opt/mesosphere/active/dcos-metadata/etc/dcos-version.json',
content):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header
)
assert resp.status_code == 200
assert resp.text == content
def test_if_xaccel_header_is_passed_to_client_by_ar(
self,
master_ar_process_perclass,
valid_user_header,
mocker):
accel_buff_header = {"X-Accel-Buffering": "TEST"}
mocker.send_command(
endpoint_id='http:///run/dcos/dcos-log.sock',
func_name='set_response_headers',
aux_data=accel_buff_header,
)
generic_verify_response_test(
master_ar_process_perclass,
valid_user_header,
'/system/v1/logs/foo/bar',
assert_headers=accel_buff_header)
|
dcos/dcos
|
packages/adminrouter/extra/src/test-harness/tests/test_master.py
|
Python
|
apache-2.0
| 10,790
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib import context
from neutron_lib.exceptions import availability_zone as az_exc
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.extensions import availability_zone as az_ext
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2
class AZExtensionManager(object):
def get_resources(self):
agent.Agent().update_attributes_map(az_def.RESOURCE_ATTRIBUTE_MAP)
return (az_ext.Availability_zone.get_resources() +
agent.Agent.get_resources())
def get_actions(self):
return []
def get_request_extensions(self):
return []
class AZTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
supported_extension_aliases = ["agent", "availability_zone"]
class AZTestCommon(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _register_azs(self):
self.agent1 = helpers.register_dhcp_agent(host='host1', az='nova1')
self.agent2 = helpers.register_dhcp_agent(host='host2', az='nova2')
self.agent3 = helpers.register_l3_agent(host='host2', az='nova2')
self.agent4 = helpers.register_l3_agent(host='host3', az='nova3')
self.agent5 = helpers.register_l3_agent(host='host4', az='nova2')
class TestAZAgentCase(AZTestCommon):
def setUp(self):
plugin = ('neutron.tests.unit.extensions.'
'test_availability_zone.AZTestPlugin')
ext_mgr = AZExtensionManager()
super(TestAZAgentCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def test_list_availability_zones(self):
self._register_azs()
helpers.set_agent_admin_state(self.agent3['id'], admin_state_up=False)
helpers.set_agent_admin_state(self.agent4['id'], admin_state_up=False)
expected = [
{'name': 'nova1', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
res = self._list('availability_zones')
azs = res['availability_zones']
self.assertItemsEqual(expected, azs)
# not admin case
ctx = context.Context('', 'noadmin')
res = self._list('availability_zones', neutron_context=ctx)
azs = res['availability_zones']
self.assertItemsEqual(expected, azs)
def test_list_agent_with_az(self):
helpers.register_dhcp_agent(host='host1', az='nova1')
res = self._list('agents')
self.assertEqual('nova1',
res['agents'][0]['availability_zone'])
def test_validate_availability_zones(self):
self._register_azs()
ctx = context.Context('', 'tenant_id')
self.plugin.validate_availability_zones(ctx, 'network',
['nova1', 'nova2'])
self.plugin.validate_availability_zones(ctx, 'router',
['nova2', 'nova3'])
self.assertRaises(az_exc.AvailabilityZoneNotFound,
self.plugin.validate_availability_zones,
ctx, 'router', ['nova1'])
class TestAZNetworkCase(AZTestCommon):
def setUp(self):
ext_mgr = AZExtensionManager()
super(TestAZNetworkCase, self).setUp(plugin='ml2', ext_mgr=ext_mgr)
def test_availability_zones_in_create_response(self):
with self.network() as net:
self.assertIn('availability_zone_hints', net['network'])
self.assertIn('availability_zones', net['network'])
def test_create_network_with_az(self):
self._register_azs()
az_hints = ['nova1']
with self.network(availability_zone_hints=az_hints) as net:
res = self._show('networks', net['network']['id'])
self.assertItemsEqual(az_hints,
res['network']['availability_zone_hints'])
def test_create_network_with_azs(self):
self._register_azs()
az_hints = ['nova1', 'nova2']
with self.network(availability_zone_hints=az_hints) as net:
res = self._show('networks', net['network']['id'])
self.assertItemsEqual(az_hints,
res['network']['availability_zone_hints'])
def test_create_network_without_az(self):
with self.network() as net:
res = self._show('networks', net['network']['id'])
self.assertEqual([], res['network']['availability_zone_hints'])
def test_create_network_with_empty_az(self):
with self.network(availability_zone_hints=[]) as net:
res = self._show('networks', net['network']['id'])
self.assertEqual([], res['network']['availability_zone_hints'])
def test_create_network_with_not_exist_az(self):
res = self._create_network(self.fmt, 'net', True,
availability_zone_hints=['nova3'])
self.assertEqual(404, res.status_int)
|
eayunstack/neutron
|
neutron/tests/unit/extensions/test_availability_zone.py
|
Python
|
apache-2.0
| 5,803
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from nova import context
from nova import objects
from nova import test
class ContextTestCase(test.NoDBTestCase):
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual('yes', ctxt.read_deleted)
ctxt.read_deleted = 'no'
self.assertEqual('no', ctxt.read_deleted)
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_extra_args_to_context_get_logged(self):
info = {}
def fake_warn(log_msg, *args):
if args:
log_msg = log_msg % args
info['log_msg'] = log_msg
self.stub_out('nova.context.LOG.warning', fake_warn)
c = context.RequestContext('user', 'project',
extra_arg1='meow', extra_arg2='wuff')
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual([], ctxt.service_catalog)
def test_service_catalog_cinder_only(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volume', u'name': u'cinder'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(volume_catalog, ctxt.service_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stub_out('nova.context.LOG.warning', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(0, len(warns), warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'instance_lock_checked': False,
'is_admin': False,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
self.assertEqual(expected_values, values2)
def test_convert_from_dict_then_to_dict(self):
values = {'user': '111',
'user_id': '111',
'tenant': '222',
'project_id': '222',
'domain': None, 'project_domain': None,
'auth_token': None,
'resource_uuid': None, 'read_only': False,
'user_identity': '111 222 - - -',
'instance_lock_checked': False,
'user_name': None, 'project_name': None,
'timestamp': '2015-03-02T20:03:59.416299',
'remote_address': None, 'quota_class': None,
'is_admin': True,
'service_catalog': [],
'read_deleted': 'no', 'show_deleted': False,
'roles': [],
'request_id': 'req-956637ad-354a-4bc5-b969-66fd1cc00f50',
'user_domain': None}
ctx = context.RequestContext.from_dict(values)
self.assertEqual('111', ctx.user)
self.assertEqual('222', ctx.tenant)
self.assertEqual('111', ctx.user_id)
self.assertEqual('222', ctx.project_id)
values2 = ctx.to_dict()
self.assertEqual(values, values2)
@mock.patch('nova.db.create_context_manager')
@mock.patch('nova.rpc.create_transport')
def test_target_cell(self, mock_create_transport, mock_create_ctxt_mgr):
mock_create_ctxt_mgr.return_value = mock.sentinel.cm
mock_create_transport.return_value = mock.sentinel.tp
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
# Verify the existing db_connection, if any, is restored
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
mapping = objects.CellMapping(database_connection='fake://',
transport_url='anotherfake://')
with context.target_cell(ctxt, mapping):
self.assertEqual(ctxt.db_connection, mock.sentinel.cm)
self.assertEqual(ctxt.mq_connection, mock.sentinel.tp)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
mock_create_transport.assert_called_once_with(mapping.transport_url)
@mock.patch('nova.db.create_context_manager')
@mock.patch('nova.rpc.create_transport')
def test_target_cell_transport_url_sentinel(self, mock_create_transport,
mock_create_ctxt_mgr):
mock_create_ctxt_mgr.return_value = mock.sentinel.cm
mock_create_transport.return_value = mock.sentinel.tp
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
mapping = objects.CellMapping(database_connection='fake://',
transport_url='none://')
with context.target_cell(ctxt, mapping):
self.assertEqual(ctxt.db_connection, mock.sentinel.cm)
self.assertIsNone(ctxt.mq_connection)
self.assertFalse(mock_create_transport.called)
|
bigswitch/nova
|
nova/tests/unit/test_context.py
|
Python
|
apache-2.0
| 11,335
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad
def prelu_backward(inputs, base_axis=1):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
w0 = inputs[2]
base_axis += x0.ndim*(base_axis < 0)
m0 = F.greater_scalar(x0, 0)
m1 = 1 - m0
m0 = no_grad(m0)
m1 = no_grad(m1)
if w0.shape == (): # shared
reshape = [1 for i in range(len(x0.shape))]
w0 = F.reshape(w0, reshape, inplace=False)
dw0 = F.sum(dy * x0 * m1)
else:
reshape = [w0.shape[0] if i ==
base_axis else 1 for i in range(len(x0.shape))]
w0 = F.reshape(w0, reshape, inplace=False)
raxes = [i for i in range(len(x0.shape)) if i != base_axis]
dw0 = F.sum(dy * x0 * m1, raxes, keepdims=False)
dx0 = dy * (m0 + w0 * m1)
return dx0, dw0
|
sony/nnabla
|
python/src/nnabla/backward_function/prelu.py
|
Python
|
apache-2.0
| 1,735
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# Carlos Jimenez-Hoyos
#
import numpy
from pyscf.cc import ccsd_rdm
def _gamma1_intermediates(cc, t1, t2, l1, l2):
nocc, nvir = t1.shape
t1a = t1
t2ab = numpy.copy(t2)
t2aa = numpy.copy(t2) \
- t2.transpose(0,1,3,2)
l1a = l1
l2ab = 2*numpy.copy(l2)
l2aa = numpy.copy(l2) \
- l2.transpose(0,1,3,2)
doo = numpy.zeros((nocc,nocc))
doo += -2*numpy.einsum('ie,je->ij', t1a, l1a)
doo += -numpy.einsum('imef,jmef->ij', t2ab, l2ab) \
-numpy.einsum('imef,jmef->ij', t2aa, l2aa)
dvv = numpy.zeros((nvir,nvir))
dvv += 2*numpy.einsum('ma,mb->ab', l1a, t1a)
dvv += numpy.einsum('mnae,mnbe->ab', l2ab, t2ab) \
+ numpy.einsum('mnae,mnbe->ab', l2aa, t2aa)
xt1 = numpy.einsum('mnef,inef->mi', l2aa, t2aa)
xt1 += numpy.einsum('mnef,inef->mi', l2ab, t2ab)
xt2 = numpy.einsum('mnaf,mnef->ae', t2aa, l2aa)
xt2 += numpy.einsum('mnaf,mnef->ae', t2ab, l2ab)
xtv = numpy.einsum('ma,me->ae', t1a, l1a)
dov = numpy.zeros((nocc,nvir))
dov += 2*t1a
dov += 2*numpy.einsum('imae,me->ia', t2aa, l1a) \
+ 2*numpy.einsum('imae,me->ia', t2ab, l1a) \
+ -2*numpy.einsum('ie,ae->ia', t1a, xtv)
dov += -numpy.einsum('mi,ma->ia', xt1, t1a) \
+ -numpy.einsum('ie,ae->ia', t1a, xt2)
dvo = numpy.zeros((nvir,nocc))
dvo += 2*l1a.transpose(1,0)
return doo*.5, dov*.5, dvo*.5, dvv*.5
# gamma2 intermediates in Chemist's notation
def _gamma2_intermediates(cc, t1, t2, l1, l2):
tau = t2 + numpy.einsum('ia,jb->ijab', t1, t1)
tau2 = t2 + numpy.einsum('ia,jb->ijab', t1, t1*2)
theta = t2*2 - t2.transpose(0,1,3,2)
mOvOv = numpy.einsum('ikca,jkcb->jbia', l2, t2)
mOVov = numpy.einsum('ikac,jkbc->jbia', l2, theta)
mOVov -= numpy.einsum('ikca,jkbc->jbia', l2, t2)
moo =(numpy.einsum('jdld->jl', mOvOv) * 2 +
numpy.einsum('jdld->jl', mOVov))
mvv =(numpy.einsum('lbld->bd', mOvOv) * 2 +
numpy.einsum('lbld->bd', mOVov))
gvvvv = numpy.einsum('ijab,ijcd->abcd', l2*.5, tau)
goooo = numpy.einsum('ijab,klab->klij', l2, tau)*.5
goovv = .5 * l2 + .5 * tau
tmp = numpy.einsum('kc,ikac->ia', l1, theta)
goovv += numpy.einsum('ia,jb->ijab', tmp, t1)
tmp = numpy.einsum('kc,kb->cb', l1, t1)
goovv -= numpy.einsum('cb,ijac->ijab', tmp, t2)
tmp = numpy.einsum('kc,jc->kj', l1, t1)
goovv -= numpy.einsum('kj,ikab->ijab', tmp, tau)
goovv -= numpy.einsum('jl,ilab->ijab', moo*.5, tau)
goovv -= numpy.einsum('bd,ijad->ijab', mvv*.5, tau)
goovv += numpy.einsum('ibld,ljad->ijab', mOvOv, tau2) * .5
goovv -= numpy.einsum('iald,ljbd->ijab', mOVov, tau2) * .5
goovv += numpy.einsum('iald,ljdb->ijab', mOVov*2+mOvOv, t2) * .5
goovv += numpy.einsum('ijkl,klab->ijab', goooo, tau)
gooov = numpy.einsum('ib,kjab->jkia', -l1, tau)
gooov += numpy.einsum('jkil,la->jkia', goooo, t1*2)
gooov += numpy.einsum('ji,ka->jkia', moo*-.5, t1)
gooov += numpy.einsum('jaic,kc->jkia', mOvOv, t1)
gooov -= numpy.einsum('kaic,jc->jkia', mOVov, t1)
gooov -= numpy.einsum('jkba,ib->jkia', l2, t1)
govvv = numpy.einsum('ja,jibc->iacb', l1, tau)
govvv -= numpy.einsum('adbc,id->iacb', gvvvv, t1*2)
govvv += numpy.einsum('ba,ic->iacb', mvv, t1*.5)
govvv -= numpy.einsum('ibka,kc->iacb', mOvOv, t1)
govvv += numpy.einsum('icka,kb->iacb', mOVov, t1)
govvv += numpy.einsum('jibc,ja->iacb', l2, t1)
gOvVo = numpy.einsum('ia,jb->jabi', l1, t1) + mOVov.transpose(0,3,1,2)
tmp = numpy.einsum('ikac,jc->jaik', l2, t1)
gOvVo -= numpy.einsum('jaik,kb->jabi', tmp, t1)
gOvvO = mOvOv.transpose(0,3,1,2) + numpy.einsum('jaki,kb->jabi', tmp, t1)
doovv = goovv*2 - goovv.transpose(0,1,3,2)
dvvvv = gvvvv*2 - gvvvv.transpose(0,1,3,2)
doooo = goooo*2 - goooo.transpose(0,1,3,2)
dovov = -2*gOvvO.transpose(0,1,3,2) - gOvVo.transpose(0,1,3,2)
dovvo = gOvVo*2 + gOvvO
dovvv = govvv*2 - govvv.transpose(0,1,3,2)
dooov = gooov*2 - gooov.transpose(1,0,2,3)
doovv, dovov = dovov.transpose(0,2,1,3), doovv.transpose(0,2,1,3)
dvvvv = dvvvv.transpose(0,2,1,3)
doooo = doooo.transpose(0,2,1,3)
dovvo = dovvo.transpose(0,2,1,3)
dovvv = dovvv.transpose(0,2,1,3)
dooov = dooov.transpose(0,2,1,3)
dvvov = None
return (dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov)
def make_rdm1(cc, t1, t2, l1, l2):
d1 = _gamma1_intermediates(cc, t1, t2, l1, l2)
return ccsd_rdm._make_rdm1(cc, d1, with_frozen=True)
def make_rdm2(cc, t1, t2, l1, l2, d1=None, d2=None):
d1 = _gamma1_intermediates(cc, t1, t2, l1, l2)
d2 = _gamma2_intermediates(cc, t1, t2, l1, l2)
return ccsd_rdm._make_rdm2(cc, d1, d2, with_dm1=True, with_frozen=True)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf.cc import ccsd
from pyscf import ao2mo
mol = gto.M()
mf = scf.RHF(mol)
mcc = ccsd.CCSD(mf)
numpy.random.seed(2)
nocc = 5
nmo = 12
nvir = nmo - nocc
eri0 = numpy.random.random((nmo,nmo,nmo,nmo))
eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)
fock0 = numpy.random.random((nmo,nmo))
fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
l1 = numpy.random.random((nocc,nvir))
l2 = numpy.random.random((nocc,nocc,nvir,nvir))
l2 = l2 + l2.transpose(1,0,3,2)
h1 = fock0 - (numpy.einsum('kkpq->pq', eri0[:nocc,:nocc])*2 -
numpy.einsum('pkkq->pq', eri0[:,:nocc,:nocc]))
eris = lambda:None
eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()
eris.ooov = eri0[:nocc,:nocc,:nocc,nocc:].copy()
eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()
eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()
eris.ovov = eri0[:nocc,nocc:,:nocc,nocc:].copy()
eris.ovvo = eri0[:nocc,nocc:,nocc:,:nocc].copy()
eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:].copy()
eris.vvvv = eri0[nocc:,nocc:,nocc:,nocc:].copy()
eris.fock = fock0
doo, dov, dvo, dvv = _gamma1_intermediates(mcc, t1, t2, l1, l2)
print((numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc]))*2+20166.329861034799)
print((numpy.einsum('ab,ab', dvv, fock0[nocc:,nocc:]))*2-58078.964019246778)
print((numpy.einsum('ia,ia', dov, fock0[:nocc,nocc:]))*2+74994.356886784764)
print((numpy.einsum('ai,ai', dvo, fock0[nocc:,:nocc]))*2-34.010188025702391)
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \
_gamma2_intermediates(mcc, t1, t2, l1, l2)
print('doooo',numpy.einsum('ijkl,ijkl', doooo, eris.oooo)*2-15939.9007625418)
print('dvvvv',numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2-37581.823919588 )
print('dooov',numpy.einsum('jkia,jkia', dooov, eris.ooov)*2-128470.009687716)
print('dovvv',numpy.einsum('icab,icab', dovvv, eris.ovvv)*2+166794.225195056)
print('doovv',numpy.einsum('iajb,iajb', dovov, eris.ovov)*2+719279.812916893)
print('dovvo',numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2
+numpy.einsum('ijab,ijab', doovv, eris.oovv)*2+53634.0012286654)
dm1 = make_rdm1(mcc, t1, t2, l1, l2)
dm2 = make_rdm2(mcc, t1, t2, l1, l2)
e2 = (numpy.einsum('ijkl,ijkl', doooo, eris.oooo)*2 +
numpy.einsum('acbd,acbd', dvvvv, eris.vvvv)*2 +
numpy.einsum('jkia,jkia', dooov, eris.ooov)*2 +
numpy.einsum('icab,icab', dovvv, eris.ovvv)*2 +
numpy.einsum('iajb,iajb', dovov, eris.ovov)*2 +
numpy.einsum('jbai,jbia', dovvo, eris.ovov)*2 +
numpy.einsum('ijab,ijab', doovv, eris.oovv)*2 +
numpy.einsum('ij,ij', doo, fock0[:nocc,:nocc])*2 +
numpy.einsum('ia,ia', dov, fock0[:nocc,nocc:])*2 +
numpy.einsum('ai,ai', dvo, fock0[nocc:,:nocc])*2 +
numpy.einsum('ab,ab', dvv, fock0[nocc:,nocc:])*2 +
fock0[:nocc].trace()*2 -
numpy.einsum('kkpq->pq', eri0[:nocc,:nocc,:nocc,:nocc]).trace()*2 +
numpy.einsum('pkkq->pq', eri0[:nocc,:nocc,:nocc,:nocc]).trace())
print(e2+794721.197459942)
print(numpy.einsum('pqrs,pqrs', dm2, eri0)*.5 +
numpy.einsum('pq,qp', dm1, h1) - e2)
print(numpy.allclose(dm2, dm2.transpose(1,0,3,2)))
print(numpy.allclose(dm2, dm2.transpose(2,3,0,1)))
d1 = numpy.einsum('kkpq->qp', dm2) / 9
print(numpy.allclose(d1, dm1))
|
sunqm/pyscf
|
pyscf/cc/ccsd_rdm_slow.py
|
Python
|
apache-2.0
| 9,115
|
#pylint: disable=W0703,R0912,R0915,R0904,W0105
""" Thread to perform creation of a service """
import os
import traceback
from agent.lib.utils import islink
from agent.lib.utils import readlink
from agent.lib.errors import Errors
from agent.lib.errors import AgentException
from agent.lib.agent_thread.manifest_control import ManifestControl
from agent.lib import manifestutil
class UserActionService(ManifestControl):
""" This thread will attempt to restart a service
This means going through each package in ACTIVE manifest
call the shutdown
call start
"""
THREAD_NAME = 'service_lifecycle'
def __init__(self, threadMgr, service, action, parentId = None):
""" Constructor """
ManifestControl.__init__(self, threadMgr, service, manifest = None, parentId = parentId)
self.setName(UserActionService.THREAD_NAME)
self.__action = action
def doRun(self):
""" Main body of the thread """
errorMsg = ""
errorCode = None
failed = False
activeManifest = None
try:
activePath = manifestutil.manifestPath(self._service, 'active')
# make sure that the active path exists and it is a link
# Should we check this again since we already have a check in action controller
if not os.path.exists(activePath) or not islink(activePath):
raise AgentException(error = Errors.ACTIVEMANIFEST_MANIFEST_MISSING, errorMsg = 'No active manifest - cannot restart service')
activeManifest = os.path.basename(readlink(activePath))
self.__lcmActionManifest(self._service, activeManifest, self.__action)
self._LOG.info('Done: %s service for (%s/%s)' % (self.__action, self._service, activeManifest))
self._updateStatus(progress = 100)
except AgentException as exc:
failed = True
errorMsg = '%s Service - Agent Exception - %s' % (self.__action, exc.getMsg())
errorCode = exc.getCode()
except Exception as exc:
failed = True
errorMsg = '%s Service - Unknown error - (%s/%s) - %s - %s' \
% (self.__action, self._service, self._manifest, str(exc), traceback.format_exc(5))
errorCode = Errors.UNKNOWN_ERROR
finally:
if failed:
self._LOG.error(errorMsg)
self._updateStatus(httpStatus = 500, error = errorCode, errorMsg = errorMsg)
def __lcmActionManifest(self, service, manifest, action):
""" shutdown a manifest. This means calling shutdown script on manifest packages
@param service - service of manifest to deactivate
"""
self._LOG.info("%s active Manifest %s" % (action, service))
self._execPackages(action, service, manifest, 50, 90)
|
cronuspaas/cronusagent
|
agent/agent/lib/agent_thread/useraction_service.py
|
Python
|
apache-2.0
| 2,919
|
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import sys
from abc import ABC, abstractmethod
from itertools import chain
from typing import Optional
import sockeye.constants as C
from sockeye.utils import smart_open
from . import inference
def get_output_handler(output_type: str,
output_fname: Optional[str] = None) -> 'OutputHandler':
"""
:param output_type: Type of output handler.
    :param output_fname: Output filename. If None, sys.stdout is used.
:raises: ValueError for unknown output_type.
:return: Output handler.
"""
output_stream = sys.stdout if output_fname is None else smart_open(output_fname, mode='w')
if output_type == C.OUTPUT_HANDLER_TRANSLATION:
return StringOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_SCORE:
return ScoreOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_PAIR_WITH_SCORE:
return PairWithScoreOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_TRANSLATION_WITH_SCORE:
return StringWithScoreOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_BENCHMARK:
return BenchmarkOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_JSON:
return JSONOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_TRANSLATION_WITH_FACTORS:
return FactoredStringOutputHandler(output_stream)
else:
raise ValueError("unknown output type")
class OutputHandler(ABC):
"""
Abstract output handler interface
"""
@abstractmethod
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
pass
@abstractmethod
def reports_score(self) -> bool:
"""
True if output_handler makes use of TranslatorOutput.score
:return:
"""
pass
class StringOutputHandler(OutputHandler):
"""
Output handler to write translation to a stream
:param stream: Stream to write translations to (e.g. sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
print("%s" % t_output.translation, file=self.stream, flush=True)
def reports_score(self) -> bool:
return False
class StringWithScoreOutputHandler(OutputHandler):
"""
Output handler to write translation score and translation to a stream. The score and translation
string are tab-delimited.
:param stream: Stream to write translations to (e.g. sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
print("{:.6f}\t{}".format(t_output.score, t_output.translation), file=self.stream, flush=True)
def reports_score(self) -> bool:
return True
class ScoreOutputHandler(OutputHandler):
"""
Output handler to write translation score to a stream.
:param stream: Stream to write translations to (e.g., sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
result = "{:.6f}".format(t_output.score)
if hasattr(t_output, 'factor_scores') and t_output.factor_scores:
factor_scores = "\t".join("{:.6f}".format(fs) for fs in t_output.factor_scores)
result = f"{result}\t{factor_scores}"
print(result, file=self.stream, flush=True)
def reports_score(self) -> bool:
return True
class PairWithScoreOutputHandler(OutputHandler):
"""
Output handler to write translation score along with sentence input and output (tab-delimited).
:param stream: Stream to write translations to (e.g., sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
print("{:.6f}\t{}\t{}".format(t_output.score,
C.TOKEN_SEPARATOR.join(t_input.tokens),
t_output.translation), file=self.stream, flush=True)
def reports_score(self) -> bool:
return True
class BenchmarkOutputHandler(StringOutputHandler):
"""
Output handler to write detailed benchmark information to a stream.
"""
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
print("input=%s\toutput=%s\tinput_tokens=%d\toutput_tokens=%d\ttranslation_time=%0.4f" %
(C.TOKEN_SEPARATOR.join(t_input.tokens),
t_output.translation,
len(t_input.tokens),
len(t_output.tokens),
t_walltime),
file=self.stream, flush=True)
def reports_score(self) -> bool:
return False
class JSONOutputHandler(OutputHandler):
"""
Output single-line JSON objects.
Carries over extra fields from the input.
"""
def __init__(self, stream) -> None:
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
Outputs a JSON object of the fields in the `TranslatorOutput` object.
"""
d_ = t_output.json()
print(json.dumps(d_, sort_keys=True), file=self.stream, flush=True)
def reports_score(self) -> bool:
return True
class FactoredStringOutputHandler(OutputHandler):
"""
    Returns a factored string if the model produces target factors. If there are no target factors,
    the output is equivalent to StringOutputHandler.
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
factored_string = C.TOKEN_SEPARATOR.join(C.DEFAULT_FACTOR_DELIMITER.join(factors) for factors in
zip(*chain([t_output.tokens], t_output.factor_tokens)))
print(factored_string, file=self.stream, flush=True)
def reports_score(self) -> bool:
return False
|
awslabs/sockeye
|
sockeye/output_handler.py
|
Python
|
apache-2.0
| 8,543
|
from pybbn.pptc.evidencecollector import EvidenceCollector
from pybbn.pptc.evidencedistributor import EvidenceDistributor
class Propagator(object):
"""
Evidence propagator.
"""
@staticmethod
def propagate(join_tree):
"""
Propagates evidence.
:param join_tree: Join tree.
:return: Join tree.
"""
cliques = join_tree.get_cliques()
cliques = sorted(cliques, key=lambda c: c.id)
# cliques = sorted(cliques, key=lambda c: c.get_sid())
x = cliques[0]
join_tree.unmark_cliques()
Propagator.collect_evidence(join_tree, x)
join_tree.unmark_cliques()
Propagator.distribute_evidence(join_tree, x)
return join_tree
@staticmethod
def collect_evidence(join_tree, start):
"""
Collects evidence.
:param join_tree: Join tree.
:param start: Start clique.
"""
collector = EvidenceCollector(join_tree, start)
collector.start()
@staticmethod
def distribute_evidence(join_tree, start):
"""
Distributes evidence.
:param join_tree: Join tree.
:param start: Start clique.
"""
distributor = EvidenceDistributor(join_tree, start)
distributor.start()
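# Minimal usage sketch (assuming a join tree has already been built by the
# pybbn transformation pipeline and evidence has been posted on it):
#
#   join_tree = Propagator.propagate(join_tree)
#
# collect_evidence runs the inward pass towards the start clique and
# distribute_evidence runs the outward pass back out to the leaves.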
|
vangj/py-bbn
|
pybbn/pptc/propagator.py
|
Python
|
apache-2.0
| 1,294
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for importing a TF v1-style SavedModel when executing eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model import utils_impl
class LoadTest(test.TestCase):
def _v1_single_metagraph_saved_model(self, use_resource):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="start")
if use_resource:
distractor = variables.RefVariable(-1., name="distractor")
v = resource_variable_ops.ResourceVariable(3., name="v")
else:
# "distractor" gets saved in the checkpoint and so used in the restore
# function, but not in the pruned function for the signature. This tests
# node naming: it needs to be consistent (and ideally always the same as
# the node in the original GraphDef) for the resource manager to find
# the right variable.
distractor = variables.RefVariable(-1., name="distractor")
v = variables.RefVariable(3., name="v")
local_variable = variables.VariableV1(
1.,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False,
use_resource=True)
output = array_ops.identity(start * v * local_variable, name="output")
with session_lib.Session() as session:
session.run([v.initializer, distractor.initializer,
local_variable.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=local_variable.initializer)
return path
def test_resource_variable_import(self):
imported = load.load(self._v1_single_metagraph_saved_model(
use_resource=True))
fn = imported.signatures["serving_default"]
self.assertEqual({"output": 6.},
self.evaluate(fn(constant_op.constant(2.))))
self.assertAllEqual([3., 1.], self.evaluate(imported.variables))
imported.variables[0].assign(4.)
self.assertEqual({"output": 8.},
self.evaluate(fn(start=constant_op.constant(2.))))
imported.variables[1].assign(2.)
self.assertEqual({"output": 24.},
self.evaluate(fn(start=constant_op.constant(3.))))
self.assertTrue(imported.variables[0].trainable)
self.assertFalse(imported.variables[1].trainable)
with backprop.GradientTape() as tape:
output = fn(start=constant_op.constant(4.))
self.assertEqual(imported.variables[:1], list(tape.watched_variables()))
self.assertEqual(8., tape.gradient(output, imported.variables[0]).numpy())
def test_ref_variable_import(self):
saved = self._v1_single_metagraph_saved_model(use_resource=False)
imported = load.load(saved)
fn = imported.signatures["serving_default"]
self.assertEqual(6., fn(start=constant_op.constant(2.))["output"].numpy())
def _v1_multi_metagraph_saved_model(self):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[None], dtype=dtypes.float32, name="start")
v = resource_variable_ops.ResourceVariable(21.)
first_output = array_ops.identity(start * v, name="first_output")
second_output = array_ops.identity(v, name="second_output")
with session_lib.Session() as session:
session.run(v.initializer)
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
builder = builder_impl.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
session, tags=["first"],
signature_def_map={
"first_key": signature_def_utils.build_signature_def(
{"first_start": utils_impl.build_tensor_info(start)},
{"first_output": utils_impl.build_tensor_info(
first_output)})})
builder.add_meta_graph(
tags=["second"],
signature_def_map={
"second_key": signature_def_utils.build_signature_def(
{"second_start": utils_impl.build_tensor_info(start)},
{"second_output": utils_impl.build_tensor_info(
second_output)})})
builder.save()
return path
def test_multi_meta_graph_loading(self):
with self.assertRaisesRegexp(ValueError, "2 MetaGraphs"):
load.load(self._v1_multi_metagraph_saved_model())
first_imported = load.load(self._v1_multi_metagraph_saved_model(),
tags=["first"])
self.assertEqual({"first_output": 42.},
self.evaluate(first_imported.signatures["first_key"](
first_start=constant_op.constant(2.))))
second_imported = load.load(self._v1_multi_metagraph_saved_model(),
tags=["second"])
with self.assertRaisesRegexp(TypeError, "second_start"):
second_imported.signatures["second_key"](x=constant_op.constant(2.))
with self.assertRaisesRegexp(TypeError, "second_start"):
second_imported.signatures["second_key"](
second_start=constant_op.constant(2.),
x=constant_op.constant(2.))
self.assertEqual({"second_output": 21.},
self.evaluate(second_imported.signatures["second_key"](
second_start=constant_op.constant(2.))))
def _v1_asset_saved_model(self):
export_graph = ops.Graph()
vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
with open(vocab_path, "w") as f:
f.write("alpha\nbeta\ngamma\n")
with export_graph.as_default():
initializer = lookup_ops.TextFileInitializer(
vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
table = lookup_ops.HashTable(
initializer, default_value=-1)
start = array_ops.placeholder(
shape=None, dtype=dtypes.string, name="in")
output = table.lookup(start, name="out")
with session_lib.Session() as session:
session.run([table.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=table.initializer)
file_io.delete_file(vocab_path)
return path
def test_asset_loading(self):
first_path = self._v1_asset_saved_model()
imported = load.load(first_path)
fn = imported.signatures["serving_default"]
self.assertAllClose({"output": [2, 0]},
fn(start=constant_op.constant(["gamma", "alpha"])))
second_path = os.path.join(self.get_temp_dir(), "saved_model",
str(ops.uid()))
save.save(imported, second_path, signatures=imported.signatures)
shutil.rmtree(first_path)
second_import = load.load(second_path)
fn = second_import.signatures["serving_default"]
self.assertAllClose({"output": [2, 0]},
fn(start=constant_op.constant(["gamma", "alpha"])))
third_path = os.path.join(self.get_temp_dir(), "saved_model",
str(ops.uid()))
save.save(second_import, third_path, signatures=second_import.signatures)
shutil.rmtree(second_path)
third_import = load.load(third_path)
fn = third_import.signatures["serving_default"]
self.assertAllClose({"output": [2, 0]},
fn(start=constant_op.constant(["gamma", "alpha"])))
if __name__ == "__main__":
test.main()
|
ageron/tensorflow
|
tensorflow/python/saved_model/load_v1_in_v2_test.py
|
Python
|
apache-2.0
| 9,405
|
"""This module contains the general information for GpuInventory ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class GpuInventoryConsts:
pass
class GpuInventory(ManagedObject):
"""This is GpuInventory class."""
consts = GpuInventoryConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("GpuInventory", "gpuInventory", "gpu-inv-[id]", VersionMeta.Version303a, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'pciEquipSlot'], [], ["Get"]),
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"temperature": MoPropertyMeta("temperature", "temperature", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
"temperature": "temperature",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.status = None
self.temperature = None
ManagedObject.__init__(self, "GpuInventory", parent_mo_or_dn, **kwargs)
|
ragupta-git/ImcSdk
|
imcsdk/mometa/gpu/GpuInventory.py
|
Python
|
apache-2.0
| 1,876
|
from random import random
import requests
import ray
from ray import serve
ray.init(num_cpus=8)
serve.start()
# Our pipeline will be structured as follows:
# - Input comes in, the composed model sends it to model_one
# - model_one outputs a random number between 0 and 1; if the value is
#   greater than 0.5, the data is sent to model_two
# - otherwise, the data is returned to the user.
# Let's define two models that just print out the data they received.
@serve.deployment
def model_one(data):
print("Model 1 called with data ", data)
return random()
model_one.deploy()
@serve.deployment
def model_two(data):
print("Model 2 called with data ", data)
return data
model_two.deploy()
# max_concurrent_queries is optional. By default, if you pass in an async
# function, Ray Serve sets the limit to a high number.
@serve.deployment(max_concurrent_queries=10, route_prefix="/composed")
class ComposedModel:
def __init__(self):
self.model_one = model_one.get_handle()
self.model_two = model_two.get_handle()
# This method can be called concurrently!
async def __call__(self, starlette_request):
data = await starlette_request.body()
score = await self.model_one.remote(data=data)
if score > 0.5:
result = await self.model_two.remote(data=data)
result = {"model_used": 2, "score": score}
else:
result = {"model_used": 1, "score": score}
return result
ComposedModel.deploy()
for _ in range(5):
resp = requests.get("http://127.0.0.1:8000/composed", data="hey!")
print(resp.json())
# Output
# {'model_used': 2, 'score': 0.6250189863595503}
# {'model_used': 1, 'score': 0.03146855349621436}
# {'model_used': 2, 'score': 0.6916977560006987}
# {'model_used': 2, 'score': 0.8169693450866928}
# {'model_used': 2, 'score': 0.9540681979573862}
|
pcmoritz/ray-1
|
python/ray/serve/examples/doc/snippet_model_composition.py
|
Python
|
apache-2.0
| 1,883
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
from pprint import pprint
import click
from kael.microservice import micro_server
from kael.work_frame import WORK_FRAME
file_path = os.path.abspath(os.path.dirname(__file__))
if file_path not in sys.path:
sys.path.insert(0, file_path)
path = os.path.split(file_path)
if path[0] not in sys.path:
sys.path.insert(0, path[0])
AMQ_URI = os.environ.get('AMQ_URI')
@click.group()
def cli():
pass
@cli.command()
def s():
server = micro_server("s1", auri=AMQ_URI)
@server.service("hha")
def h(s):
print "HHHHH", s, os.getpid()
return {"b": s}
server.start_service(4, daemon=False)
r = server.hha(123123)
print server.hha.src
print "--------------", r
print "done"
print server.services
@cli.command()
def c():
server = micro_server("s1", auri=AMQ_URI)
r = server.hha(s=12312, qid="a")
print server.hha.src
print r
@cli.command()
def p():
conf_dir = os.path.join(file_path, 'setting.yaml')
w = WORK_FRAME("test", auri=AMQ_URI, service_group_conf=conf_dir)
w.frame_start()
@cli.command()
def pc():
server = WORK_FRAME("test", auri=AMQ_URI)
print server.calculate__add(10, 20)
print server.calculate__minus(10, 20)
print server.time__add(1)
r = server.command("_restart_service")
print r
time.sleep(3)
print server.get_response(r)
@cli.command()
def status():
print '-' * 10, 'service', '-' * 10
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version", pkg_type='service')
pprint(server.get_response(r))
print '\n\n', '-' * 10, 'crontab', '-' * 10
r = server.command("_get_pkg_version", pkg_type='crontab')
pprint(server.get_response(r))
@cli.command()
def restart_service():
server = WORK_FRAME("test", auri=AMQ_URI)
print server.restart_servers('service', timeout=3)
@cli.command()
def restart_crontab():
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_restart_crontab")
print server.get_response(r, timeout=5)
@cli.command()
def update_s():
service = 'time'
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version")
pprint(server.get_response(r, timeout=5, ))
pprint(server.update_service(service))
@cli.command()
def update_c():
crontab = 'print'
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version", pkg_type='crontab')
pprint(server.get_response(r, timeout=5, ))
pprint(server.update_crontab(crontab, version=1.0))
@cli.command()
def install():
server = WORK_FRAME("test", auri=AMQ_URI)
service = 'calculate'
pprint(server.get_last_version(service))
pprint(server.install_service(service, './caccu'))
# cron tab
@cli.command()
def scron():
"""micro server crontab"""
server = micro_server("test", auri=AMQ_URI)
server.add_crontab(cron_name='haha', command='echo 2', time_str='* * * * *')
server.start_crontab()
print '-' * 100
print 'USER ALL CRONTAB'
pprint(server.cron_manage.user_cron_jobs())
print '-' * 100
@cli.command()
def wfcron():
"""work frame crontab"""
server = WORK_FRAME("test", auri=AMQ_URI)
pprint(server.get_all_crontab_status())
if __name__ == "__main__":
cli()
|
360skyeye/kael
|
examples/micro_service/run.py
|
Python
|
apache-2.0
| 3,373
|
# Copyright 2017 The Chromium Authors.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd.
from __future__ import absolute_import
import unittest
from .messages import Message, message, XrefSignature, InternalLink, TextRange
@message
class Foo(Message):
DESCRIPTOR = {'x': int}
class Bar(Message):
DESCRIPTOR = {'x': int, 'y': Message.PARENT_TYPE}
class Baz(Message):
DESCRIPTOR = {'x': int, 'y': [Message.PARENT_TYPE]}
class Qux(Message):
FOO = 1
BAR = 2
DESCRIPTOR = int
class Quux(Message):
DESCRIPTOR = {'x': Qux}
class S(Message):
DESCRIPTOR = {'s': str}
class TestProto(unittest.TestCase):
def test_proto_bridge(self):
v = Foo()
v.x = 3
self.assertEqual(v.AsQueryString(), [('x', '3')])
def test_from_json_string_1(self):
v = Message.FromJsonString('{"x": 3}')
self.assertEqual(v.x, 3)
def test_from_json_string_2(self):
v = Foo.FromJsonString('{"x": 3}')
self.assertTrue(isinstance(v, Foo))
self.assertTrue(isinstance(v.x, int))
self.assertEqual(v.x, 3)
def test_from_json_string_3(self):
v = Bar.FromJsonString('{"x": 3, "y": {"x": 4}}')
self.assertTrue(isinstance(v, Bar))
self.assertTrue(isinstance(v.y, Bar))
self.assertEqual(v.x, 3)
self.assertEqual(v.y.x, 4)
def test_from_json_string_4(self):
v = Foo.FromJsonString('{"y": 3}')
self.assertTrue(isinstance(v, Foo))
def test_from_json_string_5(self):
v = Foo.FromJsonString('{"y": 3}')
self.assertTrue(isinstance(v, Foo))
self.assertEqual(v.y, 3)
def test_from_json_string_6(self):
v = Quux.FromJsonString('{"x": 3}')
self.assertTrue(isinstance(v, Quux))
self.assertTrue(isinstance(v.x, int))
self.assertEqual(v.x, 3)
def test_from_json_string_7(self):
v = Quux.FromJsonString('{"x": "FOO"}')
self.assertTrue(isinstance(v, Quux))
self.assertTrue(isinstance(v.x, int))
self.assertEqual(v.x, 1)
def test_from_json_string_invalid(self):
s = bytearray('{"s": "abcdefghijklmnop"}'.encode('utf-8'))
s[8] = 0xf8
v = S.FromJsonString(s)
self.assertTrue(isinstance(v, S))
self.assertGreater(len(v.s), 0)
def test_from_shallow_dict_1(self):
v = Baz.FromShallowDict({'x': 3, 'y': [{'x': 4}, {'x': 5}]})
self.assertTrue(isinstance(v, Baz))
self.assertTrue(isinstance(v.y, list))
self.assertTrue(isinstance(v.y[0], Baz))
self.assertTrue(isinstance(v.y[1], Baz))
class TestConstructor(unittest.TestCase):
def test_empty_class(self):
f = Foo()
self.assertFalse(hasattr(f, 'x'))
def test_class_with_known_keyword(self):
f = Foo(x=10)
self.assertTrue(hasattr(f, 'x'))
self.assertEqual(10, f.x)
def test_class_with_unknown_keyword(self):
f = Foo(x=10, y=9)
self.assertTrue(hasattr(f, 'x'))
self.assertTrue(hasattr(f, 'y'))
self.assertEqual(9, f.y)
class TestXrefSignature(unittest.TestCase):
def test_basic_with_single_signature(self):
xsig = XrefSignature.FromJsonString(
'{"highlight_signature":"abc", "signature":"sig","signature_hash":"hash"}'
)
self.assertEqual('abc', xsig.highlight_signature)
self.assertEqual('sig', xsig.signature)
self.assertEqual('hash', xsig.signature_hash)
self.assertSetEqual(set(['abc', 'sig']), set(xsig.GetSignatures()))
self.assertEqual('sig', xsig.GetSignature())
def test_multi_strings(self):
xsig = XrefSignature.FromJsonString('''{
"signature": "foo bar baz",
"highlight_signature": "hifoo hibar"
}''')
self.assertSetEqual(
set(['foo', 'bar', 'baz', 'hifoo', 'hibar']), set(xsig.GetSignatures()))
self.assertEqual('foo', xsig.GetSignature())
class TestInternalLink(unittest.TestCase):
def test_basic_with_single_signature(self):
ilink = InternalLink.FromJsonString(
'{"highlight_signature":"abc", "signature":"sig","signature_hash":"hash"}'
)
self.assertEqual('abc', ilink.highlight_signature)
self.assertEqual('sig', ilink.signature)
self.assertEqual('hash', ilink.signature_hash)
self.assertSetEqual(set(['abc', 'sig']), set(ilink.GetSignatures()))
self.assertEqual('sig', ilink.GetSignature())
def test_multi_strings(self):
ilink = InternalLink.FromJsonString('''{
"signature": "foo bar baz",
"highlight_signature": "hifoo hibar"
}''')
self.assertSetEqual(
set(['foo', 'bar', 'baz', 'hifoo', 'hibar']),
set(ilink.GetSignatures()))
self.assertEqual('foo', ilink.GetSignature())
class TestTextRange(unittest.TestCase):
def test_contains(self):
r = TextRange(start_line=1, start_column=8, end_line=3, end_column=1)
self.assertTrue(r.Contains(1, 8))
self.assertTrue(r.Contains(3, 1))
self.assertTrue(r.Contains(2, 100))
self.assertFalse(r.Contains(1, 7))
self.assertFalse(r.Contains(3, 2))
def test_overlaps(self):
def _QuadToRange(q):
return TextRange(
start_line=q[0], start_column=q[1], end_line=q[2], end_column=q[3])
TestCases = [
{
"r1": (2, 8, 2, 9),
"r2": (1, 1, 1, 100),
"result": False
},
{
"r1": (2, 8, 2, 9),
"r2": (2, 6, 2, 7),
"result": False
},
{
"r1": (2, 8, 2, 9),
"r2": (2, 6, 2, 8),
"result": True
},
{
"r1": (2, 8, 3, 9),
"r2": (2, 6, 2, 8),
"result": True
},
{
"r1": (2, 8, 4, 9),
"r2": (3, 6, 3, 800),
"result": True
},
{
"r1": (2, 8, 4, 9),
"r2": (1, 6, 3, 800),
"result": True
},
{
"r1": (2, 8, 4, 9),
"r2": (3, 6, 300, 800),
"result": True
},
{
"r1": (2, 8, 4, 9),
"r2": (1, 6, 2, 7),
"result": False
},
]
for t in TestCases:
r1 = _QuadToRange(t["r1"])
r2 = _QuadToRange(t["r2"])
self.assertEqual(t["result"], r1.Overlaps(r2))
self.assertEqual(t["result"], r2.Overlaps(r1))
if __name__ == '__main__':
unittest.main()
|
karlinjf/ChromiumXRefs
|
third_party/codesearch/test_messages.py
|
Python
|
apache-2.0
| 6,287
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
_set_ops = load_library.load_op_library(
resource_loader.get_path_to_datafile("_set_ops.so"))
assert _set_ops, "Could not load _set_ops.so."
_VALID_DTYPES = set([
dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.string])
@ops.RegisterShape("SetSize")
def _size_shape(unused_op):
"""Shape function for SetSize op."""
return [tensor_shape.unknown_shape()]
def set_size(a, validate_indices=True):
"""Compute number of unique elements along last dimension of `a`.
Args:
a: `SparseTensor`, with indices sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a`.
Returns:
For `a` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
`n-1` dimensions as `a`. Each value is the number of unique elements in
the corresponding `[0...n-1]` dimension of `a`.
Raises:
    TypeError: If `a` is an invalid type.
"""
a = tensor_util.convert_to_tensor_or_sparse_tensor(a, name="a")
if not isinstance(a, ops.SparseTensor):
raise TypeError("Expected `SparseTensor`, got %s." % a)
if a.values.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError("Invalid dtype %s." % a.values.dtype)
# pylint: disable=protected-access
return _set_ops.set_size(a.indices, a.values, a.shape, validate_indices)
ops.NoGradient("SetSize")
@ops.RegisterShape("DenseToDenseSetOperation")
def _dense_to_dense_shape(op):
"""Shapes for `SparseTensor` result given 2 dense inputs.
Args:
op: Operation with 2 dense `Tensor` inputs.
Returns:
Tuple of three shapes corresponding to the indices, values, and shape
`Tensor` components of the result `SparseTensor`.
Raises:
ValueError: if either input `Tensor` has rank < 2, or ranks do not match, or
first n-1 dims of input shapes are not compatible.
"""
# The following should stay in sync with `ComputeDenseToDense` shape
# assertions in kernels/set_kernels.cc.
input0_shape = op.inputs[0].get_shape()
input0_rank = input0_shape.ndims
if (input0_rank is not None) and (input0_rank < 2):
raise ValueError("Input 0, expected rank >= 2, got shape %s." %
input0_shape)
# Dimension n contains the set values to be compared, so ranks and the first
# n-1 dimensions of inputs and output must match.
input1_shape = op.inputs[1].get_shape()
input1_rank = input1_shape.ndims
if (input0_rank is not None) and (input1_rank is not None) and (
input0_rank != input1_rank):
raise ValueError(
"Ranks do not match: input 0 with shape %s, input 1 with shape %s." %
(input0_shape, input1_shape))
output_rank = input1_rank if input0_rank is None else input0_rank
  output_dim0 = input1_shape[0] if input0_shape[0] is None else input0_shape[0]
input0_dims = input0_shape.dims
if input0_dims is None:
group0_shape = tensor_shape.unknown_shape()
else:
group0_shape = tensor_shape.TensorShape(input0_dims[:-1])
input1_dims = input1_shape.dims
if input1_dims is None:
group1_shape = tensor_shape.unknown_shape()
else:
group1_shape = tensor_shape.TensorShape(input1_dims[:-1])
group0_shape.assert_is_compatible_with(group1_shape)
indices_shape = tensor_shape.TensorShape((output_dim0, output_rank))
values_shape = tensor_shape.unknown_shape(1)
shape_shape = tensor_shape.TensorShape((output_rank,))
return (indices_shape, values_shape, shape_shape)
@ops.RegisterShape("DenseToSparseSetOperation")
def _dense_to_sparse_shape(op):
"""Shapes for `SparseTensor` result given 1 dense input and 1 sparse input.
Args:
op: Operation with 1 dense `Tensor` and 1 `SparseTensor` input.
Returns:
Tuple of three shapes corresponding to the indices, values, and shape
`Tensor` components of the result `SparseTensor`.
Raises:
ValueError: if either input `Tensor` has rank < 2.
"""
# The following should stay in sync with `ComputeDenseToSparse` shape
# assertions in kernels/set_kernels.cc.
input_shape = op.inputs[0].get_shape()
input_rank = input_shape.ndims
if (input_rank is not None) and (input_rank < 2):
raise ValueError("Expected rank >= 2, got %s." % input_shape)
# Assert valid dimensions for the 3 `Tensor` components of `SparseTensor`.
ops.SparseTensor(op.inputs[1], op.inputs[2], op.inputs[3])
indices_shape = tensor_shape.TensorShape((input_shape[0], input_rank))
values_shape = tensor_shape.unknown_shape(1)
shape_shape = tensor_shape.TensorShape((input_rank,))
return (indices_shape, values_shape, shape_shape)
@ops.RegisterShape("SparseToSparseSetOperation")
def _sparse_to_sparse_shape(op):
"""Shapes for `SparseTensor` result given 2 sparse inputs.
Args:
op: Operation with 2 `SparseTensor` inputs.
Returns:
Tuple of three shapes corresponding to the indices, values, and shape
`Tensor` components of the result `SparseTensor`.
"""
# The following should stay in sync with `ComputeSparseToSparse` shape
# assertions in kernels/set_kernels.cc.
# Assert valid dimensions for the 3 `Tensor` components of `SparseTensor`.
ops.SparseTensor(op.inputs[0], op.inputs[1], op.inputs[2])
ops.SparseTensor(op.inputs[3], op.inputs[4], op.inputs[5])
indices_shape = tensor_shape.unknown_shape(2)
values_shape = tensor_shape.unknown_shape(1)
shape_shape = tensor_shape.unknown_shape(1)
return (indices_shape, values_shape, shape_shape)
ops.NoGradient("DenseToDenseSetOperation")
ops.NoGradient("DenseToSparseSetOperation")
ops.NoGradient("SparseToSparseSetOperation")
def _set_operation(a, b, set_operation, validate_indices=True):
"""Compute set operation of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
    set_operation: String indicating set operation. See
SetOperationOp::SetOperationFromContext for valid values.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the results
of the set operation.
Raises:
TypeError: If inputs are invalid types.
ValueError: If `a` is sparse and `b` is dense.
"""
a = tensor_util.convert_to_tensor_or_sparse_tensor(a, name="a")
if a.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError("'a' invalid dtype %s." % a.dtype)
b = tensor_util.convert_to_tensor_or_sparse_tensor(b, name="b")
if b.dtype.base_dtype != a.dtype.base_dtype:
raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
# pylint: disable=protected-access
if isinstance(a, ops.SparseTensor):
if isinstance(b, ops.SparseTensor):
indices, values, shape = _set_ops.sparse_to_sparse_set_operation(
a.indices, a.values, a.shape, b.indices, b.values, b.shape,
set_operation, validate_indices)
else:
raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
"Please flip the order of your inputs.")
elif isinstance(b, ops.SparseTensor):
indices, values, shape = _set_ops.dense_to_sparse_set_operation(
a, b.indices, b.values, b.shape, set_operation, validate_indices)
else:
indices, values, shape = _set_ops.dense_to_dense_set_operation(
a, b, set_operation, validate_indices)
# pylint: enable=protected-access
return ops.SparseTensor(indices, values, shape)
def set_intersection(a, b, validate_indices=True):
"""Compute set intersection of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the
intersections.
"""
return _set_operation(a, b, "intersection", validate_indices)
def set_difference(a, b, aminusb=True, validate_indices=True):
"""Compute set difference of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
aminusb: Whether to subtract `b` from `a`, vs vice versa.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the
differences.
"""
return _set_operation(a, b, "a-b" if aminusb else "b-a", validate_indices)
def set_union(a, b, validate_indices=True):
"""Compute set union of elements in last dimension of `a` and `b`.
All but the last dimension of `a` and `b` must match.
Args:
a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a` and `b`.
Returns:
A `SparseTensor` with the same rank as `a` and `b`, and all but the last
dimension the same. Elements along the last dimension contain the
unions.
"""
return _set_operation(a, b, "union", validate_indices)
|
rew4332/tensorflow
|
tensorflow/contrib/metrics/python/ops/set_ops.py
|
Python
|
apache-2.0
| 11,549
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for datasets loading."""
import tensorflow as tf
def change_resolution(image, res, method='area'):
image = tf.image.resize(image, method=method, antialias=True,
size=(res, res))
image = tf.cast(tf.round(image), dtype=tf.int32)
return image
def downsample_and_upsample(x, train, downsample_res, upsample_res, method):
"""Downsample and upsample."""
keys = ['targets']
if train and 'targets_slice' in x.keys():
keys += ['targets_slice']
for key in keys:
inputs = x[key]
# Conditional low resolution input.
x_down = change_resolution(inputs, res=downsample_res, method=method)
x['%s_%d' % (key, downsample_res)] = x_down
# We upsample here instead of in the model code because some upsampling
# methods are not TPU friendly.
x_up = change_resolution(x_down, res=upsample_res, method=method)
x['%s_%d_up_back' % (key, downsample_res)] = x_up
return x
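# Illustrative sketch, not part of the original file; the 64 -> 8 resolutions
# are hypothetical. `downsample_and_upsample` adds a low-resolution copy of
# 'targets' and its upsampled-back version under derived keys.
def _downsample_and_upsample_example():
  x = {'targets': tf.zeros([64, 64, 3], dtype=tf.int32)}
  x = downsample_and_upsample(
      x, train=False, downsample_res=8, upsample_res=64, method='area')
  # x now also holds 'targets_8' (8x8x3) and 'targets_8_up_back' (64x64x3).
  return x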
def random_channel_slice(x):
random_channel = tf.random.uniform(
shape=[], minval=0, maxval=3, dtype=tf.int32)
targets = x['targets']
res = targets.shape[1]
image_slice = targets[Ellipsis, random_channel: random_channel+1]
image_slice.set_shape([res, res, 1])
x['targets_slice'] = image_slice
x['channel_index'] = random_channel
return x
|
google-research/google-research
|
coltran/utils/datasets_utils.py
|
Python
|
apache-2.0
| 1,912
|
import logging
import tempfile
import time
from googleapiclient import discovery
from googleapiclient import http
from oauth2client.client import GoogleCredentials
# Some of this is copied from:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/storage/api/crud_object.py
# and:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/storage/api/list_objects.py
RETRIES_BEFORE_FAILURE = 12
FIRST_RETRY_SLEEP = 2.0
_SERVICE = None
def get_service():
global _SERVICE
if _SERVICE is None:
_SERVICE = create_service()
return _SERVICE
def create_service():
# Get the application default credentials. When running locally, these are
# available after running `gcloud init`. When running on compute
# engine, these are available from the environment.
credentials = GoogleCredentials.get_application_default()
# Construct the service object for interacting with the Cloud Storage API -
# the 'storage' service, at version 'v1'.
# You can browse other available api services and versions here:
# http://g.co/dev/api-client-library/python/apis/
return discovery.build('storage', 'v1', credentials=credentials)
def robustify(function):
def robust_function(*args, **kwargs):
error_num = 0
while True:
try:
return function(*args, **kwargs)
except Exception as e:
error_num += 1
logging.warning(
"Exception calling %s: '%s'. "
"This call has failed %d times. Will retry up to "
"%d times." % (
str(function),
str(e),
error_num,
RETRIES_BEFORE_FAILURE))
if error_num > RETRIES_BEFORE_FAILURE:
raise
sleep_time = FIRST_RETRY_SLEEP**error_num
                logging.warning("Sleeping for %0.2f seconds." % sleep_time)
time.sleep(sleep_time)
return robust_function
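# Note (not part of the original module): robustify retries with exponential
# backoff (2s, 4s, 8s, ... for up to 12 failures); the GCS helpers below opt
# in via the @robustify decorator.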
def split_bucket_and_name(url):
if not url.startswith("gs://"):
raise ValueError("Not a gs:// url: %s" % url)
return url[len("gs://"):].split("/", 1)
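# Illustrative example, not part of the original module; the URL is
# hypothetical: split_bucket_and_name("gs://my-bucket/a/b.txt") returns
# ["my-bucket", "a/b.txt"], while a bare "gs://my-bucket" yields a
# single-element list, which list_contents below accounts for.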
@robustify
def list_contents(prefix):
splitted = split_bucket_and_name(prefix)
if len(splitted) == 1:
(bucket_name, file_name_prefix) = (splitted[0], "")
else:
(bucket_name, file_name_prefix) = splitted
# Create a request to objects.list to retrieve a list of objects.
    fields_to_return = 'nextPageToken,items(name)'
req = get_service().objects().list(
bucket=bucket_name,
prefix=file_name_prefix,
maxResults=100000,
fields=fields_to_return)
all_objects = []
# If you have too many items to list in one request, list_next() will
# automatically handle paging with the pageToken.
while req:
resp = req.execute()
all_objects.extend(resp.get('items', []))
req = get_service().objects().list_next(req, resp)
return [item['name'] for item in all_objects]
@robustify
def move(source, dest):
# From https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite
(bucket_name, source_object) = split_bucket_and_name(source)
(bucket_name2, dest_object) = split_bucket_and_name(dest)
service = get_service()
request = service.objects().rewrite(
sourceBucket=bucket_name,
sourceObject=source_object,
destinationBucket=bucket_name,
destinationObject=dest_object,
body={})
request.execute()
# Delete source.
request = service.objects().delete(
bucket=bucket_name,
object=source_object)
request.execute()
@robustify
def put(
name,
input_handle,
readers=[],
owners=[],
mime_type='application/octet-stream'):
input_handle.seek(0)
(bucket_name, file_name) = split_bucket_and_name(name)
# This is the request body as specified:
# http://g.co/cloud/storage/docs/json_api/v1/objects/insert#request
body = {
'name': file_name,
}
# If specified, create the access control objects and add them to the
# request body
if readers or owners:
body['acl'] = []
for r in readers:
body['acl'].append({
'entity': 'user-%s' % r,
'role': 'READER',
'email': r
})
for o in owners:
body['acl'].append({
'entity': 'user-%s' % o,
'role': 'OWNER',
'email': o
})
# Now insert them into the specified bucket as a media insertion.
req = get_service().objects().insert(
bucket=bucket_name,
body=body,
        # You can also just set media_body=filename, but for the sake of
# demonstration, pass in the more generic file handle, which could
# very well be a StringIO or similar.
media_body=http.MediaIoBaseUpload(input_handle, mime_type))
resp = req.execute()
return resp
@robustify
def get(name, output_handle=None):
(bucket_name, file_name) = split_bucket_and_name(name)
if output_handle is None:
output_handle = tempfile.TemporaryFile(
prefix="kubeface-bucket-storage-",
suffix=".data")
# Use get_media instead of get to get the actual contents of the object
req = get_service().objects().get_media(
bucket=bucket_name,
object=file_name)
downloader = http.MediaIoBaseDownload(output_handle, req)
done = False
while done is False:
(status, done) = downloader.next_chunk()
logging.debug("Download {}%.".format(int(status.progress() * 100)))
output_handle.seek(0)
return output_handle
@robustify
def delete(name):
(bucket_name, file_name) = split_bucket_and_name(name)
req = get_service().objects().delete(bucket=bucket_name, object=file_name)
return req.execute()
def access_info(name):
(bucket_name, file_name) = split_bucket_and_name(name)
return (
"https://storage.cloud.google.com/"
"{bucket_name}/{file_name}\t[ {name} ]".format(
bucket_name=bucket_name,
file_name=file_name,
name=name))
|
hammerlab/kubeface
|
kubeface/bucket_storage.py
|
Python
|
apache-2.0
| 6,275
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy
from pyscf import gto, scf
from pyscf import dft
from pyscf import lib
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = 'h 0 0 0; h 1 .5 0; h 0 4 1; h 1 0 .2'
mol.basis = 'aug-ccpvdz'
mol.build()
#dm = scf.RHF(mol).run(conv_tol=1e-14).make_rdm1()
dm = numpy.load(os.path.realpath(os.path.join(__file__, '..', 'dm_h4.npy')))
mf = dft.RKS(mol)
mf.grids.atom_grid = {"H": (50, 110)}
mf.prune = None
mf.grids.build(with_non0tab=False)
nao = mol.nao_nr()
ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)
rho = dft.numint.eval_rho(mol, ao, dm, xctype='GGA')
def tearDownModule():
global mol, mf, ao, rho
del mol, mf, ao, rho
def finger(a):
w = numpy.cos(numpy.arange(a.size))
return numpy.dot(w, a.ravel())
class KnownValues(unittest.TestCase):
def test_parse_xc(self):
hyb, fn_facs = dft.libxc.parse_xc('.5*HF+.5*B3LYP,VWN*.5')
self.assertAlmostEqual(hyb[0], .6, 12)
self.assertEqual([x[0] for x in fn_facs], [1,106,131,7])
self.assertTrue(numpy.allclose([x[1] for x in fn_facs],
(0.04, 0.36, 0.405, 0.595)))
hyb, fn_facs = dft.libxc.parse_xc('HF,')
self.assertEqual(hyb[0], 1)
self.assertEqual(fn_facs, [])
hyb, fn_facs = dft.libxc.parse_xc('B88 - SLATER')
self.assertEqual(fn_facs, [(106, 1), (1, -1)])
hyb, fn_facs = dft.libxc.parse_xc('B88 -SLATER*.5')
self.assertEqual(fn_facs, [(106, 1), (1, -0.5)])
hyb, fn_facs = dft.libxc.parse_xc('0.5*B3LYP\n+0.25*B3LYP')
self.assertTrue(numpy.allclose(hyb, [.15, 0, 0]))
hyb = dft.libxc.hybrid_coeff('0.5*B3LYP+0.25*B3LYP')
self.assertAlmostEqual(hyb, .15, 12)
hyb, fn_facs = dft.libxc.parse_xc('0.6*CAM_B3LYP+0.4*B3P86')
self.assertTrue(numpy.allclose(hyb, [.08, 0, 0]))
self.assertTrue(numpy.allclose(fn_facs,
[(433, 0.6), (1, 0.032), (106, 0.288), (132, 0.324), (7, 0.076)]))
rsh = dft.libxc.rsh_coeff('0.6*CAM_B3LYP+0.4*B3P86')
self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))
hyb, fn_facs = dft.libxc.parse_xc('0.4*B3P86+0.6*CAM_B3LYP')
self.assertTrue(numpy.allclose(hyb, [.08, 0, 0]))
self.assertTrue(numpy.allclose(fn_facs,
[(1, 0.032), (106, 0.288), (132, 0.324), (7, 0.076), (433, 0.6)]))
rsh = dft.libxc.rsh_coeff('0.4*B3P86+0.6*CAM_B3LYP')
self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))
hyb, fn_facs = dft.libxc.parse_xc('0.5*SR-HF(0.3) + .8*HF + .22*LR_HF')
self.assertEqual(hyb, [1.3, 1.02, 0.3])
hyb, fn_facs = dft.libxc.parse_xc('0.5*SR-HF + .22*LR_HF(0.3) + .8*HF')
self.assertEqual(hyb, [1.3, 1.02, 0.3])
hyb, fn_facs = dft.libxc.parse_xc('0.5*SR-HF + .8*HF + .22*LR_HF(0.3)')
self.assertEqual(hyb, [1.3, 1.02, 0.3])
hyb, fn_facs = dft.libxc.parse_xc('0.5*RSH(2.04;0.56;0.3) + 0.5*BP86')
self.assertEqual(hyb, [1.3, 1.02, 0.3])
self.assertEqual(fn_facs, [(106, 0.5), (132, 0.5)])
hyb, fn_facs = dft.libxc.parse_xc('0.5*RSH(.3, 2.04, 0.56) + 0.5*BP86')
self.assertEqual(hyb, [1.3, 1.02, 0.3])
self.assertEqual(fn_facs, [(106, 0.5), (132, 0.5)])
self.assertRaises(ValueError, dft.libxc.parse_xc, 'SR_HF(0.3) + LR_HF(.5)')
self.assertRaises(ValueError, dft.libxc.parse_xc, 'LR-HF(0.3) + SR-HF(.5)')
hyb = dft.libxc.hybrid_coeff('M05')
self.assertAlmostEqual(hyb, 0.28, 9)
hyb, fn_facs = dft.libxc.parse_xc('APBE,')
self.assertEqual(fn_facs, [(184, 1)])
#hyb, fn_facs = dft.libxc.parse_xc('TF,')
#self.assertEqual(fn_facs, [(50, 1)])
ref = [(1, 1), (7, 1)]
self.assertEqual(dft.libxc.parse_xc_name('LDA,VWN'), (1,7))
self.assertEqual(dft.libxc.parse_xc(('LDA','VWN'))[1], ref)
self.assertEqual(dft.libxc.parse_xc((1, 7))[1], ref)
self.assertEqual(dft.libxc.parse_xc('1, 7')[1], ref)
self.assertEqual(dft.libxc.parse_xc(7)[1], [(7,1)])
self.assertEqual(dft.libxc.parse_xc('M11-L')[1], [(226,1),(75,1)])
self.assertEqual(dft.libxc.parse_xc('M11L' )[1], [(226,1),(75,1)])
self.assertEqual(dft.libxc.parse_xc('M11-L,M11L' )[1], [(226,1),(75,1)])
self.assertEqual(dft.libxc.parse_xc('M11_L,M11-L')[1], [(226,1),(75,1)])
self.assertEqual(dft.libxc.parse_xc('M11L,M11_L' )[1], [(226,1),(75,1)])
self.assertEqual(dft.libxc.parse_xc('Xpbe,')[1], [(123,1)])
self.assertEqual(dft.libxc.parse_xc('pbe,' )[1], [(101,1)])
hyb, fn_facs = dft.libxc.parse_xc('PBE*.4+LDA')
self.assertEqual(fn_facs, [(101, 0.4), (130, 0.4), (1, 1)])
self.assertRaises(KeyError, dft.libxc.parse_xc, 'PBE+VWN')
self.assertTrue (dft.libxc.is_meta_gga('m05'))
self.assertFalse(dft.libxc.is_meta_gga('pbe0'))
self.assertFalse(dft.libxc.is_meta_gga('tf,'))
self.assertFalse(dft.libxc.is_meta_gga('vv10'))
self.assertTrue (dft.libxc.is_gga('PBE0'))
self.assertFalse(dft.libxc.is_gga('m05'))
self.assertFalse(dft.libxc.is_gga('tf,'))
self.assertTrue (dft.libxc.is_lda('tf,'))
self.assertFalse(dft.libxc.is_lda('vv10'))
self.assertTrue (dft.libxc.is_hybrid_xc('m05'))
self.assertTrue (dft.libxc.is_hybrid_xc('pbe0,'))
self.assertTrue (dft.libxc.is_hybrid_xc('m05,'))
self.assertFalse(dft.libxc.is_hybrid_xc('vv10'))
self.assertTrue (dft.libxc.is_hybrid_xc((402,'vv10')))
self.assertTrue (dft.libxc.is_hybrid_xc(('402','vv10')))
def test_libxc_cam_beta_bug(self):
'''As a detector for libxc-3.0.0. libxc-3.0.1 fixed this bug
'''
import ctypes
rsh_tmp = (ctypes.c_double*3)()
dft.libxc._itrf.LIBXC_rsh_coeff(1, rsh_tmp)
beta = rsh_tmp[2]
self.assertEqual(beta, 0)
dft.libxc._itrf.LIBXC_rsh_coeff(433, rsh_tmp)
dft.libxc._itrf.LIBXC_rsh_coeff(1, rsh_tmp)
beta = rsh_tmp[2]
self.assertEqual(beta, 0) # libxc-3.0.0 produces -0.46
dft.libxc._itrf.LIBXC_is_hybrid(1)
dft.libxc._itrf.LIBXC_rsh_coeff(1, rsh_tmp)
beta = rsh_tmp[2]
self.assertEqual(beta, 0)
def test_nlc_coeff(self):
self.assertEqual(dft.libxc.nlc_coeff('0.5*vv10'), [5.9, 0.0093])
def test_lda(self):
e,v,f,k = dft.libxc.eval_xc('lda,', rho[0][:3], deriv=3)
self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)
self.assertAlmostEqual(lib.finger(v[0]), -0.6294083390180697, 8)
self.assertAlmostEqual(lib.finger(f[0]), -1.1414693830969338, 8)
self.assertAlmostEqual(lib.finger(k[0]), 4.1402447248393921, 8)
e,v,f,k = dft.libxc.eval_xc('lda,', [rho[0][:3]*.5]*2, spin=1, deriv=3)
self.assertAlmostEqual(lib.finger(e) , -0.4720562542635522, 8)
self.assertAlmostEqual(lib.finger(v[0].T[0]), -0.6294083390180697, 8)
self.assertAlmostEqual(lib.finger(v[0].T[1]), -0.6294083390180697, 8)
self.assertAlmostEqual(lib.finger(f[0].T[0]), -1.1414693830969338*2, 8)
self.assertAlmostEqual(lib.finger(f[0].T[2]), -1.1414693830969338*2, 8)
self.assertAlmostEqual(lib.finger(k[0].T[0]), 4.1402447248393921*4, 7)
self.assertAlmostEqual(lib.finger(k[0].T[3]), 4.1402447248393921*4, 7)
def test_lyp(self):
e,v,f = dft.libxc.eval_xc(',LYP', rho, deriv=2)[:3]
self.assertAlmostEqual(numpy.dot(rho[0],e), -62.114576182676615, 8)
self.assertAlmostEqual(numpy.dot(rho[0],v[0]),-81.771670866308455, 8)
self.assertAlmostEqual(numpy.dot(rho[0],v[1]), 27.485383255125743, 8)
self.assertAlmostEqual(numpy.dot(rho[0],f[0]), 186.823806251777, 2)
self.assertAlmostEqual(numpy.dot(rho[0],f[1]), -3391.2428894571085, 6)
self.assertAlmostEqual(numpy.dot(rho[0],f[2]), 0, 8)
self.assertAlmostEqual(abs(f[2]).sum(), 0, 3)
def test_define_xc(self):
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
# A fictitious XC functional to demonstrate the usage
rho0, dx, dy, dz = rho[:4]
gamma = (dx**2 + dy**2 + dz**2)
exc = .01 * rho0**2 + .02 * (gamma+.001)**.5
vrho = .01 * 2 * rho0
vgamma = .02 * .5 * (gamma+.001)**(-.5)
vlapl = None
vtau = None
vxc = (vrho, vgamma, vlapl, vtau)
fxc = None # 2nd order functional derivative
kxc = None # 3rd order functional derivative
return exc, vxc, fxc, kxc
mf = dft.RKS(mol)
ni = dft.libxc.define_xc(mf._numint, eval_xc, 'GGA', hyb=0.2)
numpy.random.seed(1)
rho = numpy.random.random((4,10))
exc, vxc = ni.eval_xc(None, rho, 0, deriv=1)[:2]
self.assertAlmostEqual(lib.finger(exc), 0.0012441814416833327, 9)
self.assertAlmostEqual(lib.finger(vxc[0]), 0.0065565189784811129, 9)
self.assertAlmostEqual(lib.finger(vxc[1]), 0.0049270110162854116, 9)
mf = mf.define_xc_('0.5*B3LYP+0.5*B3LYP')
exc0, vxc0 = mf._numint.eval_xc(None, rho, 0, deriv=1)[:2]
exc1, vxc1 = dft.libxc.eval_xc('0.5*B3LYP+0.5*B3LYP', rho, 0, deriv=1)[:2]
self.assertAlmostEqual(abs(exc0-exc1).max(), 0, 9)
self.assertAlmostEqual(abs(vxc0[0]-vxc1[0]).max(), 0, 9)
self.assertAlmostEqual(abs(vxc0[1]-vxc1[1]).max(), 0, 9)
self.assertRaises(ValueError, dft.libxc.define_xc, mf._numint, 0.1)
def test_m05x(self):
rho =(numpy.array([1., 1., 0., 0., 0., 0.165 ]).reshape(-1,1),
numpy.array([.8, 1., 0., 0., 0., 0.1050]).reshape(-1,1))
test_ref = numpy.array([-1.57876583, -2.12127045,-2.11264351,-0.00315462,
0.00000000, -0.00444560, 3.45640232, 4.4349756])
exc, vxc, fxc, kxc = dft.libxc.eval_xc('1.38888888889*m05,', rho, 1, deriv=1)
self.assertAlmostEqual(float(exc)*1.8, test_ref[0], 5)
self.assertAlmostEqual(abs(vxc[0]-test_ref[1:3]).max(), 0, 6)
self.assertAlmostEqual(abs(vxc[1]-test_ref[3:6]).max(), 0, 6)
self.assertAlmostEqual(abs(vxc[3]-test_ref[6:8]).max(), 0, 5)
exc, vxc, fxc, kxc = dft.libxc.eval_xc('1.38888888889*m05,', rho[0], 0, deriv=1)
self.assertAlmostEqual(float(exc), -0.5746231988116002, 5)
self.assertAlmostEqual(float(vxc[0]), -0.8806121005703862, 6)
self.assertAlmostEqual(float(vxc[1]), -0.0032300155406846756, 7)
self.assertAlmostEqual(float(vxc[3]), 0.4474953100487698, 5)
def test_camb3lyp(self):
rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
exc, vxc, fxc, kxc = dft.libxc.eval_xc('camb3lyp', rho, 0, deriv=1)
self.assertAlmostEqual(float(exc), -0.5752559666317147, 7)
self.assertAlmostEqual(float(vxc[0]), -0.7709812578936763, 7)
self.assertAlmostEqual(float(vxc[1]), -0.0029862221286189846, 7)
self.assertEqual(dft.libxc.rsh_coeff('camb3lyp'), [0.33, 0.65, -0.46])
rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
exc, vxc, fxc, kxc = dft.libxc.eval_xc('RSH(0.5,0.65,-0.46) + 0.46*ITYH + .35*B88,', rho, 0, deriv=1)
self.assertAlmostEqual(float(exc), -0.48916154057161476, 9)
self.assertAlmostEqual(float(vxc[0]), -0.6761177630311709, 9)
self.assertAlmostEqual(float(vxc[1]), -0.002949151742087167, 9)
def test_ityh(self):
rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
exc, vxc, fxc, kxc = dft.libxc.eval_xc('ityh,', rho, 0, deriv=1)
self.assertAlmostEqual(float(exc), -0.6359945579326314, 7)
self.assertAlmostEqual(float(vxc[0]), -0.8712041561251518, 7)
self.assertAlmostEqual(float(vxc[1]), -0.003911167644579979, 7)
self.assertEqual(dft.libxc.rsh_coeff('ityh,'), [0.2, 0.0, 0.0])
def test_deriv_order(self):
self.assertTrue(dft.libxc.test_deriv_order('lda', 3, raise_error=False))
self.assertTrue(not dft.libxc.test_deriv_order('m05', 2, raise_error=False))
self.assertRaises(NotImplementedError, dft.libxc.test_deriv_order, 'camb3lyp', 3, True)
#self.assertRaises(NotImplementedError, dft.libxc.test_deriv_order, 'pbe0', 3, True)
self.assertRaises(KeyError, dft.libxc.test_deriv_order, 'OL2', 3, True)
def test_xc_type(self):
self.assertEqual(dft.libxc.xc_type(416), 'GGA')
self.assertEqual(dft.libxc.xc_type('hf'), 'HF')
self.assertEqual(dft.libxc.xc_type(',vwn'), 'LDA')
self.assertEqual(dft.libxc.xc_type('lda+b3lyp'), 'GGA')
self.assertEqual(dft.libxc.xc_type('wb97m_v'), 'MGGA')
self.assertEqual(dft.libxc.xc_type('bp86'), 'GGA')
if __name__ == "__main__":
print("Test libxc")
unittest.main()
|
gkc1000/pyscf
|
pyscf/dft/test/test_libxc.py
|
Python
|
apache-2.0
| 13,507
|
from common import * # NOQA
import yaml
from cattle import ApiError
def _create_service(client, env, image_uuid, service_kind):
labels = {'foo': "bar"}
if service_kind == "service":
launch_config = {"imageUuid": image_uuid, "labels": labels}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
elif service_kind == "dnsService":
launch_config = {"labels": labels}
service = client.create_dnsService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
elif service_kind == "externalService":
launch_config = {"labels": labels}
service = client.create_externalService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
hostname="a.com")
return labels, service
def test_service_add_instance_selector(new_context):
client = new_context.client
context = new_context
env = _create_stack(client)
image_uuid = context.image_uuid
# use case #1 - instance having selector's label,
# is present when service with selector is created
labels = {'foo': "bar"}
container1 = client.create_container(imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
container1 = client.wait_success(container1)
assert container1.state == "running"
launch_config = {"imageUuid": "rancher/none"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
selectorContainer="foo=bar")
service = client.wait_success(service)
assert service.selectorContainer == "foo=bar"
service = client.wait_success(service.activate())
assert service.state == "active"
compose_config = env.exportconfig()
assert compose_config is not None
document = yaml.load(compose_config.dockerComposeConfig)
svc = document['services'][service.name]
assert len(svc['labels']) == 1
export_labels = {"io.rancher.service.selector.container": "foo=bar"}
assert svc['labels'] == export_labels
wait_for(
lambda: len(client.list_serviceExposeMap(serviceId=service.id)) == 1
)
expose_map = container1.serviceExposeMaps()[0]
assert expose_map.managed == 0
# use case #2 - instance having selector's label,
# is added after service with selector creation
container2 = client.create_container(imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
container2 = client.wait_success(container2)
assert container2.state == "running"
wait_for(
lambda: len(client.list_serviceExposeMap(serviceId=service.id)) == 2
)
expose_map = container2.serviceExposeMaps()[0]
assert expose_map.managed == 0
def _create_stack(client):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
return env
def test_service_mixed_selector_based_wo_image(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
labels = {'foo': "barbar"}
container1 = client.create_container(imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
container1 = client.wait_success(container1)
assert container1.state == "running"
launch_config = {"imageUuid": "sim:rancher/none:latest"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
selectorContainer="foo=barbar")
service = client.wait_success(service)
assert service.selectorContainer == "foo=barbar"
service = client.wait_success(service.activate())
assert service.state == "active"
wait_for(
lambda: len(client.list_serviceExposeMap(serviceId=service.id)) == 1
)
# add instance having selector label
labels = {'foo': "barbar"}
container2 = client.create_container(imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
container2 = client.wait_success(container2)
assert container2.state == "running"
wait_for(
lambda: len(client.list_serviceExposeMap(serviceId=service.id)) == 2
)
def test_service_no_image_no_selector(client, context):
env = _create_stack(client)
with pytest.raises(ApiError) as e:
launch_config = {"imageUuid": "rancher/none"}
client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
def test_svc_invalid_selector(client):
env = _create_stack(client)
launch_config = {"imageUuid": "rancher/none"}
with pytest.raises(ApiError) as e:
client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
selectorContainer="foo not in barbar")
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidFormat'
def test_update_instance_selector(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
labels = {'foo1': "bar1"}
c1 = client.create_container(name=random_str(),
imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
c1 = client.wait_success(c1)
assert c1.state == "running"
labels = {'bar1': "foo1"}
c2 = client.create_container(name=random_str(),
imageUuid=image_uuid,
startOnCreate=True,
labels=labels)
c2 = client.wait_success(c2)
assert c2.state == "running"
launch_config = {"imageUuid": "rancher/none"}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
selectorContainer="foo1=bar1")
svc = client.wait_success(svc)
assert svc.selectorContainer == "foo1=bar1"
svc = client.wait_success(svc.activate())
wait_for(
lambda: len(client.
list_serviceExposeMap(serviceId=svc.id,
state='active')) == 1
)
maps = client.list_serviceExposeMap(serviceId=svc.id,
state='active')
assert maps[0].instanceId == c1.id
# update selector, validate c1 got de-registered, and c2 registered
svc = client.update(svc, selectorContainer="bar1=foo1")
client.wait_success(svc)
wait_for(
lambda: len(client.
list_serviceExposeMap(serviceId=svc.id,
state='active')) == 1
)
maps = client.list_serviceExposeMap(serviceId=svc.id, state='active')
assert maps[0].instanceId == c2.id
|
vincent99/cattle
|
tests/integration-v1/cattletest/core/test_svc_selectors.py
|
Python
|
apache-2.0
| 7,736
|
# coding: utf-8
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.server"""
import six.moves.cPickle as pickle
import datetime
import json
import errno
import operator
import os
import mock
import six
from six import StringIO
import unittest
import math
import random
from shutil import rmtree
from time import gmtime, strftime, time, struct_time
from tempfile import mkdtemp
from hashlib import md5
import tempfile
from collections import defaultdict
from contextlib import contextmanager
from eventlet import sleep, spawn, wsgi, listen, Timeout, tpool, greenthread
from eventlet.green import httplib
from nose import SkipTest
from swift import __version__ as swift_version
from swift.common.http import is_success
from test.unit import FakeLogger, debug_logger, mocked_http_conn, \
make_timestamp_iter, DEFAULT_TEST_EC_TYPE
from test.unit import connect_tcp, readuntil2crlfs, patch_policies
from swift.obj import server as object_server
from swift.obj import updater
from swift.obj import diskfile
from swift.common import utils, bufferedhttp
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
NullLogger, storage_directory, public, replication, encode_timestamps, \
Timestamp
from swift.common import constraints
from swift.common.swob import Request, WsgiBytesIO
from swift.common.splice import splice
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.common.exceptions import DiskFileDeviceUnavailable, DiskFileNoSpace
def mock_time(*args, **kwargs):
return 5000.0
test_policies = [
StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
]
@contextmanager
def fake_spawn():
"""
Spawn and capture the result so we can later wait on it. This means we can
test code executing in a greenthread but still wait() on the result to
ensure that the method has completed.
"""
greenlets = []
def _inner_fake_spawn(func, *a, **kw):
gt = greenthread.spawn(func, *a, **kw)
greenlets.append(gt)
return gt
object_server.spawn = _inner_fake_spawn
with mock.patch('swift.obj.server.spawn', _inner_fake_spawn):
try:
yield
finally:
for gt in greenlets:
gt.wait()
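# Illustrative sketch (not part of the original tests): fake_spawn is intended
# to wrap request handling that calls object_server.spawn internally, e.g.
#   with fake_spawn():
#       resp = req.get_response(self.object_controller)
# so that every captured greenthread has completed once the block exits.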
@patch_policies(test_policies)
class TestObjectController(unittest.TestCase):
"""Test swift.obj.server.ObjectController"""
def setUp(self):
"""Set up for testing swift.object.server.ObjectController"""
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda1'))
self.conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0}
self.object_controller = object_server.ObjectController(
self.conf, logger=debug_logger())
self.object_controller.bytes_per_sync = 1
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.df_mgr = diskfile.DiskFileManager(self.conf,
self.object_controller.logger)
self.logger = debug_logger('test-object-controller')
self.ts = make_timestamp_iter()
def tearDown(self):
"""Tear down for testing swift.object.server.ObjectController"""
rmtree(self.tmpdir)
tpool.execute = self._orig_tpool_exc
def _stage_tmp_dir(self, policy):
mkdirs(os.path.join(self.testdir, 'sda1',
diskfile.get_tmp_dir(policy)))
def check_all_api_methods(self, obj_name='o', alt_res=None):
path = '/sda1/p/a/c/%s' % obj_name
body = 'SPECIAL_STRING'
op_table = {
"PUT": (body, alt_res or 201, ''), # create one
"GET": ('', alt_res or 200, body), # check it
"POST": ('', alt_res or 202, ''), # update it
"HEAD": ('', alt_res or 200, ''), # head it
"DELETE": ('', alt_res or 204, '') # delete it
}
for method in ["PUT", "GET", "POST", "HEAD", "DELETE"]:
in_body, res, out_body = op_table[method]
timestamp = normalize_timestamp(time())
req = Request.blank(
path, environ={'REQUEST_METHOD': method},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = in_body
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, res)
if out_body and (200 <= res < 300):
self.assertEqual(resp.body, out_body)
def test_REQUEST_SPECIAL_CHARS(self):
obj = 'special昆%20/%'
self.check_all_api_methods(obj)
def test_device_unavailable(self):
def raise_disk_unavail(*args, **kwargs):
raise DiskFileDeviceUnavailable()
self.object_controller.get_diskfile = raise_disk_unavail
self.check_all_api_methods(alt_res=507)
def test_allowed_headers(self):
dah = ['content-disposition', 'content-encoding', 'x-delete-at',
'x-object-manifest', 'x-static-large-object']
conf = {'devices': self.testdir, 'mount_check': 'false',
'allowed_headers': ','.join(['content-length'] + dah)}
self.object_controller = object_server.ObjectController(
conf, logger=debug_logger())
self.assertEqual(self.object_controller.allowed_headers, set(dah))
def test_POST_update_meta(self):
# Test swift.obj.server.ObjectController.POST
original_headers = self.object_controller.allowed_headers
test_headers = 'content-encoding foo bar'.split()
self.object_controller.allowed_headers = set(test_headers)
put_timestamp = normalize_timestamp(time())
headers = {'X-Timestamp': put_timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Object-Sysmeta-Color': 'blue',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'VERIFY'
etag = '"%s"' % md5('VERIFY').hexdigest()
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'Etag': etag,
})
post_timestamp = normalize_timestamp(time())
headers = {'X-Timestamp': post_timestamp,
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'Content-Encoding': 'gzip',
'Foo': 'fooheader',
'Bar': 'barheader',
'Content-Type': 'application/x-test'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=headers)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
expected_headers = {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'blue',
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'Foo': 'fooheader',
'Bar': 'barheader',
'Content-Encoding': 'gzip',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
}
self.assertEqual(dict(resp.headers), expected_headers)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), expected_headers)
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'X-Object-Sysmeta-Color': 'red',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'blue',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
# test defaults
self.object_controller.allowed_headers = original_headers
put_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-1': 'One',
'X-Object-Manifest': 'c/bar',
'Content-Encoding': 'gzip',
'Content-Disposition': 'bar',
'X-Static-Large-Object': 'True',
})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'Etag': etag,
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-1': 'One',
'Content-Encoding': 'gzip',
'X-Object-Manifest': 'c/bar',
'Content-Disposition': 'bar',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': put_timestamp,
'X-Timestamp': put_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(put_timestamp)))),
})
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'X-Object-Meta-3': 'Three',
'Foo': 'fooheader',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-3': 'Three',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
# Test for empty metadata
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-3': ''})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-3': '',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
def test_POST_old_timestamp(self):
ts = time()
orig_timestamp = utils.Timestamp(ts).internal
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': orig_timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# Same timestamp should result in 409
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': orig_timestamp,
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'Content-Encoding': 'gzip',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
# Earlier timestamp should result in 409
timestamp = normalize_timestamp(ts - 1)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-5': 'Five',
'X-Object-Meta-6': 'Six',
'Content-Encoding': 'gzip',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
def test_POST_conflicts_with_later_POST(self):
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
'Content-Type': 'plain/text'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_post1 = next(self.ts).internal
t_post2 = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post2})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post1})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
ts_file = os.path.join(obj_dir, t_post2 + '.meta')
self.assertTrue(os.path.isfile(ts_file))
meta_file = os.path.join(obj_dir, t_post1 + '.meta')
self.assertFalse(os.path.isfile(meta_file))
def test_POST_not_exist(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/fail',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_POST_invalid_path(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_no_timestamp(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_bad_timestamp(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'bad',
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_container_connection(self):
# Test that POST does call container_update and returns success
# whether update to container server succeeds or fails
def mock_http_connect(calls, response, with_exc=False):
class FakeConn(object):
def __init__(self, calls, status, with_exc):
self.calls = calls
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.with_exc = with_exc
def getresponse(self):
calls[0] += 1
if self.with_exc:
raise Exception('test')
return self
def read(self, amt=None):
return ''
return lambda *args, **kwargs: FakeConn(calls, response, with_exc)
ts = time()
timestamp = normalize_timestamp(ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '0'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 1),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 202)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 2),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 202, with_exc=True)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 3),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new2'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 500)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def _test_POST_container_updates(self, policy, update_etag=None):
# Test that POST requests result in correct calls to container_update
t = [next(self.ts) for _ in range(0, 5)]
calls_made = []
update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6'
def mock_container_update(ctlr, op, account, container, obj, request,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
body = 'test'
headers = {
'X-Timestamp': t[1].internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
# EC fragments will typically have a different size to the body and
# for small bodies the fragments may be longer. For this test all
# that matters is that the fragment and body lengths differ.
body = body + 'ec_overhead'
headers['X-Backend-Container-Update-Override-Etag'] = update_etag
headers['X-Backend-Container-Update-Override-Size'] = '4'
headers['X-Object-Sysmeta-Ec-Etag'] = update_etag
headers['X-Object-Sysmeta-Ec-Content-Length'] = '4'
headers['X-Object-Sysmeta-Ec-Frag-Index'] = 2
headers['Content-Length'] = str(len(body))
req = Request.blank('/sda1/p/a/c/o', body=body,
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'application/octet-stream;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with no metadata newer than the data should return 409,
# container update not expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[0].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
t[1].internal)
self.assertEqual(0, len(calls_made))
# POST with newer metadata returns success and container update
# is expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[3].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'application/octet-stream;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[1].internal,
'x-meta-timestamp': t[3].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with no metadata newer than existing metadata should return
# 409, container update not expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
t[3].internal)
self.assertEqual(0, len(calls_made))
        # POST with newer content-type but older metadata returns success
        # and container update is expected; the newer content-type should have
        # the existing swift_bytes appended
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[2].internal,
'Content-Type': 'text/plain',
'Content-Type-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/plain;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[2].internal,
'x-meta-timestamp': t[3].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with older content-type but newer metadata returns success
# and container update is expected
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[4].internal,
'Content-Type': 'older',
'Content-Type-Timestamp': t[1].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/plain;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[2].internal,
'x-meta-timestamp': t[4].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with same-time content-type and metadata returns 409
# and no container update is expected
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[4].internal,
'Content-Type': 'ignored',
'Content-Type-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(0, len(calls_made))
# POST with implicit newer content-type but older metadata
# returns success and container update is expected,
# update reports existing metadata timestamp
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[3].internal,
'Content-Type': 'text/newer',
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/newer;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[3].internal,
'x-meta-timestamp': t[4].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
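    # Run the shared POST container-update checks once with the replication
    # policy and once with the EC policy (which also overrides the update
    # etag).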
def test_POST_container_updates_with_replication_policy(self):
self._test_POST_container_updates(POLICIES[0])
def test_POST_container_updates_with_EC_policy(self):
self._test_POST_container_updates(
POLICIES[1], update_etag='override_etag')
def test_POST_container_updates_precedence(self):
        # Verify that the correct etag and size are sent with container
        # updates for a PUT and for a subsequent POST.
def do_test(body, headers, policy):
def mock_container_update(ctlr, op, account, container, obj, req,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
calls_made = []
ts_put = next(self.ts)
# make PUT with given headers and verify correct etag is sent in
# container update
headers.update({
'Content-Type':
'application/octet-stream;swift_bytes=123456789',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 2,
'X-Timestamp': ts_put.internal,
'Content-Length': len(body)})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers, body=body)
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type':
'application/octet-stream;swift_bytes=123456789',
'x-timestamp': ts_put.internal,
'x-etag': 'expected'})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# make a POST and verify container update has the same etag
calls_made = []
ts_post = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': ts_post.internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers.update({
'x-content-type-timestamp': ts_put.internal,
'x-meta-timestamp': ts_post.internal})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# sanity check - EC headers are ok
headers = {
'X-Backend-Container-Update-Override-Etag': 'expected',
'X-Backend-Container-Update-Override-Size': '4',
'X-Object-Sysmeta-Ec-Etag': 'expected',
'X-Object-Sysmeta-Ec-Content-Length': '4'}
do_test('test ec frag longer than 4', headers, POLICIES[1])
# middleware overrides take precedence over EC/older overrides
headers = {
'X-Backend-Container-Update-Override-Etag': 'unexpected',
'X-Backend-Container-Update-Override-Size': '3',
'X-Object-Sysmeta-Ec-Etag': 'unexpected',
'X-Object-Sysmeta-Ec-Content-Length': '3',
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('test ec frag longer than 4', headers, POLICIES[1])
# overrides with replication policy
headers = {
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('longer than 4', headers, POLICIES[0])
# middleware overrides take precedence over EC/older overrides with
# replication policy
headers = {
'X-Backend-Container-Update-Override-Etag': 'unexpected',
'X-Backend-Container-Update-Override-Size': '3',
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('longer than 4', headers, POLICIES[0])
def _test_PUT_then_POST_async_pendings(self, policy, update_etag=None):
# Test that PUT and POST requests result in distinct async pending
# files when sync container update fails.
def fake_http_connect(*args):
raise Exception('test')
device_dir = os.path.join(self.testdir, 'sda1')
t_put = next(self.ts)
update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6'
put_headers = {
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
'Content-Length': '4',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice'}
if policy.policy_type == EC_POLICY:
put_headers.update({
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Backend-Container-Update-Override-Etag': update_etag,
'X-Object-Sysmeta-Ec-Etag': update_etag})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers, body='test')
with mock.patch('swift.obj.server.http_connect', fake_http_connect), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
async_pending_file_put = os.path.join(
device_dir, diskfile.get_async_dir(policy), 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_put.internal)
self.assertTrue(os.path.isfile(async_pending_file_put),
'Expected %s to be a file but it is not.'
% async_pending_file_put)
expected_put_headers = {
'Referer': 'PUT http://localhost/sda1/p/a/c/o',
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'X-Content-Type': 'application/octet-stream;swift_bytes=123456789',
'X-Size': '4',
'X-Etag': '098f6bcd4621d373cade4e832627b4f6',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': '%d' % int(policy)}
if policy.policy_type == EC_POLICY:
expected_put_headers['X-Etag'] = update_etag
        with open(async_pending_file_put) as fp:
            self.assertDictEqual(
                pickle.load(fp),
                {'headers': expected_put_headers,
                 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# POST with newer metadata returns success and container update
# is expected
t_post = next(self.ts)
post_headers = {
'X-Trans-Id': 'post_trans_id',
'X-Timestamp': t_post.internal,
'Content-Type': 'application/other',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=post_headers)
with mock.patch('swift.obj.server.http_connect', fake_http_connect), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.maxDiff = None
        # check async pending file for PUT is still intact
        with open(async_pending_file_put) as fp:
            self.assertDictEqual(
                pickle.load(fp),
                {'headers': expected_put_headers,
                 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# check distinct async pending file for POST
async_pending_file_post = os.path.join(
device_dir, diskfile.get_async_dir(policy), 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_post.internal)
self.assertTrue(os.path.isfile(async_pending_file_post),
'Expected %s to be a file but it is not.'
% async_pending_file_post)
expected_post_headers = {
'Referer': 'POST http://localhost/sda1/p/a/c/o',
'X-Trans-Id': 'post_trans_id',
'X-Timestamp': t_put.internal,
'X-Content-Type': 'application/other;swift_bytes=123456789',
'X-Size': '4',
'X-Etag': '098f6bcd4621d373cade4e832627b4f6',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': '%d' % int(policy),
'X-Meta-Timestamp': t_post.internal,
'X-Content-Type-Timestamp': t_post.internal,
}
if policy.policy_type == EC_POLICY:
expected_post_headers['X-Etag'] = update_etag
        with open(async_pending_file_post) as fp:
            self.assertDictEqual(
                pickle.load(fp),
                {'headers': expected_post_headers,
                 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# verify that only the POST (most recent) async update gets sent by the
# object updater, and that both update files are deleted
with mock.patch(
'swift.obj.updater.ObjectUpdater.object_update') as mock_update, \
mock.patch('swift.obj.updater.dump_recon_cache'):
object_updater = updater.ObjectUpdater(
{'devices': self.testdir,
'mount_check': 'false'}, logger=debug_logger())
node = {'id': 1}
mock_ring = mock.MagicMock()
mock_ring.get_nodes.return_value = (99, [node])
object_updater.container_ring = mock_ring
            mock_update.return_value = (True, 1)
object_updater.run_once()
self.assertEqual(1, mock_update.call_count)
self.assertEqual((node, 99, 'PUT', '/a/c/o'),
mock_update.call_args_list[0][0][0:4])
actual_headers = mock_update.call_args_list[0][0][4]
self.assertTrue(
actual_headers.pop('user-agent').startswith('object-updater'))
self.assertDictEqual(expected_post_headers, actual_headers)
self.assertFalse(
os.listdir(os.path.join(
device_dir, diskfile.get_async_dir(policy))))
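    # As above, exercise the PUT-then-POST async pending helper with both
    # the replication policy and the EC policy (overridden update etag).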
def test_PUT_then_POST_async_pendings_with_repl_policy(self):
self._test_PUT_then_POST_async_pendings(POLICIES[0])
def test_PUT_then_POST_async_pendings_with_EC_policy(self):
self._test_PUT_then_POST_async_pendings(
POLICIES[1], update_etag='override_etag')
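    # A zero-byte .data file (metadata only, body lost) is corrupt: a POST
    # against it should 404 and the diskfile should be moved to quarantine.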
def test_POST_quarantine_zbyte(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
objfile.open()
file_name = os.path.basename(objfile._data_file)
with open(objfile._data_file) as fp:
metadata = diskfile.read_metadata(fp)
os.unlink(objfile._data_file)
with open(objfile._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
self.assertEqual(os.listdir(objfile._datadir)[0], file_name)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time())})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(objfile._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
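    # The next several tests cover basic PUT request validation: bad paths,
    # missing timestamps, and missing or malformed content headers.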
def test_PUT_invalid_path(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_timestamp(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT',
'CONTENT_LENGTH': '0'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_content_type(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6',
'Content-Type': '\xff\xff'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue('Content-Type' in resp.body)
def test_PUT_no_content_length(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
del req.headers['Content-Length']
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 411)
def test_PUT_zero_content_length(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = ''
self.assertEqual(req.headers['Content-Length'], '0')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_bad_transfer_encoding(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
req.headers['Transfer-Encoding'] = 'bad'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_if_none_match_star(self):
# First PUT should succeed
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# File should already exist so it should fail
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match(self):
# PUT with if-none-match set and nothing there should succeed
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': 'notthere'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# PUT with if-none-match of the object etag should fail
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '0b4c12d7e0a73840c1c4f148fda3b037'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_PUT_common(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'x-object-meta-test': 'one',
'Custom-Header': '*',
'X-Backend-Replication-Headers':
'Content-Type Content-Length'})
req.body = 'VERIFY'
        with mock.patch.object(self.object_controller, 'allowed_headers',
                               ['Custom-Header']):
            resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]),
'p', hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '6',
'ETag': '0b4c12d7e0a73840c1c4f148fda3b037',
'Content-Type': 'application/octet-stream',
'name': '/a/c/o',
'X-Object-Meta-Test': 'one',
'Custom-Header': '*'})
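    # Overwrite behaviour: a newer PUT replaces the on-disk .data file and
    # its metadata, while a PUT at or before the timestamp already on disk
    # is rejected with 409 and X-Backend-Timestamp reports what is there.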
def test_PUT_overwrite(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_overwrite_to_older_ts_success(self):
old_timestamp = next(self.ts)
new_timestamp = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': old_timestamp.normal,
'Content-Length': '0',
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': new_timestamp.normal,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
new_timestamp.internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(
diskfile.read_metadata(objfile),
{'X-Timestamp': new_timestamp.internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_overwrite_to_newer_ts_failed(self):
old_timestamp = next(self.ts)
new_timestamp = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': new_timestamp.normal,
'Content-Length': '0',
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': old_timestamp.normal,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
with mock.patch(
'swift.obj.diskfile.BaseDiskFile.create') as mock_create:
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(mock_create.call_count, 0)
        # sanity: no .data file was written (if one had been written
        # unexpectedly it would be removed by cleanup_ondisk_files anyway)
datafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
old_timestamp.internal + '.data')
self.assertFalse(os.path.exists(datafile))
        # the .ts tombstone file still exists
tsfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
new_timestamp.internal + '.ts')
self.assertTrue(os.path.isfile(tsfile))
def test_PUT_overwrite_w_delete_at(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'X-Delete-At': 9999999999,
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_old_timestamp(self):
ts = time()
orig_timestamp = utils.Timestamp(ts).internal
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': orig_timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(ts),
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(ts - 1),
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY THREE'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
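    # Timestamp bounds: a negative X-Timestamp is rejected with 400, as is a
    # timestamp with more than ten digits before the decimal point.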
def test_PUT_new_object_really_old_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '-1', # 1969-12-31 23:59:59
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', # 1970-01-01 00:00:01
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_object_really_new_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '9999999999', # 2286-11-20 17:46:40
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# roll over to 11 digits before the decimal
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '10000000000',
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_etag(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'text/plain'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_invalid_etag(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'text/plain',
'ETag': 'invalid'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_user_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY THREE'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY THREE')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '12',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
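    # The footer tests below send the PUT body as a two-part MIME document:
    # the first part is the object data and the second part is a JSON blob
    # of footer metadata (e.g. the Etag) whose Content-MD5 header carries a
    # checksum of that blob.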
def test_PUT_etag_in_footer(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Etag': 'other-etag',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
obj_etag = md5("obj data").hexdigest()
footer_meta = json.dumps({"Etag": obj_etag})
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.etag, obj_etag)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
with open(objfile) as fh:
self.assertEqual(fh.read(), "obj data")
def _check_container_override_etag_preference(self, override_headers,
override_footers):
def mock_container_update(ctlr, op, account, container, obj, req,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
calls_made = []
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Etag': 'other-etag',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'}
headers.update(override_headers)
req = Request.blank(
'/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'PUT'})
obj_etag = md5("obj data").hexdigest()
footers = {'Etag': obj_etag}
footers.update(override_footers)
footer_meta = json.dumps(footers)
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.etag, obj_etag)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
self.assertEqual({
'X-Size': str(len('obj data')),
'X-Etag': 'update-etag',
'X-Content-Type': 'text/plain',
'X-Timestamp': ts_put.internal,
}, calls_made[0][0])
self.assertEqual(POLICIES[0], calls_made[0][1])
def test_override_etag_lone_header_footer(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Backend-Container-Update-Override-Etag': 'update-etag'})
self._check_container_override_etag_preference(
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Object-Sysmeta-Container-Update-Override-Etag':
                 'update-etag'})
def test_override_etag_footer_trumps_header(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'ignored-etag'},
{'X-Backend-Container-Update-Override-Etag': 'update-etag'})
self._check_container_override_etag_preference(
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'ignored-etag'},
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'})
def test_override_etag_sysmeta_trumps_backend(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'ignored-etag',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Backend-Container-Update-Override-Etag': 'ignored-etag',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'})
def test_override_etag_sysmeta_header_trumps_backend_footer(self):
headers = {'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}
footers = {'X-Backend-Container-Update-Override-Etag':
'ignored-etag'}
self._check_container_override_etag_preference(headers, footers)
def test_override_etag_sysmeta_footer_trumps_backend_header(self):
headers = {'X-Backend-Container-Update-Override-Etag':
'ignored-etag'}
footers = {'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}
self._check_container_override_etag_preference(headers, footers)
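    # Footer error handling: an etag footer that does not match the body or
    # a bad footer checksum is rejected with 422, while a missing footer
    # checksum or unparseable footer JSON is rejected with 400.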
def test_PUT_etag_in_footer_mismatch(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({"Etag": md5("green").hexdigest()})
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"blue",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_meta_in_footer(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Object-Meta-X': 'Z',
'X-Object-Sysmeta-X': 'Z',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({
'X-Object-Meta-X': 'Y',
'X-Object-Sysmeta-X': 'Y',
})
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"stuff stuff stuff",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp},
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.headers.get('X-Object-Meta-X'), 'Y')
self.assertEqual(resp.headers.get('X-Object-Sysmeta-X'), 'Y')
def test_PUT_missing_footer_checksum(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({"Etag": md5("obj data").hexdigest()})
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
# no Content-MD5
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_bad_footer_checksum(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({"Etag": md5("obj data").hexdigest()})
bad_footer_meta_cksum = md5(footer_meta + "bad").hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
"Content-MD5: " + bad_footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_bad_footer_json(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = "{{{[[{{[{[[{[{[[{{{[{{{{[[{{[{["
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_extra_mime_docs_ignored(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({'X-Object-Meta-Mint': 'pepper'})
footer_meta_cksum = md5(footer_meta).hexdigest()
req.body = "\r\n".join((
"--boundary",
"",
"obj data",
"--boundary",
"Content-MD5: " + footer_meta_cksum,
"",
footer_meta,
"--boundary",
"This-Document-Is-Useless: yes",
"",
"blah blah I take up space",
"--boundary--"
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# swob made this into a StringIO for us
wsgi_input = req.environ['wsgi.input']
self.assertEqual(wsgi_input.tell(), len(wsgi_input.getvalue()))
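    # Without xattr support the diskfile cannot persist metadata, so the PUT
    # below must fail with 507 (insufficient storage).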
def test_PUT_user_metadata_no_xattr(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY THREE'
def mock_get_and_setxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, 'Operation not supported')
with mock.patch('xattr.getxattr', mock_get_and_setxattr):
with mock.patch('xattr.setxattr', mock_get_and_setxattr):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
def test_PUT_client_timeout(self):
class FakeTimeout(BaseException):
def __enter__(self):
raise self
def __exit__(self, typ, value, tb):
pass
# This is just so the test fails when run on older object server code
# instead of exploding.
if not hasattr(object_server, 'ChunkReadTimeout'):
object_server.ChunkReadTimeout = None
with mock.patch.object(object_server, 'ChunkReadTimeout', FakeTimeout):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '6'})
req.environ['wsgi.input'] = WsgiBytesIO(b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 408)
def test_PUT_system_metadata(self):
# check that sysmeta is stored in diskfile
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
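    # A PUT that is newer than the existing .data file but older than a later
    # POST still succeeds; afterwards both the new .data file and the newer
    # .meta file remain on disk.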
def test_PUT_succeeds_with_later_POST(self):
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
'Content-Type': 'plain/text'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_put2 = next(self.ts).internal
t_post = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put2,
'Content-Length': 0,
'Content-Type': 'plain/text'},
)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
        data_file = os.path.join(obj_dir, t_put2 + '.data')
        self.assertTrue(os.path.isfile(data_file))
meta_file = os.path.join(obj_dir, t_post + '.meta')
self.assertTrue(os.path.isfile(meta_file))
def test_POST_system_metadata(self):
# check that diskfile sysmeta is not changed by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Sysmeta-Two': 'Not Two'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two'})
# .meta file metadata should have only user meta items
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'X-Object-Meta-1': 'Not One'})
def test_POST_then_fetch_content_type(self):
# check that content_type is updated by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'Content-Type': 'text/html'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One'})
# .meta file metadata should have updated content-type
metafile_name = encode_timestamps(Timestamp(timestamp2),
Timestamp(timestamp2),
explicit=True)
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
metafile_name + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'Content-Type': 'text/html',
'Content-Type-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One'})
def check_response(resp):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/html')
self.assertEqual(resp.headers['content-type'], 'text/html')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp2)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'Not One')
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_POST_transient_sysmeta(self):
# check that diskfile transient system meta is changed by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertDictEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
# .meta file metadata should have only user meta items
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertDictEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'X-Object-Meta-1': 'Not One',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
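    # Round-trip checks: sysmeta and transient sysmeta stored on PUT are
    # returned on GET/HEAD, and a later POST may replace user metadata and
    # transient sysmeta but never X-Object-Sysmeta-* values.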
def test_PUT_then_fetch_system_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def check_response(resp):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two')
self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'],
'Bar')
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_PUT_then_POST_then_fetch_system_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-0': 'deleted by post',
'X-Object-Sysmeta-0': 'Zero',
'X-Object-Transient-Sysmeta-0': 'deleted by post',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Sysmeta-Two': 'Not Two',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def check_response(resp):
# user meta should be updated but not sysmeta
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp2)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'Not One')
self.assertEqual(resp.headers['x-object-sysmeta-0'], 'Zero')
self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two')
self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'],
'Not Bar')
self.assertNotIn('x-object-meta-0', resp.headers)
self.assertNotIn('x-object-transient-sysmeta-0', resp.headers)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_PUT_with_replication_headers(self):
# check that otherwise disallowed headers are accepted when specified
# by X-Backend-Replication-Headers
# first PUT object
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'Content-Length': '14',
'Etag': '1000d172764c9dbc3a5798a67ec5bb76',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False'})
req.body = 'VERIFY SYSMETA'
# restrict set of allowed headers on this server
with mock.patch.object(self.object_controller, 'allowed_headers',
['Custom-Header']):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
        # X-Static-Large-Object is not an allowed header here, so it is not
        # stored in the diskfile metadata.
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'Content-Length': '14',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1'})
# PUT object again with X-Backend-Replication-Headers
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'text/plain',
'Content-Length': '14',
'Etag': '1000d172764c9dbc3a5798a67ec5bb76',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False',
'X-Backend-Replication-Headers':
'X-Static-Large-Object'})
req.body = 'VERIFY SYSMETA'
with mock.patch.object(self.object_controller, 'allowed_headers',
['Custom-Header']):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.data')
# X-Static-Large-Object should be copied since it is now allowed by
# replication headers.
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp2,
'Content-Type': 'text/plain',
'Content-Length': '14',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False'})
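    # Container update failures (5xx responses or an exception from the
    # container server connection) must not fail the object PUT itself; the
    # PUT still returns 201.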
def test_PUT_container_connection(self):
def mock_http_connect(response, with_exc=False):
class FakeConn(object):
def __init__(self, status, with_exc):
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.with_exc = with_exc
def getresponse(self):
if self.with_exc:
raise Exception('test')
return self
def read(self, amt=None):
return ''
return lambda *args, **kwargs: FakeConn(response, with_exc)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect', mock_http_connect(201)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect', mock_http_connect(500)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect',
mock_http_connect(500, with_exc=True)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
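    # For EC policies a PUT can carry two indexes: X-Object-Sysmeta-Ec-Frag-
    # Index, which is recorded in the on-disk file name, and
    # X-Backend-Ssync-Frag-Index, the primary node index ssync uses for the
    # pre-existence check.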
def test_PUT_ssync_multi_frag(self):
timestamp = utils.Timestamp(time()).internal
def put_with_index(expected_rsp, frag_index, node_index=None):
data_file_tail = '#%d.data' % frag_index
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Ssync-Frag-Index': node_index,
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
'X-Backend-Storage-Policy-Index': int(policy)}
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(
resp.status_int, expected_rsp,
'got %s != %s for frag_index=%s node_index=%s' % (
resp.status_int, expected_rsp,
frag_index, node_index))
if expected_rsp == 409:
return
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
'p', hash_path('a', 'c', 'o')))
data_file = os.path.join(obj_dir, timestamp) + data_file_tail
self.assertTrue(os.path.isfile(data_file),
'Expected file %r not found in %r for policy %r'
% (data_file, os.listdir(obj_dir), int(policy)))
for policy in POLICIES:
if policy.policy_type == EC_POLICY:
                # upload with an ec-frag-index
put_with_index(201, 3)
                # a PUT at the same timestamp with a different ec-frag-index
                # will conflict
put_with_index(409, 2)
# but with the ssync-frag-index (primary node) it will just
# save both!
put_with_index(201, 2, 2)
                # but even with the ssync-frag-index we can still get a
                # timestamp collision if the file already exists
put_with_index(409, 3, 3)
                # FWIW, ssync will never send inconsistent indexes - but if
                # something else did, from the object server perspective ...
                # ... the ssync-frag-index is canonical on the
                # read/pre-existence check
put_with_index(409, 7, 2)
                # ... but the ec-frag-index is canonical when it comes to the
                # on-disk file name
put_with_index(201, 7, 6)
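    # Every EC data file should be accompanied by an empty .durable marker
    # file; replication policies never write one.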
def test_PUT_durable_files(self):
for policy in POLICIES:
timestamp = utils.Timestamp(int(time())).internal
data_file_tail = '.data'
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
data_file_tail = '#2.data'
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
'p', hash_path('a', 'c', 'o')))
data_file = os.path.join(obj_dir, timestamp) + data_file_tail
self.assertTrue(os.path.isfile(data_file),
'Expected file %r not found in %r for policy %r'
% (data_file, os.listdir(obj_dir), int(policy)))
durable_file = os.path.join(obj_dir, timestamp) + '.durable'
if policy.policy_type == EC_POLICY:
self.assertTrue(os.path.isfile(durable_file))
self.assertFalse(os.path.getsize(durable_file))
else:
self.assertFalse(os.path.isfile(durable_file))
rmtree(obj_dir)
def test_HEAD(self):
# Test swift.obj.server.ObjectController.HEAD
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse('X-Backend-Timestamp' in resp.headers)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 6)
self.assertEqual(resp.content_type, 'application/x-test')
self.assertEqual(resp.headers['content-type'], 'application/x-test')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"0b4c12d7e0a73840c1c4f148fda3b037"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-meta-two'], 'Two')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-length': '6'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(timestamp).internal)
def test_HEAD_quarantine_zbyte(self):
        # Test swift.obj.server.ObjectController.HEAD
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
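        # rewrite the data file with metadata only (zero-byte body) so the
        # next HEAD detects the corruption and quarantines the file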
with open(disk_file._data_file) as fp:
metadata = diskfile.read_metadata(fp)
os.unlink(disk_file._data_file)
with open(disk_file._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
file_name = os.path.basename(disk_file._data_file)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
def test_OPTIONS(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
server_handler = object_server.ObjectController(
conf, logger=debug_logger())
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEqual(200, resp.status_int)
        for verb in ('OPTIONS GET POST PUT DELETE HEAD REPLICATE '
                     'SSYNC').split():
            self.assertIn(verb, resp.headers['Allow'].split(', '))
self.assertEqual(len(resp.headers['Allow'].split(', ')), 8)
self.assertEqual(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_GET(self):
# Test swift.obj.server.ObjectController.GET
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse('X-Backend-Timestamp' in resp.headers)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'VERIFY')
self.assertEqual(resp.content_length, 6)
self.assertEqual(resp.content_type, 'application/x-test')
self.assertEqual(resp.headers['content-length'], '6')
self.assertEqual(resp.headers['content-type'], 'application/x-test')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"0b4c12d7e0a73840c1c4f148fda3b037"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-meta-two'], 'Two')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-3'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'ERI')
self.assertEqual(resp.headers['content-length'], '3')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'ERIFY')
self.assertEqual(resp.headers['content-length'], '5')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=-2'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'FY')
self.assertEqual(resp.headers['content-length'], '2')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
                                'Content-Type': 'application/octet-stream',
'Content-Length': '6'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(timestamp).internal)
def test_GET_if_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={
'If-Match': '"11111111111111111111111111111111", "%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={
'If-Match':
'"11111111111111111111111111111111", '
'"22222222222222222222222222222222"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_GET_if_match_etag_is_at(self):
headers = {
'X-Timestamp': utils.Timestamp(time()).internal,
'Content-Type': 'application/octet-stream',
'X-Object-Meta-Xtag': 'madeup',
'X-Object-Sysmeta-Xtag': 'alternate madeup',
}
req = Request.blank('/sda1/p/a/c/o', method='PUT',
headers=headers)
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
real_etag = resp.etag
# match x-backend-etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, using first in list of alternates
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At':
'X-Object-Meta-Xtag,X-Object-Sysmeta-Z'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, using second in list of alternates
alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Z'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, choosing first of multiple alternates
alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Xtag'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, choosing first of multiple alternates
# (switches order of second two alternates from previous assertion)
alts = 'X-Object-Sysmeta-Y,X-Object-Sysmeta-Xtag,X-Object-Meta-Xtag'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'alternate madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# no match x-backend-etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag,
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
# etag-is-at metadata doesn't exist, default to real etag
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag,
'X-Backend-Etag-Is-At': 'X-Object-Meta-Missing'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# sanity no-match with no etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
# sanity match with no etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# sanity with no if-match
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_HEAD_if_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={
'If-Match': '"11111111111111111111111111111111", "%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={
'If-Match':
'"11111111111111111111111111111111", '
'"22222222222222222222222222222222"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_GET_if_none_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'X-Object-Meta-Soup': 'gazpacho',
'Content-Type': 'application/fizzbuzz',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
self.assertEqual(resp.headers['Content-Type'], 'application/fizzbuzz')
self.assertEqual(resp.headers['X-Object-Meta-Soup'], 'gazpacho')
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match':
'"11111111111111111111111111111111", '
'"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
def test_HEAD_if_none_match(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match':
'"11111111111111111111111111111111", '
'"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
def test_GET_if_modified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
timestamp = normalize_timestamp(int(time()))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp)))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
def test_HEAD_if_modified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
        resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
timestamp = normalize_timestamp(int(time()))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp)))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
def test_GET_if_unmodified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'X-Object-Meta-Burr': 'ito',
'Content-Type': 'application/cat-picture',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 9))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertEqual(resp.headers['Content-Type'],
'application/cat-picture')
self.assertEqual(resp.headers['X-Object-Meta-Burr'], 'ito')
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 9))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_HEAD_if_unmodified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp))))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)) - 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def _create_ondisk_fragments(self, policy):
# Create some on disk files...
ts_iter = make_timestamp_iter()
# PUT at ts_0
ts_0 = next(ts_iter)
headers = {'X-Timestamp': ts_0.internal,
'Content-Length': '5',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '0'
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'OLDER'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# POST at ts_1
ts_1 = next(ts_iter)
headers = {'X-Timestamp': ts_1.internal,
'X-Backend-Storage-Policy-Index': int(policy)}
headers['X-Object-Meta-Test'] = 'abc'
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=headers)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# PUT again at ts_2 but without a .durable file
ts_2 = next(ts_iter)
headers = {'X-Timestamp': ts_2.internal,
'Content-Length': '5',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'NEWER'
# patch the commit method to do nothing so EC object gets
# no .durable file
with mock.patch('swift.obj.diskfile.ECDiskFileWriter.commit'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
return ts_0, ts_1, ts_2
def test_GET_HEAD_with_fragment_preferences(self):
for policy in POLICIES:
ts_0, ts_1, ts_2 = self._create_ondisk_fragments(policy)
backend_frags = json.dumps({ts_0.internal: [0],
ts_2.internal: [2]})
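            # helper: the durable ts_0 frag (index 0) should be served,
            # decorated with the ts_1 .meta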
def _assert_frag_0_at_ts_0(resp):
expect = {
'X-Timestamp': ts_1.normal,
'X-Backend-Timestamp': ts_1.internal,
'X-Backend-Data-Timestamp': ts_0.internal,
'X-Backend-Durable-Timestamp': ts_0.internal,
'X-Backend-Fragments': backend_frags,
'X-Object-Sysmeta-Ec-Frag-Index': '0',
'X-Object-Meta-Test': 'abc'}
self.assertDictContainsSubset(expect, resp.headers)
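            # helper: replicated policies should serve the ts_2 data with
            # no trace of the older ts_1 .meta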
def _assert_repl_data_at_ts_2():
self.assertIn(resp.status_int, (200, 202))
expect = {
'X-Timestamp': ts_2.normal,
'X-Backend-Timestamp': ts_2.internal,
'X-Backend-Data-Timestamp': ts_2.internal,
'X-Backend-Durable-Timestamp': ts_2.internal}
self.assertDictContainsSubset(expect, resp.headers)
self.assertNotIn('X-Object-Meta-Test', resp.headers)
# Sanity check: Request with no preferences should default to the
# durable frag
headers = {'X-Backend-Storage-Policy-Index': int(policy)}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
self.assertEqual(resp.body, 'OLDER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
else:
_assert_repl_data_at_ts_2()
# Request with preferences can select the older frag
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [1, 3]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
self.assertEqual(resp.body, 'OLDER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
else:
_assert_repl_data_at_ts_2()
def _assert_frag_2_at_ts_2(resp):
self.assertIn(resp.status_int, (200, 202))
# do not expect meta file to be included since it is older
expect = {
'X-Timestamp': ts_2.normal,
'X-Backend-Timestamp': ts_2.internal,
'X-Backend-Data-Timestamp': ts_2.internal,
'X-Backend-Durable-Timestamp': ts_0.internal,
'X-Backend-Fragments': backend_frags,
'X-Object-Sysmeta-Ec-Frag-Index': '2'}
self.assertDictContainsSubset(expect, resp.headers)
self.assertNotIn('X-Object-Meta-Test', resp.headers)
# Request with preferences can select the newer non-durable frag
prefs = json.dumps(
[{'timestamp': ts_2.internal, 'exclude': [1, 3]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
            # Request that prefers ts_0 but excludes index 0 will default
            # to the newest frag
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [0]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
            # Request with preferences that exclude all frags gets nothing
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [0]},
{'timestamp': ts_2.internal, 'exclude': [2]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
self.assertEqual(resp.status_int, 404)
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
self.assertEqual(resp.status_int, 404)
else:
_assert_repl_data_at_ts_2()
            # Request with empty preferences will get the newest frag even
            # though it is not durable
prefs = json.dumps([])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, 'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
def test_GET_quarantine(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
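        # overwrite the metadata with an etag computed from 'VERIF' so a
        # full read of the 'VERIFY' body hits an etag mismatch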
etag = md5()
etag.update('VERIF')
etag = etag.hexdigest()
metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o',
'Content-Length': 6, 'ETag': etag}
diskfile.write_metadata(disk_file._fp, metadata)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
body = resp.body # actually does quarantining
self.assertEqual(body, 'VERIFY')
self.assertEqual(os.listdir(quar_dir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_GET_quarantine_zbyte(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
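        # rewrite the data file with metadata only (zero-byte body) so the
        # next GET detects the corruption and quarantines the file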
with open(disk_file._data_file) as fp:
metadata = diskfile.read_metadata(fp)
os.unlink(disk_file._data_file)
with open(disk_file._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
def test_GET_quarantine_range(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
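        # corrupt the metadata (etag of 'VERIF'); partial range reads must
        # not quarantine, but a full-length read must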
etag = md5()
etag.update('VERIF')
etag = etag.hexdigest()
metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o',
'Content-Length': 6, 'ETag': etag}
diskfile.write_metadata(disk_file._fp, metadata)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=0-4' # partial
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
        resp.body  # force the read; a partial range hit must not quarantine
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
self.assertFalse(os.path.isdir(quar_dir))
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=1-6' # partial
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
        resp.body  # partial range read again; still must not quarantine
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
self.assertFalse(os.path.isdir(quar_dir))
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=0-14' # full
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
        resp.body  # full-length read hits the bad etag and quarantines
self.assertTrue(os.path.isdir(quar_dir))
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
@mock.patch("time.time", mock_time)
def test_DELETE(self):
# Test swift.obj.server.ObjectController.DELETE
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
        # The following request should create a tombstone file.
timestamp = normalize_timestamp(1000)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
ts_1000_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1000_file))
# There should now be a 1000 ts file.
self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1)
        # The following request should *not* create a tombstone file.
timestamp = normalize_timestamp(999)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
ts_999_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_999_file))
self.assertTrue(os.path.isfile(ts_1000_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1)
orig_timestamp = utils.Timestamp(1002).internal
headers = {'X-Timestamp': orig_timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
        # The PUT at 1002 should have removed the 1000 tombstone; only the
        # 1002 data file should remain.
data_1002_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
orig_timestamp + '.data')
self.assertTrue(os.path.isfile(data_1002_file))
self.assertEqual(len(os.listdir(os.path.dirname(data_1002_file))), 1)
        # The following request should *not* create a tombstone file.
timestamp = normalize_timestamp(1001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
ts_1001_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_1001_file))
self.assertTrue(os.path.isfile(data_1002_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1001_file))), 1)
timestamp = normalize_timestamp(1003)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
ts_1003_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1003_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1003_file))), 1)
def test_DELETE_succeeds_with_later_POST(self):
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
                                     'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_delete = next(self.ts).internal
t_post = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': t_delete},
)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
ts_file = os.path.join(obj_dir, t_delete + '.ts')
self.assertTrue(os.path.isfile(ts_file))
meta_file = os.path.join(obj_dir, t_post + '.meta')
self.assertTrue(os.path.isfile(meta_file))
def test_DELETE_container_updates(self):
# Test swift.obj.server.ObjectController.DELETE and container
# updates, making sure container update is called in the correct
# state.
start = time()
orig_timestamp = utils.Timestamp(start)
headers = {'X-Timestamp': orig_timestamp.internal,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
calls_made = [0]
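        # stub container_update so we can count how many of the DELETEs
        # below actually trigger a container update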
def our_container_update(*args, **kwargs):
calls_made[0] += 1
orig_cu = self.object_controller.container_update
self.object_controller.container_update = our_container_update
try:
# The following request should return 409 (HTTP Conflict). A
# tombstone file should not have been created with this timestamp.
timestamp = utils.Timestamp(start - 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
orig_timestamp.internal)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
self.assertEqual(0, calls_made[0])
# The following request should return 204, and the object should
# be truly deleted (container update is performed) because this
# timestamp is newer. A tombstone file should have been created
# with this timestamp.
timestamp = utils.Timestamp(start + 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(1, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
# The following request should return a 404, as the object should
# already have been deleted, but it should have also performed a
# container update because the timestamp is newer, and a tombstone
# file should also exist with this timestamp.
timestamp = utils.Timestamp(start + 0.00002)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(2, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
# The following request should return a 404, as the object should
# already have been deleted, and it should not have performed a
# container update because the timestamp is older, or created a
# tombstone file with this timestamp.
timestamp = utils.Timestamp(start + 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
self.assertEqual(2, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
finally:
self.object_controller.container_update = orig_cu
def test_DELETE_full_drive(self):
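        # simulate a full drive: writing the tombstone raises
        # DiskFileNoSpace, which should map to a 507 response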
def mock_diskfile_delete(self, timestamp):
raise DiskFileNoSpace()
t_put = utils.Timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put.internal,
'Content-Length': 0,
                                     'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
with mock.patch('swift.obj.diskfile.BaseDiskFile.delete',
mock_diskfile_delete):
t_delete = utils.Timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': t_delete.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
def test_object_update_with_offset(self):
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
# create a new object
create_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test1',
headers={'X-Timestamp': create_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/plain'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len('test1'),
'X-Etag': md5('test1').hexdigest(),
'X-Content-Type': 'text/plain',
'X-Timestamp': create_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back object
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(create_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
create_timestamp)
self.assertEqual(resp.body, 'test1')
# send an update with an offset
offset_timestamp = utils.Timestamp(
create_timestamp, offset=1).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test2',
headers={'X-Timestamp': offset_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/html'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len('test2'),
'X-Etag': md5('test2').hexdigest(),
'X-Content-Type': 'text/html',
'X-Timestamp': offset_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back new offset
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(offset_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
offset_timestamp)
self.assertEqual(resp.body, 'test2')
# now overwrite with a newer time
overwrite_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test3',
headers={'X-Timestamp': overwrite_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/enriched'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len('test3'),
'X-Etag': md5('test3').hexdigest(),
'X-Content-Type': 'text/enriched',
'X-Timestamp': overwrite_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back overwrite
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(overwrite_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
overwrite_timestamp)
self.assertEqual(resp.body, 'test3')
# delete with an offset
offset_delete = utils.Timestamp(overwrite_timestamp,
offset=1).internal
req = Request.blank('/sda1/p/a/c/o', method='DELETE',
headers={'X-Timestamp': offset_delete,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 204)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'DELETE')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': offset_delete,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back offset delete
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Timestamp'], None)
self.assertEqual(resp.headers['X-Backend-Timestamp'], offset_delete)
# and one more delete with a newer timestamp
delete_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='DELETE',
headers={'X-Timestamp': delete_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 404)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'DELETE')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': delete_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back delete
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Timestamp'], None)
self.assertEqual(resp.headers['X-Backend-Timestamp'], delete_timestamp)
def test_call_bad_request(self):
# Test swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
self.object_controller.__call__({'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '400 ')
def test_call_not_found(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
self.object_controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '404 ')
def test_call_bad_method(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
self.object_controller.__call__({'REQUEST_METHOD': 'INVALID',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_call_name_collision(self):
def my_check(*args):
return False
def my_hash_path(*args):
return md5('collide').hexdigest()
with mock.patch("swift.obj.diskfile.hash_path", my_hash_path):
with mock.patch("swift.obj.server.check_object_creation",
my_check):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
self.object_controller.__call__({
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'CONTENT_TYPE': 'text/html',
'HTTP_X_TIMESTAMP': normalize_timestamp(1.2),
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '201 ')
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
self.object_controller.__call__({
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/b/d/x',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'CONTENT_TYPE': 'text/html',
'HTTP_X_TIMESTAMP': normalize_timestamp(1.3),
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '403 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.object_controller.__call__({
'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c/o'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.object_controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c/o'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_chunked_put(self):
listener = listen(('localhost', 0))
port = listener.getsockname()[1]
killer = spawn(wsgi.server, listener, self.object_controller,
NullLogger())
sock = connect_tcp(('localhost', port))
fd = sock.makefile()
fd.write('PUT /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
'Content-Type: text/plain\r\n'
'Connection: close\r\nX-Timestamp: %s\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n' % normalize_timestamp(
1.0))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', port))
fd = sock.makefile()
fd.write('GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
response = fd.read()
self.assertEqual(response, 'oh hai')
killer.kill()
def test_chunked_content_length_mismatch_zero(self):
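        # When Content-Length: 0 and Transfer-Encoding: chunked are both
        # sent, the chunked body wins and the full payload is stored.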
listener = listen(('localhost', 0))
port = listener.getsockname()[1]
killer = spawn(wsgi.server, listener, self.object_controller,
NullLogger())
sock = connect_tcp(('localhost', port))
fd = sock.makefile()
fd.write('PUT /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
'Content-Type: text/plain\r\n'
'Connection: close\r\nX-Timestamp: %s\r\n'
'Content-Length: 0\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n' % normalize_timestamp(
1.0))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', port))
fd = sock.makefile()
fd.write('GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
response = fd.read()
self.assertEqual(response, 'oh hai')
killer.kill()
def test_max_object_name_length(self):
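        # Object names up to MAX_OBJECT_NAME_LENGTH are accepted; one
        # character longer is rejected with 400.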
timestamp = normalize_timestamp(time())
max_name_len = constraints.MAX_OBJECT_NAME_LENGTH
req = Request.blank(
'/sda1/p/a/c/' + ('1' * max_name_len),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'DATA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/' + ('2' * (max_name_len + 1)),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'DATA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_max_upload_time(self):
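        # A body that arrives slower than max_upload_time allows should make
        # the PUT fail with 408 Request Timeout.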
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return ' '
return ''
def set_hundred_continue_response_headers(*a, **kw):
pass
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.object_controller.max_upload_time = 0.1
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 408)
def test_short_body(self):
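        # A body shorter than the declared Content-Length is treated as a
        # client disconnect (499).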
class ShortBody(object):
def __init__(self):
self.sent = False
def read(self, size=-1):
if not self.sent:
self.sent = True
return ' '
return ''
def set_hundred_continue_response_headers(*a, **kw):
pass
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': ShortBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 499)
def test_bad_sinces(self):
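        # Unparseable If-Modified-Since / If-Unmodified-Since dates (and
        # dates past datetime.max) are ignored rather than rejected.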
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'},
body=' ')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': 'Not a valid date'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': 'Not a valid date'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
too_big_date_list = list(datetime.datetime.max.timetuple())
too_big_date_list[0] += 1 # bump up the year
too_big_date = strftime(
"%a, %d %b %Y %H:%M:%S UTC", struct_time(too_big_date_list))
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': too_big_date})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_content_encoding(self):
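        # Content-Encoding supplied on PUT is stored and echoed back on both
        # GET and HEAD.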
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'},
body=' ')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-encoding'], 'gzip')
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-encoding'], 'gzip')
def test_async_update_http_connect(self):
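        # async_update should pass the expected positional arguments,
        # including the storage policy index header, to http_connect.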
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
given_args = []
def fake_http_connect(*args):
given_args.extend(args)
raise Exception('test')
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
finally:
object_server.http_connect = orig_http_connect
self.assertEqual(
given_args,
['127.0.0.1', '1234', 'sdc1', 1, 'PUT', '/a/c/o', {
'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy)}])
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one'),
StoragePolicy(37, 'fantastico')])
def test_updating_multiple_delete_at_container_servers(self):
        # rebuild the diskfile router so it picks up the policies patched in
        # by @patch_policies above
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
policy = random.choice(list(POLICIES))
self.object_controller.expiring_objects_account = 'exp'
self.object_controller.expiring_objects_container_divisor = 60
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.items()
if v is not None))
return SuccessfulFakeConn()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1',
'X-Delete-At': 9999999999,
'X-Delete-At-Container': '9999999960',
'X-Delete-At-Host': "10.1.1.1:6201,10.2.2.2:6202",
'X-Delete-At-Partition': '6237',
'X-Delete-At-Device': 'sdp,sdq'})
with mock.patch.object(
object_server, 'http_connect', fake_http_connect):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEqual(len(http_connect_args), 3)
self.assertEqual(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c/o',
'device': 'sdb1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy),
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[1],
{'ipaddr': '10.1.1.1',
'port': '6201',
'path': '/exp/9999999960/9999999999-a/c/o',
'device': 'sdp',
'partition': '6237',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[2],
{'ipaddr': '10.2.2.2',
'port': '6202',
'path': '/exp/9999999960/9999999999-a/c/o',
'device': 'sdq',
'partition': '6237',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
'x-trans-id': '-'})})
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one'),
StoragePolicy(26, 'twice-thirteen')])
def test_updating_multiple_container_servers(self):
        # rebuild the diskfile router so it picks up the policies patched in
        # by @patch_policies above
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.items()
if v is not None))
return SuccessfulFakeConn()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': '26',
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Container-Device': 'sdb1, sdf1'})
with mock.patch.object(
object_server, 'http_connect', fake_http_connect):
with fake_spawn():
req.get_response(self.object_controller)
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEqual(len(http_connect_args), 2)
self.assertEqual(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c/o',
'device': 'sdb1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'X-Backend-Storage-Policy-Index': '26',
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c/o',
'device': 'sdf1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'X-Backend-Storage-Policy-Index': '26',
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_object_delete_at_async_update(self):
policy = random.choice(list(POLICIES))
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
put_timestamp = next(self.ts).internal
delete_at_timestamp = utils.normalize_delete_at_timestamp(
next(self.ts).normal)
delete_at_container = (
int(delete_at_timestamp) /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'X-Container-Host': '10.0.0.1:6201',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'X-Delete-At': delete_at_timestamp,
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': 'p',
'X-Delete-At-Host': '10.0.0.2:6202',
'X-Delete-At-Device': 'sda1',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', body='', headers=headers)
with mocked_http_conn(
500, 500, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(2, len(container_updates))
delete_at_update, container_update = container_updates
# delete_at_update
ip, port, method, path, headers = delete_at_update
self.assertEqual(ip, '10.0.0.2')
self.assertEqual(port, '6202')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/.expiring_objects/%s/%s-a/c/o' %
(delete_at_container, delete_at_timestamp))
expected = {
'X-Timestamp': put_timestamp,
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
# container_update
ip, port, method, path, headers = container_update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '6201')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': put_timestamp,
'X-Backend-Storage-Policy-Index': int(policy),
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
# check async pendings
async_dir = os.path.join(self.testdir, 'sda1',
diskfile.get_async_dir(policy))
found_files = []
for root, dirs, files in os.walk(async_dir):
for f in files:
async_file = os.path.join(root, f)
found_files.append(async_file)
data = pickle.load(open(async_file))
if data['account'] == 'a':
self.assertEqual(
int(data['headers']
['X-Backend-Storage-Policy-Index']), int(policy))
elif data['account'] == '.expiring_objects':
self.assertEqual(
int(data['headers']
['X-Backend-Storage-Policy-Index']), 0)
else:
self.fail('unexpected async pending data')
self.assertEqual(2, len(found_files))
def test_async_update_saves_on_exception(self):
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = ''
def fake_http_connect(*args):
raise Exception('test')
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
async_dir = diskfile.get_async_dir(policy)
self.assertEqual(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal))),
{'headers': {'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
def test_async_update_saves_on_non_2xx(self):
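        # Container updates that get a non-2xx response must be saved as
        # async pendings for the object updater to retry.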
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = ''
def fake_http_connect(status):
class FakeConn(object):
def __init__(self, status):
self.status = status
def getresponse(self):
return self
def read(self):
return ''
return lambda *args: FakeConn(status)
orig_http_connect = object_server.http_connect
try:
for status in (199, 300, 503):
object_server.http_connect = fake_http_connect(status)
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status),
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
async_dir = diskfile.get_async_dir(policy)
self.assertEqual(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal))),
{'headers': {'x-timestamp': '1', 'x-out': str(status),
'user-agent':
'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index':
int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o',
'op': 'PUT'})
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_async_update_does_not_save_on_2xx(self):
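        # Container updates that get a 2xx response must not leave an async
        # pending behind.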
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = ''
def fake_http_connect(status):
class FakeConn(object):
def __init__(self, status):
self.status = status
def getresponse(self):
return self
def read(self):
return ''
return lambda *args: FakeConn(status)
orig_http_connect = object_server.http_connect
try:
for status in (200, 299):
object_server.http_connect = fake_http_connect(status)
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status)}, 'sda1', 0)
self.assertFalse(
os.path.exists(os.path.join(
self.testdir, 'sda1', 'async_pending', 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-0000000001.00000')))
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_async_update_saves_on_timeout(self):
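        # A container update that exceeds node_timeout must still be saved
        # as an async pending.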
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = ''
def fake_http_connect():
class FakeConn(object):
def getresponse(self):
return sleep(1)
return lambda *args: FakeConn()
orig_http_connect = object_server.http_connect
try:
for status in (200, 299):
object_server.http_connect = fake_http_connect()
self.object_controller.node_timeout = 0.001
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status)}, 'sda1',
policy)
async_dir = diskfile.get_async_dir(policy)
self.assertTrue(
os.path.exists(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal)))
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_container_update_no_async_update(self):
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.container_update(
'PUT', 'a', 'c', 'o', req, {
'x-size': '0', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
'sda1', policy)
self.assertEqual(given_args, [])
def test_container_update_success(self):
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
req = Request.blank(
'/sda1/0/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain'}, body='')
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/a/c/o')
self.assertEqual(headers, HeaderKeyDict({
'user-agent': 'object-server %s' % os.getpid(),
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp(1).internal,
'X-Backend-Storage-Policy-Index': '0', # default when not given
'x-trans-id': '123',
'referer': 'PUT http://localhost/sda1/0/a/c/o'}))
def test_PUT_container_update_overrides(self):
def do_test(override_headers):
container_updates = []
def capture_updates(
ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
}
headers.update(override_headers)
req = Request.blank('/sda1/0/a/c/o', method='PUT',
headers=headers, body='')
with mocked_http_conn(
200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/a/c/o')
self.assertEqual(headers, HeaderKeyDict({
'user-agent': 'object-server %s' % os.getpid(),
'x-size': '0',
'x-etag': 'override_etag',
'x-content-type': 'override_val',
'x-timestamp': ts_put.internal,
'X-Backend-Storage-Policy-Index': '0', # default
'x-trans-id': '123',
'referer': 'PUT http://localhost/sda1/0/a/c/o',
'x-foo': 'bar'}))
# EC policy override headers
do_test({
'X-Backend-Container-Update-Override-Etag': 'override_etag',
'X-Backend-Container-Update-Override-Content-Type': 'override_val',
'X-Backend-Container-Update-Override-Foo': 'bar',
'X-Backend-Container-Ignored': 'ignored'})
# middleware override headers
do_test({
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
'override_val',
'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar',
'X-Object-Sysmeta-Ignored': 'ignored'})
# middleware override headers take precedence over EC policy headers
do_test({
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
'override_val',
'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar',
'X-Backend-Container-Update-Override-Etag': 'ignored',
'X-Backend-Container-Update-Override-Content-Type': 'ignored',
'X-Backend-Container-Update-Override-Foo': 'ignored'})
def test_container_update_async(self):
policy = random.choice(list(POLICIES))
req = Request.blank(
'/sda1/0/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
'X-Object-Sysmeta-Ec-Frag-Index': 0,
'X-Backend-Storage-Policy-Index': int(policy)}, body='')
given_args = []
def fake_pickle_async_update(*args):
given_args[:] = args
diskfile_mgr = self.object_controller._diskfile_router[policy]
diskfile_mgr.pickle_async_update = fake_pickle_async_update
with mocked_http_conn(500) as fake_conn, fake_spawn():
resp = req.get_response(self.object_controller)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(given_args), 7)
        (objdevice, account, container, obj, data, timestamp,
         given_policy) = given_args
self.assertEqual(objdevice, 'sda1')
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
self.assertEqual(timestamp, utils.Timestamp(1).internal)
        self.assertEqual(given_policy, policy)
self.assertEqual(data, {
'headers': HeaderKeyDict({
'X-Size': '0',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Content-Type': 'text/plain',
'X-Timestamp': utils.Timestamp(1).internal,
'X-Trans-Id': '123',
'Referer': 'PUT http://localhost/sda1/0/a/c/o',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}),
'obj': 'o',
'account': 'a',
'container': 'c',
'op': 'PUT'})
def test_container_update_as_greenthread(self):
greenthreads = []
saved_spawn_calls = []
called_async_update_args = []
def local_fake_spawn(func, *a, **kw):
saved_spawn_calls.append((func, a, kw))
return mock.MagicMock()
def local_fake_async_update(*a, **kw):
            # just capture the args to show that async_update would have
            # been called
called_async_update_args.append([a, kw])
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': 0,
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1'})
with mock.patch.object(object_server, 'spawn',
local_fake_spawn):
with mock.patch.object(self.object_controller,
'async_update',
local_fake_async_update):
resp = req.get_response(self.object_controller)
# check the response is completed and successful
self.assertEqual(resp.status_int, 201)
# check that async_update hasn't been called
self.assertFalse(len(called_async_update_args))
# now do the work in greenthreads
for func, a, kw in saved_spawn_calls:
gt = spawn(func, *a, **kw)
greenthreads.append(gt)
# wait for the greenthreads to finish
for gt in greenthreads:
gt.wait()
# check that the calls to async_update have happened
headers_out = {'X-Size': '0',
'X-Content-Type': 'application/burrito',
'X-Timestamp': '0000012345.00000',
'X-Trans-Id': '-',
'Referer': 'PUT http://localhost/sda1/p/a/c/o',
'X-Backend-Storage-Policy-Index': '0',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}
expected = [('PUT', 'a', 'c', 'o', '1.2.3.4:5', '20', 'sdb1',
headers_out, 'sda1', POLICIES[0]),
{'logger_thread_locals': (None, None)}]
self.assertEqual(called_async_update_args, [expected])
def test_container_update_as_greenthread_with_timeout(self):
'''
        give it one container to update (for only one greenthread)
        fake the greenthread so it will raise a timeout
        test that the right message is logged and the method returns None
'''
called_async_update_args = []
def local_fake_spawn(func, *a, **kw):
m = mock.MagicMock()
def wait_with_error():
raise Timeout()
m.wait = wait_with_error # because raise can't be in a lambda
return m
def local_fake_async_update(*a, **kw):
            # just capture the args to show that async_update would have
            # been called
called_async_update_args.append([a, kw])
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': 0,
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1'})
with mock.patch.object(object_server, 'spawn',
local_fake_spawn):
with mock.patch.object(self.object_controller,
'container_update_timeout',
1.414213562):
resp = req.get_response(self.object_controller)
# check the response is completed and successful
self.assertEqual(resp.status_int, 201)
# check that the timeout was logged
expected_logged_error = "Container update timeout (1.4142s) " \
"waiting for [('1.2.3.4:5', 'sdb1')]"
self.assertTrue(
expected_logged_error in
self.object_controller.logger.get_lines_for_level('debug'))
def test_container_update_bad_args(self):
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost,badhost',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.container_update(
'PUT', 'a', 'c', 'o', req, {
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
'sda1', policy)
self.assertEqual(given_args, [])
errors = self.object_controller.logger.get_lines_for_level('error')
self.assertEqual(len(errors), 1)
msg = errors[0]
self.assertTrue('Container update failed' in msg)
self.assertTrue('different numbers of hosts and devices' in msg)
self.assertTrue('chost,badhost' in msg)
self.assertTrue('cdevice' in msg)
def test_delete_at_update_on_put(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.delete_at_update(
'DELETE', 2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(
given_args, [
'DELETE', '.expiring_objects', '0000000000',
'0000000002-a/c/o', None, None, None,
HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '123',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_negative(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
# Test negative is reset to 0
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234', 'X-Backend-Storage-Policy-Index':
int(policy)})
self.object_controller.delete_at_update(
'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(given_args, [
'DELETE', '.expiring_objects', '0000000000', '0000000000-a/c/o',
None, None, None,
HeaderKeyDict({
# the expiring objects account is always 0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_cap(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
# Test past cap is reset to cap
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
'DELETE', 12345678901, 'a', 'c', 'o', req, 'sda1', policy)
expiring_obj_container = given_args.pop(2)
expected_exp_cont = utils.get_expirer_container(
utils.normalize_delete_at_timestamp(12345678901),
86400, 'a', 'c', 'o')
self.assertEqual(expiring_obj_container, expected_exp_cont)
self.assertEqual(given_args, [
'DELETE', '.expiring_objects', '9999999999-a/c/o',
None, None, None,
HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_put_with_info(self):
# Keep next test,
# test_delete_at_update_put_with_info_but_missing_container, in sync
# with this one but just missing the X-Delete-At-Container header.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Container': '0',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
'X-Delete-At-Device': 'sdc1',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
given_args, [
'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o',
'127.0.0.1:1234',
'3', 'sdc1', HeaderKeyDict({
# the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_put_with_info_but_missing_container(self):
# Same as previous test, test_delete_at_update_put_with_info, but just
# missing the X-Delete-At-Container header.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
self.object_controller.logger = self.logger
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
'X-Delete-At-Device': 'sdc1',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
self.logger.get_lines_for_level('warning'),
['X-Delete-At-Container header must be specified for expiring '
'objects background PUT to work properly. Making best guess as '
'to the container name for now.'])
def test_delete_at_update_delete(self):
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('DELETE', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
given_args, [
'DELETE', '.expiring_objects', '0000000000',
'0000000002-a/c/o', None, None,
None, HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'DELETE http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_backend_replication(self):
# If X-Backend-Replication: True delete_at_update should completely
# short-circuit.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Replication': 'True',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(given_args, [])
def test_POST_calls_delete_at(self):
policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 2})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [])
sleep(.00001)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(given_args, [])
sleep(.00001)
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = str(int(time() + 1000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp1,
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
delete_at_timestamp2 = str(int(time() + 2000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp2,
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
def test_PUT_calls_delete_at(self):
policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 4})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [])
sleep(.00001)
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = str(int(time() + 1000))
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp1,
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
delete_at_timestamp2 = str(int(time() + 2000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp2,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp2,
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
def test_GET_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 2000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': normalize_timestamp(test_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
orig_time = object_server.time.time
try:
t = time()
object_server.time.time = lambda: t
delete_at_timestamp = int(t + 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
put_timestamp = normalize_timestamp(test_time - 1000)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': normalize_timestamp(test_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
finally:
object_server.time.time = orig_time
orig_time = object_server.time.time
try:
t = time() + 2
object_server.time.time = lambda: t
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': normalize_timestamp(t)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(put_timestamp))
finally:
object_server.time.time = orig_time
def test_HEAD_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 2000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': normalize_timestamp(test_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
orig_time = object_server.time.time
try:
t = time()
delete_at_timestamp = int(t + 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
object_server.time.time = lambda: t
put_timestamp = normalize_timestamp(test_time - 1000)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': normalize_timestamp(test_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
finally:
object_server.time.time = orig_time
orig_time = object_server.time.time
try:
t = time() + 2
object_server.time.time = lambda: t
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': normalize_timestamp(time())})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(put_timestamp))
finally:
object_server.time.time = orig_time
def test_POST_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 2000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(test_time - 1500)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
delete_at_timestamp = int(time() + 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 1000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
orig_time = object_server.time.time
try:
t = time() + 2
object_server.time.time = lambda: t
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time())})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
finally:
object_server.time.time = orig_time
def test_DELETE_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 2000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
orig_time = object_server.time.time
try:
t = test_time + 100
object_server.time.time = lambda: float(t)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(time())})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
finally:
object_server.time.time = orig_time
def test_DELETE_if_delete_at_expired_still_deletes(self):
test_time = time() + 10
test_timestamp = normalize_timestamp(test_time)
delete_at_time = int(test_time + 10)
delete_at_timestamp = str(delete_at_time)
delete_at_container = str(
delete_at_time /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': test_timestamp,
'X-Delete-At': delete_at_timestamp,
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# sanity
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': test_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'TEST')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(test_timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
        # move time past expiry
with mock.patch('swift.obj.diskfile.time') as mock_time:
mock_time.time.return_value = test_time + 100
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': test_timestamp})
resp = req.get_response(self.object_controller)
# request will 404
self.assertEqual(resp.status_int, 404)
# but file still exists
self.assertTrue(os.path.isfile(objfile))
# make the x-if-delete-at with some wrong bits
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': int(time() + 1)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertTrue(os.path.isfile(objfile))
# make the x-if-delete-at with all the right bits
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
self.assertFalse(os.path.isfile(objfile))
# make the x-if-delete-at with all the right bits (again)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertFalse(os.path.isfile(objfile))
        # send the X-If-Delete-At DELETE for an object that does not exist
req = Request.blank(
'/sda1/p/a/c/o-not-found',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_DELETE_if_delete_at(self):
test_time = time() + 10000
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 99),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 98)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
delete_at_timestamp = int(test_time - 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 97),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 95),
'X-If-Delete-At': str(int(test_time))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 95)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
delete_at_timestamp = int(test_time - 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 94),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': str(int(test_time))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': 'abc'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_calls_delete_at(self):
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = int(time() + 1000)
delete_at_container1 = str(
delete_at_timestamp1 /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': str(delete_at_timestamp1),
'X-Delete-At-Container': delete_at_container1})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', POLICIES[0]])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(given_args, [
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', POLICIES[0]])
def test_PUT_delete_at_in_past(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'X-Delete-At': str(int(time() - 1)),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue('X-Delete-At in past' in resp.body)
def test_POST_delete_at_in_past(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time() + 1),
'X-Delete-At': str(int(time() - 1))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue('X-Delete-At in past' in resp.body)
def test_REPLICATE_works(self):
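        # REPLICATE should return the pickled suffix hashes computed by the
        # diskfile manager.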
def fake_get_hashes(*args, **kwargs):
return 0, {1: 2}
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
was_get_hashes = diskfile.DiskFileManager._get_hashes
was_tpool_exe = tpool.execute
try:
diskfile.DiskFileManager._get_hashes = fake_get_hashes
tpool.execute = my_tpool_execute
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
p_data = pickle.loads(resp.body)
self.assertEqual(p_data, {1: 2})
finally:
tpool.execute = was_tpool_exe
diskfile.DiskFileManager._get_hashes = was_get_hashes
def test_REPLICATE_timeout(self):
def fake_get_hashes(*args, **kwargs):
raise Timeout()
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
was_get_hashes = diskfile.DiskFileManager._get_hashes
was_tpool_exe = tpool.execute
try:
diskfile.DiskFileManager._get_hashes = fake_get_hashes
tpool.execute = my_tpool_execute
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
self.assertRaises(Timeout, self.object_controller.REPLICATE, req)
finally:
tpool.execute = was_tpool_exe
diskfile.DiskFileManager._get_hashes = was_get_hashes
def test_REPLICATE_insufficient_storage(self):
conf = {'devices': self.testdir, 'mount_check': 'true'}
self.object_controller = object_server.ObjectController(
conf, logger=debug_logger())
self.object_controller.bytes_per_sync = 1
def fake_check_mount(*args, **kwargs):
return False
with mock.patch("swift.obj.diskfile.check_mount", fake_check_mount):
req = Request.blank('/sda1/p/suff',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
def test_SSYNC_can_be_called(self):
req = Request.blank('/sda1/0',
environ={'REQUEST_METHOD': 'SSYNC'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_PUT_with_full_drive(self):
class IgnoredBody(object):
def __init__(self):
self.read_called = False
def read(self, size=-1):
if not self.read_called:
self.read_called = True
return 'VERIFY'
return ''
def fake_fallocate(fd, size):
raise OSError(errno.ENOSPC, os.strerror(errno.ENOSPC))
orig_fallocate = diskfile.fallocate
try:
diskfile.fallocate = fake_fallocate
timestamp = normalize_timestamp(time())
body_reader = IgnoredBody()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': body_reader},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'Expect': '100-continue'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
self.assertFalse(body_reader.read_called)
finally:
diskfile.fallocate = orig_fallocate
def test_global_conf_callback_does_nothing(self):
preloaded_app_conf = {}
global_conf = {}
object_server.global_conf_callback(preloaded_app_conf, global_conf)
self.assertEqual(preloaded_app_conf, {})
self.assertEqual(global_conf.keys(), ['replication_semaphore'])
try:
value = global_conf['replication_semaphore'][0].get_value()
except NotImplementedError:
# On some operating systems (at a minimum, OS X) it's not possible
# to introspect the value of a semaphore
raise SkipTest
else:
self.assertEqual(value, 4)
def test_global_conf_callback_replication_semaphore(self):
preloaded_app_conf = {'replication_concurrency': 123}
global_conf = {}
with mock.patch.object(
object_server.multiprocessing, 'BoundedSemaphore',
return_value='test1') as mocked_Semaphore:
object_server.global_conf_callback(preloaded_app_conf, global_conf)
self.assertEqual(preloaded_app_conf, {'replication_concurrency': 123})
self.assertEqual(global_conf, {'replication_semaphore': ['test1']})
mocked_Semaphore.assert_called_once_with(123)
def test_handling_of_replication_semaphore_config(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
objsrv = object_server.ObjectController(conf)
self.assertTrue(objsrv.replication_semaphore is None)
conf['replication_semaphore'] = ['sema']
objsrv = object_server.ObjectController(conf)
self.assertEqual(objsrv.replication_semaphore, 'sema')
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEqual(
object_server.ObjectController(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(
object_server.ObjectController(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(
object_server.ObjectController(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE', 'SSYNC']
for method_name in obj_methods:
method = getattr(self.object_controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.object_controller, method_name)
self.assertEqual(method.replication, True)
def test_correct_allowed_method(self):
        # Test that an allowed (public, non-replication) method is handled
        # correctly when invoked via swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.app_factory(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x:
mock.MagicMock(return_value=method_res))
with mock.patch.object(self.object_controller, method,
new=mock_method):
response = self.object_controller(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
        # Test that a NOT allowed (replication-only) method is rejected when
        # invoked via swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'}, logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.object_controller, method,
new=mock_method):
mock_method.replication = True
with mock.patch('time.gmtime',
mock.MagicMock(side_effect=[gmtime(10001.0)])):
with mock.patch('time.time',
mock.MagicMock(side_effect=[10000.0,
10001.0])):
with mock.patch('os.getpid',
mock.MagicMock(return_value=1234)):
response = self.object_controller.__call__(
env, start_response)
self.assertEqual(response, answer)
self.assertEqual(
self.logger.get_lines_for_level('info'),
['None - - [01/Jan/1970:02:46:41 +0000] "PUT'
' /sda1/p/a/c/o" 405 - "-" "-" "-" 1.0000 "-"'
' 1234 -'])
def test_call_incorrect_replication_method(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'}, logger=FakeLogger())
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST', 'OPTIONS']
for method in obj_methods:
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
self.object_controller(env, start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_not_utf8_and_not_logging_requests(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=FakeLogger())
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/\x00%20/%',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['Invalid UTF8 or contains NULL']
mock_method = public(lambda x: mock.MagicMock())
with mock.patch.object(self.object_controller, method,
new=mock_method):
response = self.object_controller.__call__(env, start_response)
self.assertEqual(response, answer)
self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test__call__returns_500(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.logger = debug_logger('test')
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
@public
def mock_put_method(*args, **kwargs):
raise Exception()
with mock.patch.object(self.object_controller, method,
new=mock_put_method):
response = self.object_controller.__call__(env, start_response)
self.assertTrue(response[0].startswith(
'Traceback (most recent call last):'))
self.assertEqual(self.logger.get_lines_for_level('error'), [
'ERROR __call__ error with %(method)s %(path)s : ' % {
'method': 'PUT', 'path': '/sda1/p/a/c/o'},
])
self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test_PUT_slow(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false',
'slow': '10'},
logger=self.logger)
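        # With 'slow': '10' the server pads each request out to ten seconds;
        # the mocked time.time below makes the request appear to take one
        # second, so the test expects a sleep(9) for the remainder.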
def start_response(*args):
# Sends args to outbuf
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
mock_method = public(lambda x: mock.MagicMock())
with mock.patch.object(self.object_controller, method,
new=mock_method):
with mock.patch('time.time',
mock.MagicMock(side_effect=[10000.0,
10001.0])):
with mock.patch('swift.obj.server.sleep',
mock.MagicMock()) as ms:
self.object_controller.__call__(env, start_response)
ms.assert_called_with(9)
self.assertEqual(self.logger.get_lines_for_level('info'),
[])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
self.object_controller.logger = self.logger
with mock.patch(
'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
with mock.patch(
'time.time',
mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=1234)):
req.get_response(self.object_controller)
self.assertEqual(
self.logger.get_lines_for_level('info'),
['1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c/o" '
'404 - "-" "-" "-" 2.0000 "-" 1234 -'])
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False)])
def test_dynamic_datadir(self):
# update router post patch
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Backend-Storage-Policy-Index': 1,
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects-1"
self.assertFalse(os.path.isdir(object_dir))
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
        # make sure a request with no policy index in the headers uses the
        # policy 0 data_dir
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects"
self.assertFalse(os.path.isdir(object_dir))
with mock.patch.object(POLICIES, 'get_by_index',
lambda _: True):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
def test_storage_policy_index_is_validated(self):
# sanity check that index for existing policy is ok
methods = ('PUT', 'POST', 'GET', 'HEAD', 'REPLICATE', 'DELETE')
valid_indices = sorted([int(policy) for policy in POLICIES])
for index in valid_indices:
object_dir = self.testdir + "/sda1/objects"
if index > 0:
object_dir = "%s-%s" % (object_dir, index)
self.assertFalse(os.path.isdir(object_dir))
for method in methods:
headers = {
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': index}
if POLICIES[index].policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': method},
headers=headers)
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertTrue(is_success(resp.status_int),
'%s method failed: %r' % (method, resp.status))
# index for non-existent policy should return 503
index = valid_indices[-1] + 1
for method in methods:
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': method},
headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': index})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects-%s" % index
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 503)
self.assertFalse(os.path.isdir(object_dir))
def test_race_doesnt_quarantine(self):
existing_timestamp = normalize_timestamp(time())
delete_timestamp = normalize_timestamp(time() + 1)
put_timestamp = normalize_timestamp(time() + 2)
        # make a tombstone (.ts) file
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': existing_timestamp})
req.get_response(self.object_controller)
# force a PUT between the listdir and read_metadata of a DELETE
put_once = [False]
orig_listdir = os.listdir
def mock_listdir(path):
listing = orig_listdir(path)
if not put_once[0]:
put_once[0] = True
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'Content-Length': '9',
'Content-Type': 'application/octet-stream'})
req.body = 'some data'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
return listing
with mock.patch('os.listdir', mock_listdir):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
qdir = os.path.join(self.testdir, 'sda1', 'quarantined')
self.assertFalse(os.path.exists(qdir))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'], put_timestamp)
def test_multiphase_put_draining(self):
        # We want to ensure that we read the whole request body even if
        # it's multipart MIME and there are document parts that we don't
# expect or understand. This'll help save our bacon if we ever jam
# more stuff in there.
in_a_timeout = [False]
# inherit from BaseException so we get a stack trace when the test
# fails instead of just a 500
class NotInATimeout(BaseException):
pass
class FakeTimeout(BaseException):
def __enter__(self):
in_a_timeout[0] = True
def __exit__(self, typ, value, tb):
in_a_timeout[0] = False
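        # PickyWsgiBytesIO raises unless every read of the request body
        # happens inside the (faked) ChunkReadTimeout, proving the server
        # wraps all of its wsgi.input reads in a timeout while draining.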
class PickyWsgiBytesIO(WsgiBytesIO):
def read(self, *a, **kw):
if not in_a_timeout[0]:
raise NotInATimeout()
return WsgiBytesIO.read(self, *a, **kw)
def readline(self, *a, **kw):
if not in_a_timeout[0]:
raise NotInATimeout()
return WsgiBytesIO.readline(self, *a, **kw)
test_data = 'obj data'
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "7",
"Etag": md5(test_data).hexdigest(),
}
footer_json = json.dumps(footer_meta)
footer_meta_cksum = md5(footer_json).hexdigest()
test_doc = "\r\n".join((
"--boundary123",
"X-Document: object body",
"",
test_data,
"--boundary123",
"X-Document: object metadata",
"Content-MD5: " + footer_meta_cksum,
"",
footer_json,
"--boundary123",
"X-Document: we got cleverer",
"",
"stuff stuff meaningless stuuuuuuuuuuff",
"--boundary123",
"X-Document: we got even cleverer; can you believe it?",
"Waneshaft: ambifacient lunar",
"Casing: malleable logarithmic",
"",
"potato potato potato potato potato potato potato",
"--boundary123--"
))
if six.PY3:
test_doc = test_doc.encode('utf-8')
# phase1 - PUT request with object metadata in footer and
# multiphase commit conversation
put_timestamp = utils.Timestamp(time()).internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
}
wsgi_input = PickyWsgiBytesIO(test_doc)
req = Request.blank(
"/sda1/0/a/c/o",
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': wsgi_input},
headers=headers)
app = object_server.ObjectController(self.conf, logger=self.logger)
with mock.patch('swift.obj.server.ChunkReadTimeout', FakeTimeout):
resp = req.get_response(app)
self.assertEqual(resp.status_int, 201) # sanity check
in_a_timeout[0] = True # so we can check without an exception
self.assertEqual(wsgi_input.read(), '') # we read all the bytes
@patch_policies(test_policies)
class TestObjectServer(unittest.TestCase):
def setUp(self):
# dirs
self.tmpdir = tempfile.mkdtemp()
self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server')
self.devices = os.path.join(self.tempdir, 'srv/node')
for device in ('sda1', 'sdb1'):
os.makedirs(os.path.join(self.devices, device))
self.conf = {
'devices': self.devices,
'swift_dir': self.tempdir,
'mount_check': 'false',
}
self.logger = debug_logger('test-object-server')
self.app = object_server.ObjectController(
self.conf, logger=self.logger)
sock = listen(('127.0.0.1', 0))
self.server = spawn(wsgi.server, sock, self.app, utils.NullLogger())
self.port = sock.getsockname()[1]
def tearDown(self):
rmtree(self.tmpdir)
def test_not_found(self):
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'GET', '/a/c/o')
resp = conn.getresponse()
self.assertEqual(resp.status, 404)
resp.read()
resp.close()
def test_expect_on_put(self):
test_body = 'test'
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'X-Timestamp': utils.Timestamp(time()).internal,
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
conn.send(test_body)
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
def test_expect_on_put_footer(self):
test_body = 'test'
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'X-Timestamp': utils.Timestamp(time()).internal,
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Obj-Metadata-Footer'], 'yes')
resp.close()
def test_expect_on_put_conflict(self):
test_body = 'test'
put_timestamp = utils.Timestamp(time())
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'X-Timestamp': put_timestamp.internal,
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
conn.send(test_body)
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# and again with same timestamp
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 409)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Backend-Timestamp'], put_timestamp)
resp.read()
resp.close()
def test_multiphase_put_no_mime_boundary(self):
test_data = 'obj data'
put_timestamp = utils.Timestamp(time()).internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 400)
resp.read()
resp.close()
    def test_expect_on_multiphase_put_disconnect(self):
put_timestamp = utils.Timestamp(time()).internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': 0,
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
conn.send('c\r\n--boundary123\r\n')
# disconnect client
conn.sock.fd._sock.close()
for i in range(2):
sleep(0)
self.assertFalse(self.logger.get_lines_for_level('error'))
for line in self.logger.get_lines_for_level('info'):
self.assertIn(' 499 ', line)
def find_files(self):
found_files = defaultdict(list)
for root, dirs, files in os.walk(self.devices):
for filename in files:
_name, ext = os.path.splitext(filename)
file_path = os.path.join(root, filename)
found_files[ext].append(file_path)
return found_files
@contextmanager
def _check_multiphase_put_commit_handling(self,
test_doc=None,
headers=None,
finish_body=True):
"""
        This helper will set up a multiphase chunked PUT request and yield
        the context at the commit phase (after getting the second expect-100
        continue response).
        It can set up a reasonable stub request, but you can override some
        characteristics of the request via kwargs.
:param test_doc: first part of the mime conversation before the object
server will send the 100-continue, this includes the
object body
        :param headers: headers to send along with the initial request; some
                        object metadata (e.g. X-Backend-Obj-Content-Length)
                        is generally expected to match the test_doc
:param finish_body: boolean, if true send "0\r\n\r\n" after test_doc
and wait for 100-continue before yielding context
"""
test_data = 'obj data'
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "2",
"Etag": md5(test_data).hexdigest(),
}
footer_json = json.dumps(footer_meta)
footer_meta_cksum = md5(footer_json).hexdigest()
test_doc = test_doc or "\r\n".join((
"--boundary123",
"X-Document: object body",
"",
test_data,
"--boundary123",
"X-Document: object metadata",
"Content-MD5: " + footer_meta_cksum,
"",
footer_json,
"--boundary123",
))
# phase1 - PUT request with object metadata in footer and
# multiphase commit conversation
put_timestamp = utils.Timestamp(time())
headers = headers or {
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
put_timestamp = utils.Timestamp(headers.setdefault(
'X-Timestamp', utils.Timestamp(time()).internal))
container_update = \
'swift.obj.server.ObjectController.container_update'
with mock.patch(container_update) as _container_update:
conn = bufferedhttp.http_connect(
'127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
expect_headers = HeaderKeyDict(resp.getheaders())
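            # frame the MIME document as a single HTTP chunk:
            # "<hex length>\r\n<data>\r\n"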
to_send = "%x\r\n%s\r\n" % (len(test_doc), test_doc)
conn.send(to_send)
if finish_body:
conn.send("0\r\n\r\n")
# verify 100-continue response to mark end of phase1
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
# yield relevant context for test
yield {
'conn': conn,
'expect_headers': expect_headers,
'put_timestamp': put_timestamp,
'mock_container_update': _container_update,
}
# give the object server a little time to trampoline enough to
# recognize request has finished, or socket has closed or whatever
sleep(0.1)
def test_multiphase_put_client_disconnect_right_before_commit(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
            # just bail straight out
conn.sock.fd._sock.close()
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# verify successful object data and durable state file write
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but .durable isn't
self.assertEqual(found_files['.durable'], [])
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_client_disconnect_in_the_middle_of_commit(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# start commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123--",
))
            # but don't quite finish sending the commit body
to_send = "%x\r\n%s" % \
(len(commit_confirmation_doc), commit_confirmation_doc[:-1])
conn.send(to_send)
# and then bail out
conn.sock.fd._sock.close()
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# verify successful object data and durable state file write
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but .durable isn't
self.assertEqual(found_files['.durable'], [])
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_no_metadata_replicated(self):
test_data = 'obj data'
test_doc = "\r\n".join((
"--boundary123",
"X-Document: object body",
"",
test_data,
"--boundary123",
))
put_timestamp = utils.Timestamp(time()).internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, headers=headers) as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
# N.B. no X-Obj-Metadata-Footer header
self.assertNotIn('X-Obj-Metadata-Footer', expect_headers)
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123--",
))
to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark the end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# replicated objects do not have a .durable file
self.assertEqual(found_files['.durable'], [])
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_metadata_footer(self):
with self._check_multiphase_put_commit_handling() as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
self.assertEqual(expect_headers['X-Obj-Metadata-Footer'], 'yes')
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123--",
))
to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark the end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# .durable file is there
self.assertEqual(len(found_files['.durable']), 1)
durable_file = found_files['.durable'][0]
self.assertEqual("%s.durable" % put_timestamp.internal,
os.path.basename(durable_file))
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_metadata_footer_disconnect(self):
test_data = 'obj data'
test_doc = "\r\n".join((
"--boundary123",
"X-Document: object body",
"",
test_data,
"--boundary123",
))
# eventlet.wsgi won't return < network_chunk_size from a chunked read
self.app.network_chunk_size = 16
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, finish_body=False) as context:
conn = context['conn']
# make footer doc
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "2",
"Etag": md5(test_data).hexdigest(),
}
footer_json = json.dumps(footer_meta)
footer_meta_cksum = md5(footer_json).hexdigest()
# send most of the footer doc
footer_doc = "\r\n".join((
"X-Document: object metadata",
"Content-MD5: " + footer_meta_cksum,
"",
footer_json,
))
# but don't send final boundary nor last chunk
to_send = "%x\r\n%s\r\n" % \
(len(footer_doc), footer_doc)
conn.send(to_send)
# and then bail out
conn.sock.fd._sock.close()
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# no artifacts left on disk
found_files = self.find_files()
self.assertEqual(len(found_files['.data']), 0)
self.assertEqual(len(found_files['.durable']), 0)
# ... and no container update
_container_update = context['mock_container_update']
self.assertFalse(_container_update.called)
def test_multiphase_put_ec_fragment_in_headers_no_footers(self):
test_data = 'obj data'
test_doc = "\r\n".join((
"--boundary123",
"X-Document: object body",
"",
test_data,
"--boundary123",
))
# phase1 - PUT request with multiphase commit conversation
# no object metadata in footer
put_timestamp = utils.Timestamp(time()).internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
# normally the frag index gets sent in the MIME footer (which this
# test doesn't have, see `test_multiphase_put_metadata_footer`),
# but the proxy *could* send the frag index in the headers and
# this test verifies that would work.
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, headers=headers) as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
# N.B. no X-Obj-Metadata-Footer header
self.assertNotIn('X-Obj-Metadata-Footer', expect_headers)
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123--",
))
to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark the end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# .durable file is there
self.assertEqual(len(found_files['.durable']), 1)
durable_file = found_files['.durable'][0]
self.assertEqual("%s.durable" % put_timestamp.internal,
os.path.basename(durable_file))
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_bad_commit_message(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"junkjunk",
"--boundary123--",
))
to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
resp = conn.getresponse()
self.assertEqual(resp.status, 500)
resp.read()
resp.close()
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# verify that durable file was NOT created
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but .durable isn't
self.assertEqual(found_files['.durable'], [])
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_drains_extra_commit_junk(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123",
"X-Document: we got cleverer",
"",
"stuff stuff meaningless stuuuuuuuuuuff",
"--boundary123",
"X-Document: we got even cleverer; can you believe it?",
"Waneshaft: ambifacient lunar",
"Casing: malleable logarithmic",
"",
"potato potato potato potato potato potato potato",
"--boundary123--",
))
to_send = "%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark the end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
# make another request to validate the HTTP protocol state
conn.putrequest('GET', '/sda1/0/a/c/o')
conn.putheader('X-Backend-Storage-Policy-Index', '1')
conn.endheaders()
resp = conn.getresponse()
self.assertEqual(resp.status, 200)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# .durable file is there
self.assertEqual(len(found_files['.durable']), 1)
durable_file = found_files['.durable'][0]
self.assertEqual("%s.durable" % put_timestamp.internal,
os.path.basename(durable_file))
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_drains_extra_commit_junk_disconnect(self):
commit_confirmation_doc = "\r\n".join((
"X-Document: put commit",
"",
"commit_confirmation",
"--boundary123",
"X-Document: we got cleverer",
"",
"stuff stuff meaningless stuuuuuuuuuuff",
"--boundary123",
"X-Document: we got even cleverer; can you believe it?",
"Waneshaft: ambifacient lunar",
"Casing: malleable logarithmic",
"",
"potato potato potato potato potato potato potato",
))
# eventlet.wsgi won't return < network_chunk_size from a chunked read
self.app.network_chunk_size = 16
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation and some other stuff
# but don't send final boundary or last chunk
to_send = "%x\r\n%s\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
# and then bail out
conn.sock.fd._sock.close()
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# ... and .durable is there
self.assertEqual(len(found_files['.durable']), 1)
durable_file = found_files['.durable'][0]
self.assertEqual("%s.durable" % put_timestamp.internal,
os.path.basename(durable_file))
# but no container update
self.assertFalse(context['mock_container_update'].called)
@patch_policies
class TestZeroCopy(unittest.TestCase):
"""Test the object server's zero-copy functionality"""
def _system_can_zero_copy(self):
if not splice.available:
return False
try:
utils.get_md5_socket()
except IOError:
return False
return True
def setUp(self):
if not self._system_can_zero_copy():
raise SkipTest("zero-copy support is missing")
self.testdir = mkdtemp(suffix="obj_server_zero_copy")
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
conf = {'devices': self.testdir,
'mount_check': 'false',
'splice': 'yes',
'disk_chunk_size': '4096'}
self.object_controller = object_server.ObjectController(
conf, logger=debug_logger())
self.df_mgr = diskfile.DiskFileManager(
conf, self.object_controller.logger)
listener = listen(('localhost', 0))
port = listener.getsockname()[1]
self.wsgi_greenlet = spawn(
wsgi.server, listener, self.object_controller, NullLogger())
self.http_conn = httplib.HTTPConnection('127.0.0.1', port)
self.http_conn.connect()
def tearDown(self):
"""Tear down for testing swift.object.server.ObjectController"""
self.wsgi_greenlet.kill()
rmtree(self.testdir)
def test_GET(self):
url_path = '/sda1/2100/a/c/o'
self.http_conn.request('PUT', url_path, 'obj contents',
{'X-Timestamp': '127082564.24709'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, 'obj contents')
def test_GET_big(self):
# Test with a large-ish object to make sure we handle full socket
# buffers correctly.
obj_contents = 'A' * 4 * 1024 * 1024 # 4 MiB
url_path = '/sda1/2100/a/c/o'
self.http_conn.request('PUT', url_path, obj_contents,
{'X-Timestamp': '1402600322.52126'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, obj_contents)
def test_quarantine(self):
obj_hash = hash_path('a', 'c', 'o')
url_path = '/sda1/2100/a/c/o'
ts = '1402601849.47475'
self.http_conn.request('PUT', url_path, 'obj contents',
{'X-Timestamp': ts})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
# go goof up the file on disk
fname = os.path.join(self.testdir, 'sda1', 'objects', '2100',
obj_hash[-3:], obj_hash, ts + '.data')
with open(fname, 'rb+') as fh:
fh.write('XYZ')
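        # The first GET still streams the (now corrupted) bytes because the
        # checksum mismatch is only detected after the data has been sent;
        # that detection quarantines the object, so the following GET 404s.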
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, 'XYZ contents')
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
# it was quarantined by the previous request
self.assertEqual(response.status, 404)
response.read()
def test_quarantine_on_well_formed_zero_byte_file(self):
# Make sure we work around an oddity in Linux's hash sockets
url_path = '/sda1/2100/a/c/o'
ts = '1402700497.71333'
self.http_conn.request(
'PUT', url_path, '',
{'X-Timestamp': ts, 'Content-Length': '0'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, '')
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200) # still there
contents = response.read()
self.assertEqual(contents, '')
if __name__ == '__main__':
unittest.main()
|
prashanthpai/swift
|
test/unit/obj/test_server.py
|
Python
|
apache-2.0
| 326,271
|
from subprocess import call
from mininet.net import Mininet
from mininet.link import TCLink
from wireless import WirelessHost
class TsaHost(WirelessHost):
def __init__(self, net, name, *args, **kwargs):
super(TsaHost, self).__init__(name, *args, **kwargs)
self.net = net
self.host = net.addHost(name, isUe=True)
self.tsa_switch = net.addSwitch('s%s' % name, isTsa=True)
self.link = net.addLink(self.host, self.tsa_switch)
self.in_port = self.tsa_switch.ports[self.link.intf2]
self.required_tx_bw = 0
self.required_rx_bw = 0
self.assigned_tx_bw = 0
self.assigned_rx_bw = 0
self.available_tx_bw = 0
self.available_rx_bw = 0
self.selected_ap = None
self.default_ap = None
self.aps_phy_params = []
self.available_aps = dict()
def init_wireless_links(self, ap_limit=4):
phy_params = self.phy_model.get_phy_params(self)
for phy_param in phy_params[0:ap_limit]:
ap_switch = phy_param['ap'].switch
self.net.addLink(self.tsa_switch, ap_switch, cls=TCLink)
self.available_aps[phy_param['ap'].name] = phy_param
# default selection is rssi
self.aps_phy_params = phy_params[0:ap_limit]
self.default_ap = phy_params[0]['ap']
def select_ap(self, ap):
# Configure uplink selection
if self.selected_ap != ap:
self.selected_ap = ap
self.available_tx_bw = self.available_aps[ap.name]['ul_bitrate']
self.available_rx_bw = self.available_aps[ap.name]['dl_bitrate']
src_intf = self.tsa_switch.connectionsTo(ap.switch)[0][0]
port = self.tsa_switch.ports[src_intf]
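            # install two OpenFlow rules on the host's tsa_switch so that
            # traffic for 10.0.0.51 and 10.0.0.52 is forwarded out of the
            # port facing the newly selected AP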
call(["ovs-ofctl", 'add-flow', self.tsa_switch.name,
"idle_timeout=0,priority=33000,dl_type=0x800,nw_dst=10.0.0.51,actions=output:{port}".format(
port=port)])
call(["ovs-ofctl", 'add-flow', self.tsa_switch.name,
"idle_timeout=0,priority=33000,dl_type=0x800,nw_dst=10.0.0.52,actions=output:{port}".format(
port=port)])
def __repr__(self):
return "name: {name} -> \tselected_ap: {sel_ap}\n" \
"\tavailable_tx: {av_tx} \tavailable_rx: {av_rx}\n" \
"\trequired_tx: {req_tx} \trequired_rx: {req_rx}\n" \
"\tassigned_tx: {as_tx} \tassigned_rx: {as_rx}\n".format(name=self.name, sel_ap=self.selected_ap.name,
av_tx=self.available_tx_bw,
av_rx=self.available_rx_bw,
as_tx=self.assigned_tx_bw,
as_rx=self.assigned_rx_bw,
req_tx=self.required_tx_bw,
req_rx=self.required_rx_bw)
class TsaAp(WirelessHost):
def __init__(self, net, name, *args, **kwargs):
self.tdma_mode = kwargs.pop('tdma', 'roundrobin')
super(TsaAp, self).__init__(name, *args, **kwargs)
self.net = net
self.switch = self.net.addSwitch(name, isAp=True)
self.phy_model.register_ap(self)
self.active_tx_ues = []
self.active_rx_ues = []
def tdma(self):
if self.tdma_mode == 'static':
            # static slots: this models a dumb scheduler assigning the same
            # amount of time to each active user
total_active_users = len(self.active_tx_ues) + len(self.active_rx_ues)
for node in self.active_tx_ues:
node.assigned_tx_bw = node.available_tx_bw / total_active_users
for node in self.active_rx_ues:
node.assigned_rx_bw = node.available_rx_bw / total_active_users
elif self.tdma_mode == 'roundrobin' or self.tdma_mode == 'rr':
# this models a round robin scheduler among active users
total_slots = 1000
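            # each pass over the active users hands out one slot worth of
            # bandwidth (available_bw / total_slots) to every node that still
            # needs more; it stops when the 1000 slots run out or a full pass
            # assigns nothing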
for node in self.active_tx_ues:
node.assigned_tx_bw = 0
for node in self.active_rx_ues:
node.assigned_rx_bw = 0
# We assign 1000 time slots:
available_slots = total_slots
while available_slots > 0:
prev_av_slots = available_slots
for node in self.active_tx_ues:
if node.assigned_tx_bw < node.required_tx_bw:
node.assigned_tx_bw += node.available_tx_bw / total_slots
available_slots -= 1
for node in self.active_rx_ues:
if node.assigned_rx_bw < node.required_rx_bw:
node.assigned_rx_bw += node.available_rx_bw / total_slots
available_slots -= 1
if available_slots == prev_av_slots:
break
        # Update link bandwidths according to the assigned_bws.
        # The connection goes through the host's internal tsa_switch.
for node in self.active_tx_ues:
src_intf = node.tsa_switch.connectionsTo(self.switch)[0][0]
bw = node.assigned_tx_bw / 1000000.0
src_intf.config(bw=bw)
print "{tx}->{ap} uplink bandwidth = {bw}Mbps".format(tx=node.name, ap=self.name, bw=bw)
for node in self.active_rx_ues:
dst_intf = node.tsa_switch.connectionsTo(self.switch)[0][1]
bw = node.assigned_rx_bw / 1000000.0
dst_intf.config(bw=bw)
print "{tx}->{ap} downlink bandwidth = {bw}Mbps".format(tx=node.name, ap=self.name, bw=bw)
def __repr__(self):
return "ap name: {name} \n" \
"\tactive_tx ues: {tx_ues}" \
"\tactive_rx_ues: {rx_ues}".format(name=self.name, tx_ues=self.active_tx_ues,
rx_ues=self.active_rx_ues)
class TsaNet(Mininet):
def __init__(self, *args, **kwargs):
super(TsaNet, self).__init__(*args, **kwargs)
self.tsa_hosts = []
self.tsa_aps = []
def add_ap(self, ap_name, pos, phy_model, **kwargs):
ap = TsaAp(self, ap_name, pos, phy_model, **kwargs)
self.tsa_aps.append(ap)
return ap
def add_tsa_host(self, host_name, pos, phy_model, **kwargs):
tsa_host = TsaHost(self, host_name, pos, phy_model, **kwargs)
self.tsa_hosts.append(tsa_host)
return tsa_host
|
cgiraldo/tsa-mininet
|
tsanet.py
|
Python
|
apache-2.0
| 6,658
|
#!/usr/bin/env python
"""
Hostname Release
================
This is a simple rule and can be run against the local host
using the following command::
$ insights-run -p examples.rules.hostname_rel
or from the examples/rules directory::
$ ./hostname_rel.py
"""
from insights.core.plugins import make_fail, make_pass, rule
from insights.parsers.hostname import Hostname
from insights.parsers.redhat_release import RedhatRelease
ERROR_KEY_1 = "RELEASE_IS_RHEL"
ERROR_KEY_2 = "RELEASE_IS_NOT_RECOGNIZED"
ERROR_KEY_3 = "RELEASE_CANNOT_BE_DETERMINED"
CONTENT = {
ERROR_KEY_1: "This release is RHEL\nHostname: {{ hostname }}\nRelease: {{ release }}",
ERROR_KEY_2: "This release is not RHEL\nHostname: {{ hostname }}\nRelease: {{ release }}",
ERROR_KEY_3: "This release is not RHEL\nHostname: {{ hostname }}\nRelease: not present"
}
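# Note: the CONTENT templates above are rendered with the keyword arguments
# passed to make_pass/make_fail below (e.g. hostname and release).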
@rule(Hostname, [RedhatRelease])
def report(hostname, release):
if release and release.is_rhel:
return make_pass(ERROR_KEY_1,
hostname=hostname.fqdn,
release=release.version)
elif release:
return make_fail(ERROR_KEY_2,
hostname=hostname.fqdn,
release=release.raw)
else:
return make_fail(ERROR_KEY_3, hostname=hostname.fqdn)
if __name__ == "__main__":
from insights import run
run(report, print_summary=True)
|
RedHatInsights/insights-core
|
examples/rules/hostname_rel.py
|
Python
|
apache-2.0
| 1,406
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions to generate resource labels strings for GCP entitites
These can be used on MonitoringInfo 'resource' labels.
See example entities:
https://s.apache.org/beam-gcp-debuggability
For GCP entities, populate the RESOURCE label with the aip.dev/122 format:
https://google.aip.dev/122
If an official GCP format does not exist, try to use the following format.
//whatever.googleapis.com/parents/{parentId}/whatevers/{whateverId}
"""
def BigQueryTable(project_id, dataset_id, table_id):
return '//bigquery.googleapis.com/projects/%s/datasets/%s/tables/%s' % (
project_id, dataset_id, table_id)
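# Illustrative example (made-up identifiers):
#   BigQueryTable('my-project', 'my_dataset', 'my_table')
#   -> '//bigquery.googleapis.com/projects/my-project/datasets/my_dataset/tables/my_table'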
|
axbaretto/beam
|
sdks/python/apache_beam/io/gcp/resource_identifiers.py
|
Python
|
apache-2.0
| 1,411
|
import os
import errno
import time
import json
import yaml
import boto3
import dulwich
import shutil
import giturlparse
from cStringIO import StringIO
from unidiff import PatchSet
from dulwich import porcelain
from dulwich.contrib.paramiko_vendor import ParamikoSSHVendor
from botocore.exceptions import ClientError
REGION = None
DRYRUN = None
GIT_REPO = None
SSH_KEY_PATH = None
SYSTEM_PARAM_PREFIX = None
PARAM_PREFIX = None
SNS_TOPIC_ARN = None
PATH_TO_REPO = "/tmp/repo"
## used to construct the URL for a git commit
GIT_COMMIT_URL = None
def initialize():
global REGION
global DRYRUN
global GIT_REPO
global SSH_KEY_PATH
global SYSTEM_PARAM_PREFIX
global PARAM_PREFIX
global SNS_TOPIC_ARN
global GIT_COMMIT_URL
PARAM_PREFIX = os.environ.get("PARAM_PREFIX")
SNS_TOPIC_ARN = os.environ.get("SNS_TOPIC_ARN", None)
GIT_REPO = os.environ.get("GIT_REPO")
SYSTEM_PARAM_PREFIX = os.environ.get("SYSTEM_PARAM_PREFIX")
REGION = os.environ.get('REGION', "None")
DRYRUN = os.environ.get('DRYRUN', "true").lower()
SSH_KEY_PATH = os.environ.get("SSH_KEY_PATH", None)
if DRYRUN == "false":
DRYRUN = False
else:
DRYRUN = True
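    # build the base commit URL for the hosting service; a commit id gets
    # appended later, e.g. https://github.com/<owner>/<repo>/commit/<sha> for
    # GitHub/GitLab and .../commits/<sha> for Bitbucket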
git_url = giturlparse.parse(GIT_REPO)
if git_url.resource == 'github.com' or git_url.resource == 'gitlab.com':
GIT_COMMIT_URL = "https://{}/{}/{}/commit".format(git_url.resource, git_url.owner, git_url.name)
elif git_url.resource == 'bitbucket.org':
GIT_COMMIT_URL = "https://{}/{}/{}/commits".format(git_url.resource, git_url.owner, git_url.name)
    ## clean up the repo if it already exists for some reason
shutil.rmtree(PATH_TO_REPO, ignore_errors=True)
# if SSH_KEY_PATH is not set, then try to read one from the
# ec2 param "SYSTEM_PARAM_PREFIX/ssh-key"
def set_up_ssh_key(ssm):
global SSH_KEY_PATH
if SSH_KEY_PATH is None:
SSH_KEY_PATH = '/tmp/id_rsa'
response = ssm.get_parameter(
Name=os.path.join(SYSTEM_PARAM_PREFIX, "ssh-key"),
WithDecryption=True,
)
if 'Parameter' in response and 'Value' in response['Parameter']:
with open(SSH_KEY_PATH, "w") as text_file:
text_file.write(response['Parameter']['Value'])
def clone_or_pull_repo(git_repo, path_to_repo):
dulwich.client.get_ssh_vendor = KeyParamikoSSHVendor
try:
        # cloning git repo
repo = dulwich.porcelain.clone(git_repo, path_to_repo)
except OSError as e:
if e.errno == errno.EEXIST:
repo = dulwich.porcelain.open_repo(path_to_repo)
# pulling changes for existing repo
dulwich.porcelain.pull(repo, git_repo)
else:
raise e
return repo
# list existing parameters in the ec2 param store by a given prefix
## this method is currently not used
def get_existing_parameters(ssm, prefix):
parameters = []
is_in = True
req = {'Filters': [{'Key': 'Name', 'Values': [prefix]}], 'MaxResults': 50}
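    # describe_parameters is paginated: keep requesting pages (50 results
    # each) while the response carries a NextToken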
while is_in:
start_time = time.time()
response = ssm.describe_parameters(**req)
if 'Parameters' in response:
parameters += response['Parameters']
if 'NextToken' in response:
req['NextToken'] = response['NextToken']
is_in = 'NextToken' in response and response['NextToken']
print("ExistingParams iteration time", time.time() - start_time)
return parameters
# get latest commit info for
# * repo - when f=None
# * file - when f=[file]
def get_latest_commit(repo, f=None):
w = repo.get_walker(paths=f, max_entries=1)
try:
c = iter(w).next().commit
except StopIteration:
print("No file {} anywhere in history.".format(f))
else:
return c
# Check difference between 2 commits and return
# lists of added_files, modified_files and removed_files
def diff_revisions(repo, commit1, commit2):
print("Comparing commits {} and {}".format(commit1, commit2))
diff_stream= StringIO()
porcelain.diff_tree(repo, repo[commit1.encode('ascii')].tree,repo[commit2.encode('ascii')].tree, outstream=diff_stream)
patch = PatchSet.from_string(diff_stream.getvalue())
diff_stream.close()
    # getting added/modified file names from the diff, by taking "target_file" and stripping the "b/" prefix
    # (the source file name will be /dev/null)
added_files = [f.target_file[2:] for f in patch.added_files]
modified_files = [f.target_file[2:] for f in patch.modified_files]
    # getting removed file names from the diff, by taking "source_file" and stripping the "a/" prefix
    # (the target file name will be /dev/null)
removed_files = [f.source_file[2:] for f in patch.removed_files]
return added_files, modified_files, removed_files
# list all files in the directory
# excluding some dirs and files like:
# .git, .gitignore, etc.
def list_dir(path):
files = []
for dirname, dirnames, filenames in os.walk(path):
if '.git' in dirnames:
# don't go into any .git directories.
dirnames.remove('.git')
if '.gitignore' in filenames:
filenames.remove('.gitignore')
elif 'README.md' in filenames:
filenames.remove('README.md')
        # collect the full path of every remaining file
for filename in filenames:
files.append(os.path.join(dirname, filename))
print("Found next files:")
for f in files: print(f)
print
return files
def validate_format(file, filecontent):
name, ext = os.path.splitext(file)
if ext == ".json":
try:
json.loads(filecontent)
except ValueError as exc:
return "JSON format problem: {}".format(str(exc))
return None
elif ext == ".yml" or ext == ".yaml":
try:
yaml.load(filecontent)
except yaml.YAMLError as exc:
return "YAML format problem: {}".format(str(exc))
return None
# find latest revision of the file and upload
# it to the ec2 parameters
# with:
# Name - filepath
# Value - file content
# Description - latest commit id
def upload_as_parameters(ssm, repo, files):
uploaded = []
failed = []
for f in files:
start_time = time.time()
# Param config name should start with the "/"
params_file = os.path.join(PARAM_PREFIX, f)
# getting latest commit for specified file
c = get_latest_commit(repo, f=[f])
# Update param only if its Description differs from latest commit
if not DRYRUN:
update_msg = {"Key":params_file, "Commit":c.id, "Author":c.author, "Time":time.ctime(c.author_time), "Message": c.message}
update_msg["KeyURL"] = "https://console.aws.amazon.com/ec2/v2/home?region={}#Parameters:Name=[Equals]{}".format(REGION, params_file)
if GIT_COMMIT_URL is not None:
update_msg["CommitURL"] = "/".join([GIT_COMMIT_URL, c.id])
# reading content of the file
with open(os.path.join(repo.path,f), 'r') as myfile:
data = myfile.read()
err = validate_format(f, data)
if err is not None:
update_msg['Error'] = err
failed.append(update_msg)
print("ERROR: Problem validating file format. File: {}. Details: {}".format(params_file, err))
continue
print("Updating param {}".format(params_file))
try:
response = ssm.put_parameter(
Name=params_file,
Description=c.id,
Value=data,
Type='SecureString',
# KeyId='string',
Overwrite=True,
# AllowedPattern='string'
)
uploaded.append(update_msg)
except Exception as e:
update_msg['Error'] = "Upload problem: {}".format(e)
failed.append(update_msg)
print("ERROR: Couldn't update param {}. Details: {}".format(params_file, e))
else:
print("Skipping param update for {}".format(params_file))
print("Upload iteration time", time.time() - start_time)
print
return uploaded, failed
# call to delete ec2 parameters
def delete_parameters(ssm, files):
if len(files) == 0:
return None, None
deleted_params = []
    invalid_params = []
# getting filename from PatchFile object and converting
# to the array with ec2 params names PREFIX/file
params_files = [os.path.join(PARAM_PREFIX, f) for f in files]
if not DRYRUN:
try:
# deleting params by 10 in one go, because of API limitation
for params_files_chunk in chunks(params_files, 10):
response = ssm.delete_parameters(
Names=params_files_chunk
)
deleted_params.append(response['DeletedParameters'])
                invalid_params.append(response['InvalidParameters'])
        except Exception as e:
print("ERROR: deleting params: {}".format(e))
print("Deleting params: {}".format(params_files))
        return deleted_params, invalid_params
else:
print("Skipping deletion for params: {}".format(params_files))
return None, None
# getting latest revision id from
# ec2 param "SYSTEM_PARAM_PREFIX/revision"
def get_latest_processed_revision(ssm):
try:
        # getting the latest processed commit id so we can run a diff
response = ssm.get_parameter(
Name=os.path.join(SYSTEM_PARAM_PREFIX, "revision"),
WithDecryption=True,
)
if 'Parameter' in response and 'Value' in response['Parameter']:
return response['Parameter']['Value']
except ClientError as e:
if e.response['Error']['Code'] != 'ParameterNotFound':
raise e
return None
# send SNS messages if function did some upload/removal
# TODO: send sns messages with errors if something went wrong
def send_sns_notification(msg):
print(msg)
if SNS_TOPIC_ARN is not None:
        # only publish if any of the added/modified/removed entries actually contain data
if any(lst for v in msg.values() if isinstance(v, dict) for lst in v.values()):
sns = boto3.client('sns', region_name=REGION)
# Pushing message to SNS, which will be pushed to hipchat by other lambda function
sns.publish(
TargetArn=SNS_TOPIC_ARN,
Message=json.dumps({'default': json.dumps(msg)}),
MessageStructure='json',
)
# needed for specifying custom ssh key for paramiko ssh
class KeyParamikoSSHVendor(ParamikoSSHVendor):
def __init__(self):
self.ssh_kwargs = {'key_filename': SSH_KEY_PATH}
def lambda_handler(event, context):
    # initializing ENV variables
initialize()
    # prepare the object with the messages that are going to
    # be sent to SNS.
    # setting the "type" key, so we can more easily identify the message
    # in the sns2slack lambda
msg = {'type': 'git2params'}
ssm = boto3.client('ssm', region_name=REGION)
# configuring ssh key for git client
set_up_ssh_key(ssm)
    # cloning the git repository
    repo = clone_or_pull_repo(GIT_REPO, PATH_TO_REPO)
    # getting the latest saved revision id from the param store
    latest_processed_commit = get_latest_processed_revision(ssm)
    # get the latest commit repo-wide
latest_commit = get_latest_commit(repo)
if latest_processed_commit == latest_commit.id:
print("No new commits found. Exiting")
return {'statusCode': 200}
# if latest processed commit not found, then treat execution
# like first run (adding new keys and overwriting existing)
if latest_processed_commit is None:
# listing files in the git repository
files = list_dir(PATH_TO_REPO)
msg['added'] = {}
msg['added']['success'], msg['added']['errors'] = upload_as_parameters(
ssm,
repo,
[os.path.relpath(f, PATH_TO_REPO) for f in files]
)
else:
# getting diff of the current and latest processed revisions and:
# * uploading added files
# * uploading modified files
# * deleting removed files
added_files, modified_files, removed_files = diff_revisions(repo, latest_processed_commit, latest_commit.id)
if added_files:
msg['added'] = {}
msg['added']['success'], msg['added']['errors']= upload_as_parameters(
ssm,
repo,
added_files
)
if modified_files:
msg['modified'] = {}
msg['modified']['success'], msg['modified']['errors'] = upload_as_parameters(
ssm,
repo,
modified_files
)
if removed_files:
msg['removed'] = {}
msg['removed']['success'], msg['removed']['errors'] = delete_parameters(
ssm,
removed_files
)
if not DRYRUN:
        # uploading the latest revision id to the EC2 params
        latest_revision_key = os.path.join(SYSTEM_PARAM_PREFIX, "revision")
        print("saving latest revision {} to the key {}".format(latest_commit.id, latest_revision_key))
response = ssm.put_parameter(
Name=latest_revision_key,
Description="Latest pulled commit",
Value=latest_commit.id,
Type='SecureString',
# KeyId='string',
Overwrite=True,
# AllowedPattern='string'
)
        # sending message to SNS
send_sns_notification(msg)
else:
print("Latest revision {}".format(latest_commit.id))
return {'statusCode': 200}
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]
if __name__ == '__main__':
    lambda_handler(None, None)
|
getsocial-rnd/git2params
|
handler.py
|
Python
|
apache-2.0
| 13,966
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetWebhook
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_Webhooks_GetWebhook_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_get_webhook():
# Create a client
client = dialogflowcx_v3beta1.WebhooksClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.GetWebhookRequest(
name="name_value",
)
# Make the request
response = client.get_webhook(request=request)
# Handle the response
print(response)
# [END dialogflow_v3beta1_generated_Webhooks_GetWebhook_sync]
|
googleapis/python-dialogflow-cx
|
samples/generated_samples/dialogflow_v3beta1_generated_webhooks_get_webhook_sync.py
|
Python
|
apache-2.0
| 1,458
|
#!/usr/bin/env python
import os
import rpy2.robjects as robjects
import env
import glm
import utils
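# For each land-use class, load the fitted R model from its .rds file via rpy2
# and emit equivalent Python code through the glm.GLM wrapper.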
lus = ['cropland', 'pasture', 'primary', 'secondary', 'urban']
model_dir = utils.lui_model_dir()
for lu in lus:
print('%s:' % lu)
with open(os.path.join(model_dir, lu + '.py'), 'w') as ofile:
fname = "out/_d5ed9724c6cb2c78b59707f69b3044e6/%s.rds" % lu
models = robjects.r('models <- readRDS("%s")' % fname)
for mm in models.items():
print(" %s" % mm[0])
mod = glm.GLM(mm[1])
ofile.write(mod.to_py(mm[0]))
|
ricardog/raster-project
|
projections/tests/ctest.py
|
Python
|
apache-2.0
| 544
|
# Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
def simple_vertex_shader():
return """#version 310 es
void main() {
gl_Position = vec4(1., 2., 3., 4.);
}"""
def simple_hlsl_vertex_shader():
return """float4 EntryPoint() : SV_POSITION { return float4(1.0); } """
def simple_fragment_shader():
return """#version 310 es
void main() {
gl_FragDepth = 10.;
}"""
def simple_tessellation_control_shader():
return """#version 440 core
layout(vertices = 3) out;
void main() { }"""
def simple_tessellation_evaluation_shader():
return """#version 440 core
layout(triangles) in;
void main() { }"""
def simple_geometry_shader():
return """#version 150 core
layout (triangles) in;
layout (line_strip, max_vertices = 4) out;
void main() { }"""
def simple_compute_shader():
return """#version 310 es
void main() {
uvec3 temp = gl_WorkGroupID;
}"""
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithGlslExtension(expect.ValidObjectFile):
"""Tests -fshader-stage with .glsl extension."""
shader = FileShader(simple_vertex_shader(), '.glsl')
glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithHlslExtension(expect.ValidObjectFile):
"""Tests -fshader-stage with .hlsl extension."""
shader = FileShader(simple_hlsl_vertex_shader(), '.hlsl')
glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithKnownExtension(expect.ValidObjectFile):
"""Tests -fshader-stage with known extension."""
shader = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-c', '-fshader-stage=fragment', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithUnknownExtension(expect.ValidObjectFile):
"""Tests -fshader-stage with unknown extension."""
shader = FileShader(simple_vertex_shader(), '.unknown')
glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithNoExtension(expect.ValidObjectFile):
"""Tests -fshader-stage with no extension."""
shader = FileShader(simple_vertex_shader(), '')
glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestAllShaderStages(expect.ValidObjectFile):
"""Tests all possible -fshader-stage values."""
shader1 = FileShader(simple_vertex_shader(), '.glsl')
shader2 = FileShader(simple_fragment_shader(), '.glsl')
shader3 = FileShader(simple_tessellation_control_shader(), '.glsl')
shader4 = FileShader(simple_tessellation_evaluation_shader(), '.glsl')
shader5 = FileShader(simple_geometry_shader(), '.glsl')
shader6 = FileShader(simple_compute_shader(), '.glsl')
glslc_args = [
'-c',
'-fshader-stage=vertex', shader1,
'-fshader-stage=fragment', shader2,
'-fshader-stage=tesscontrol', shader3,
'-fshader-stage=tesseval', shader4,
'-fshader-stage=geometry', shader5,
'-fshader-stage=compute', shader6]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageOverwriteFileExtension(expect.ValidObjectFile):
"""Tests -fshader-stage has precedence over file extension."""
# a vertex shader camouflaged with .frag extension
shader = FileShader(simple_vertex_shader(), '.frag')
# Command line says it's vertex shader. Should compile successfully
# as a vertex shader.
glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageLatterOverwriteFormer(expect.ValidObjectFile):
"""Tests a latter -fshader-stage overwrite a former one."""
shader = FileShader(simple_vertex_shader(), '.glsl')
glslc_args = [
'-c', '-fshader-stage=fragment', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWithMultipleFiles(expect.ValidObjectFile):
"""Tests -fshader-stage covers all subsequent files."""
shader1 = FileShader(simple_vertex_shader(), '.glsl')
# a vertex shader with .frag extension
shader2 = FileShader(simple_vertex_shader(), '.frag')
shader3 = FileShader(simple_vertex_shader(), '.a_vert_shader')
glslc_args = ['-c', '-fshader-stage=vertex', shader1, shader2, shader3]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageMultipleShaderStage(expect.ValidObjectFile):
"""Tests multiple -fshader-stage."""
shader1 = FileShader(simple_vertex_shader(), '.glsl')
shader2 = FileShader(simple_fragment_shader(), '.frag')
shader3 = FileShader(simple_vertex_shader(), '.a_vert_shader')
glslc_args = [
'-c',
'-fshader-stage=vertex', shader1,
'-fshader-stage=fragment', shader2,
'-fshader-stage=vertex', shader3]
@inside_glslc_testsuite('OptionShaderStage')
class TestFileExtensionBeforeShaderStage(expect.ValidObjectFile):
"""Tests that file extensions before -fshader-stage are not affected."""
# before -fshader-stage
shader1 = FileShader(simple_vertex_shader(), '.vert')
# after -fshader-stage
shader2 = FileShader(simple_fragment_shader(), '.frag')
shader3 = FileShader(simple_fragment_shader(), '.vert')
glslc_args = ['-c', shader1, '-fshader-stage=fragment', shader2, shader3]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageWrongShaderStageValue(expect.ErrorMessage):
"""Tests that wrong shader stage value results in an error."""
shader = FileShader(simple_vertex_shader(), '.glsl')
glslc_args = ['-c', '-fshader-stage=unknown', shader]
expected_error = ["glslc: error: stage not recognized: 'unknown'\n"]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageGlslExtensionMissingShaderStage(expect.ErrorMessage):
"""Tests that missing -fshader-stage for .glsl extension results in
an error."""
shader = FileShader(simple_vertex_shader(), '.glsl')
glslc_args = ['-c', shader]
expected_error = [
"glslc: error: '", shader,
"': .glsl file encountered but no -fshader-stage specified ahead\n"]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageHlslExtensionMissingShaderStage(expect.ErrorMessage):
"""Tests that missing -fshader-stage for .hlsl extension results in
an error."""
shader = FileShader(simple_hlsl_vertex_shader(), '.hlsl')
glslc_args = ['-c', '-x', 'hlsl', shader]
expected_error = [
"glslc: error: '", shader,
"': .hlsl file encountered but no -fshader-stage specified ahead\n"]
@inside_glslc_testsuite('OptionShaderStage')
class TestShaderStageUnknownExtensionMissingShaderStage(expect.ErrorMessage):
"""Tests that missing -fshader-stage for unknown extension results in
an error."""
shader = FileShader(simple_vertex_shader(), '.a_vert_shader')
glslc_args = ['-c', shader]
expected_error = [
"glslc: error: '", shader,
"': file not recognized: File format not recognized\n"]
|
fuchsia-mirror/third_party-shaderc
|
glslc/test/option_shader_stage.py
|
Python
|
apache-2.0
| 7,802
|
"""
Biothings API
Support running biothings.web as a module
>> python -m biothings.web
>> python -m biothings.web --dir=~/mygene.info/src
>> python -m biothings.web --dir=~/mygene.info/src --conf=config_web
>> python -m biothings.web --conf=biothings.web.settings.default
See more supported parameters in biothings.web.launcher.
"""
from biothings.web.launcher import main
main()
|
biothings/biothings.api
|
biothings/web/__main__.py
|
Python
|
apache-2.0
| 418
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''messaging based notification driver, with message envelopes'''
from oslo.config import cfg
from mtaaas.openstack.common import context as req_context
from mtaaas.openstack.common.gettextutils import _
from mtaaas.openstack.common import log as logging
from mtaaas.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ],
help='AMQP topic(s) used for openstack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2')
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message):
"""Sends a notification via RPC"""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.rpc_notifier2.topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
|
townbull/mtaaas-openstack
|
mtaaas/openstack/common/notifier/rpc_notifier2.py
|
Python
|
apache-2.0
| 1,916
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta1ContainerResourceMetricSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container': 'str',
'name': 'str',
'target_average_utilization': 'int',
'target_average_value': 'str'
}
attribute_map = {
'container': 'container',
'name': 'name',
'target_average_utilization': 'targetAverageUtilization',
'target_average_value': 'targetAverageValue'
}
def __init__(self, container=None, name=None, target_average_utilization=None, target_average_value=None, local_vars_configuration=None): # noqa: E501
"""V2beta1ContainerResourceMetricSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._container = None
self._name = None
self._target_average_utilization = None
self._target_average_value = None
self.discriminator = None
self.container = container
self.name = name
if target_average_utilization is not None:
self.target_average_utilization = target_average_utilization
if target_average_value is not None:
self.target_average_value = target_average_value
@property
def container(self):
"""Gets the container of this V2beta1ContainerResourceMetricSource. # noqa: E501
container is the name of the container in the pods of the scaling target # noqa: E501
:return: The container of this V2beta1ContainerResourceMetricSource. # noqa: E501
:rtype: str
"""
return self._container
@container.setter
def container(self, container):
"""Sets the container of this V2beta1ContainerResourceMetricSource.
container is the name of the container in the pods of the scaling target # noqa: E501
:param container: The container of this V2beta1ContainerResourceMetricSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and container is None: # noqa: E501
raise ValueError("Invalid value for `container`, must not be `None`") # noqa: E501
self._container = container
@property
def name(self):
"""Gets the name of this V2beta1ContainerResourceMetricSource. # noqa: E501
name is the name of the resource in question. # noqa: E501
:return: The name of this V2beta1ContainerResourceMetricSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V2beta1ContainerResourceMetricSource.
name is the name of the resource in question. # noqa: E501
:param name: The name of this V2beta1ContainerResourceMetricSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def target_average_utilization(self):
"""Gets the target_average_utilization of this V2beta1ContainerResourceMetricSource. # noqa: E501
targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. # noqa: E501
:return: The target_average_utilization of this V2beta1ContainerResourceMetricSource. # noqa: E501
:rtype: int
"""
return self._target_average_utilization
@target_average_utilization.setter
def target_average_utilization(self, target_average_utilization):
"""Sets the target_average_utilization of this V2beta1ContainerResourceMetricSource.
targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. # noqa: E501
:param target_average_utilization: The target_average_utilization of this V2beta1ContainerResourceMetricSource. # noqa: E501
:type: int
"""
self._target_average_utilization = target_average_utilization
@property
def target_average_value(self):
"""Gets the target_average_value of this V2beta1ContainerResourceMetricSource. # noqa: E501
targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. # noqa: E501
:return: The target_average_value of this V2beta1ContainerResourceMetricSource. # noqa: E501
:rtype: str
"""
return self._target_average_value
@target_average_value.setter
def target_average_value(self, target_average_value):
"""Sets the target_average_value of this V2beta1ContainerResourceMetricSource.
targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. # noqa: E501
:param target_average_value: The target_average_value of this V2beta1ContainerResourceMetricSource. # noqa: E501
:type: str
"""
self._target_average_value = target_average_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta1ContainerResourceMetricSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2beta1ContainerResourceMetricSource):
return True
return self.to_dict() != other.to_dict()
|
kubernetes-client/python
|
kubernetes/client/models/v2beta1_container_resource_metric_source.py
|
Python
|
apache-2.0
| 7,856
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu import cfg
from ryu.base import app_manager
from ryu.lib import hub
from ryu.lib.xflow import sflow
opts = [cfg.StrOpt('address', default='0.0.0.0',
help='sFlow Collector bind address'),
cfg.IntOpt('port', default=6343,
help='sFlow Collector port'),
cfg.IntOpt('max_udp_msg_size', default=1472,
help='Maximum size of UDP messages')]
cfg.CONF.register_opts(opts, 'plow')
class SFlow(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(SFlow, self).__init__(*args, **kwargs)
self._address = self.CONF.plow.address
self._port = self.CONF.plow.port
self._udp_msg_size = self.CONF.plow.max_udp_msg_size
self._udp_sock = None
def _handle(self, buf, addr):
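        # Parse the raw UDP payload as an sFlow datagram; unparseable packets are ignored.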
packet = sflow.sFlow.parser(buf)
if not packet:
return
        print(packet.__dict__)
def _recv_loop(self):
self.logger.info('Listening on %s:%s for sflow agents' %
(self._address, self._port))
while True:
buf, addr = self._udp_sock.recvfrom(self._udp_msg_size)
t = hub.spawn(self._handle, buf, addr)
self.threads.append(t)
def start(self):
self._udp_sock = hub.socket.socket(hub.socket.AF_INET,
hub.socket.SOCK_DGRAM)
self._udp_sock.bind((self._address, self._port))
t = hub.spawn(self._recv_loop)
super(SFlow, self).start()
return t
|
jkoelker/plow
|
plow/manager.py
|
Python
|
apache-2.0
| 2,106
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic testing for the orchestrator."""
import threading
import time
import drydock_provisioner.orchestrator.orchestrator as orch
import drydock_provisioner.objects.fields as hd_fields
class TestClass(object):
def test_task_complete(self, deckhand_ingester, input_files, setup,
blank_state, mock_get_build_data):
input_file = input_files.join("deckhand_fullsite.yaml")
design_ref = "file://%s" % str(input_file)
orchestrator = orch.Orchestrator(
state_manager=blank_state, ingester=deckhand_ingester)
orch_task = orchestrator.create_task(
action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref)
orch_task.set_status(hd_fields.TaskStatus.Queued)
orch_task.save()
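        # Run the orchestrator watcher in a background thread and give it time to pick up the queued task.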
orch_thread = threading.Thread(target=orchestrator.watch_for_tasks)
orch_thread.start()
try:
time.sleep(10)
orch_task = blank_state.get_task(orch_task.get_id())
assert orch_task.get_status() == hd_fields.TaskStatus.Complete
finally:
orchestrator.stop_orchestrator()
orch_thread.join(10)
def test_task_termination(self, input_files, deckhand_ingester, setup,
blank_state):
input_file = input_files.join("deckhand_fullsite.yaml")
design_ref = "file://%s" % str(input_file)
orchestrator = orch.Orchestrator(
state_manager=blank_state, ingester=deckhand_ingester)
orch_task = orchestrator.create_task(
action=hd_fields.OrchestratorAction.Noop, design_ref=design_ref)
orch_task.set_status(hd_fields.TaskStatus.Queued)
orch_task.save()
orch_thread = threading.Thread(target=orchestrator.watch_for_tasks)
orch_thread.start()
try:
time.sleep(2)
orchestrator.terminate_task(orch_task)
time.sleep(10)
orch_task = blank_state.get_task(orch_task.get_id())
assert orch_task.get_status() == hd_fields.TaskStatus.Terminated
finally:
orchestrator.stop_orchestrator()
orch_thread.join(10)
|
att-comdev/drydock
|
tests/integration/postgres/test_orch_generic.py
|
Python
|
apache-2.0
| 2,797
|
from django.http import HttpResponse
from django.core.exceptions import PermissionDenied
from django.dispatch import receiver
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from gui.decorators import ajax_required
from gui.vm.utils import get_vm
from gui.vm.replication.forms import ServerReplicaForm
from gui.signals import view_vm_details
from gui.utils import context_list_append
@login_required
@ajax_required
@require_POST
def replication_form(request, hostname):
"""
Ajax page for managing server replication.
"""
if not request.user.is_admin(request): # can_edit permission
raise PermissionDenied
vm = get_vm(request, hostname)
if vm.slave_vms:
slave_vm = vm.slave_vm.select_related('master_vm', 'vm', 'vm__node').exclude(name='').first()
else:
slave_vm = None
form = ServerReplicaForm(request, vm, slave_vm, request.POST, prefix='rep')
if form.is_valid():
status = form.save(args=(vm.hostname, form.cleaned_data['repname']))
if status == 205:
# The replica configuration has changed in DB, but does not affect the VM on compute node
return redirect('vm_details', hostname=vm.hostname)
elif 200 <= status < 400:
return HttpResponse(None, status=204) # Just hide the modal (socket.io callbacks will do the job)
return render(request, 'replication/vm_details_replica_form.html', {'form': form, 'vm': vm})
# noinspection PyUnusedLocal
@receiver(view_vm_details)
def vm_details(sender, request, context, **kwargs):
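    # Enable the replication UI on the VM details page: expose the feature flag and,
    # when the user can edit the VM, pre-build the replica form and extra templates.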
dc_settings = request.dc.settings
context['replication_enabled'] = dc_settings.VMS_VM_REPLICATION_ENABLED
if dc_settings.VMS_VM_REPLICATION_ENABLED and context.get('can_edit'):
context['replicaform'] = ServerReplicaForm(request, context['vm'], context['slave_vm'], prefix='rep',
vm_nodes=context['settingsform'].vm_nodes,
initial={'repname': 'replica1', 'sleep_time': 60,
'enabled': True, 'reserve_resources':
dc_settings.VMS_VM_REPLICA_RESERVATION_DEFAULT})
context_list_append(context, 'include_modals', 'replication/vm_details_modal.html')
context_list_append(context, 'include_details', 'replication/vm_details_status_row.html')
context_list_append(context, 'include_buttons', 'replication/vm_details_button.html')
|
erigones/esdc-ce
|
gui/vm/replication/views.py
|
Python
|
apache-2.0
| 2,656
|
# Copyright (c) 2016-2019 Chris Reed
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import string
import functools
from collections import (defaultdict, namedtuple)
from .bitstring import bitstring
from .utilities import (bytes_to_le16, hamming_weight)
from .formatter import Formatter
##
# @brief Base class for a decoded instruction.
class Instruction(object):
def __init__(self, mnemonic, word, is32bit):
self._mnemonic = mnemonic
self._word = word
self._is32bit = is32bit
self._address = 0
self.operands = []
@property
def mnemonic(self):
return self._mnemonic
@property
def size(self):
return 4 if self._is32bit else 2
@property
def address(self):
return self._address
@address.setter
def address(self, value):
self._address = value
@property
def bytes(self):
return bytearray((self._word >> (8 * i)) & 0xff for i in range(self.size))
def _eval(self, cpu):
cpu.pc += self.size
def execute(self, cpu):
return self._eval(cpu)
def __repr__(self):
i = (" %08x" if self._is32bit else " %04x") % self._word
return "<Instruction@0x%x %s %s>" % (id(self), self._mnemonic, i)
# A node of the decoder tree pairs a mask with a dictionary of child nodes. The dict
# keys are the unique values for the mask. If the mask value is 0, then the node is a
# leaf node and there is only one child.
DecoderTreeNode = namedtuple('DecoderTreeNode', 'mask children')
##
# @brief Exception raised when an instruction cannot be decoded successfully.
class UndefinedInstructionError(Exception):
pass
##
# @brief Selected decoder doesn't match, move on.
class DecodeError(Exception):
pass
##
# @brief Effects of an instruction encoding are unpredictable.
class UnpredictableError(Exception):
pass
##
# @brief Interface for decoding instruction byte sequences.
#
# Tree-based instruction decoding algorithm borrowed from Amoco project by Axel Tillequin
# (bdcht3@gmail.com) and re-written.
class DecoderTree(object):
_32bitMask = 0xf800
_32bitPrefixes = [0xf800, 0xf000, 0xe800]
def __init__(self):
self._decoders16 = []
self._decoders32 = []
self._tree16 = None
self._tree32 = None
def add_decoder(self, decoder):
if decoder.is32bit:
self._decoders32.append(decoder)
else:
self._decoders16.append(decoder)
def build(self):
self._tree16 = self._build_tree(self._decoders16)
self._tree32 = self._build_tree(self._decoders32)
def decode(self, data, dataAddress=0):
# Figure out if this is a 16-bit or 32-bit instruction and select the
# appropriate decoder tree.
assert len(data) >= 2
hw1 = bytes_to_le16(data)
is32bit = hw1 & self._32bitMask in self._32bitPrefixes
if is32bit:
if len(data) < 4:
raise UndefinedInstructionError()
hw2 = bytes_to_le16(data, 2)
word = hw1 | (hw2 << 16)
node = self._tree32
else:
word = hw1
node = self._tree16
while True:
if node.mask:
try:
node = node.children[word & node.mask]
except KeyError:
# Couldn't find a matching instruction.
raise UndefinedInstructionError()
else:
for d in node.children:
try:
if d.check(word):
return d.decode(word, address=dataAddress)
except DecodeError:
continue
# None of the decoders matched.
raise UndefinedInstructionError()
def _build_tree(self, decoders):
# Sort decoders in descending order of number of bits set in the mask.
# This sorting is required for proper computation of the common mask.
decoders = sorted(decoders, key=lambda d:hamming_weight(d._mask), reverse=True)
# If there is only one decoder at this level, there is nothing left to do.
if len(decoders) < 2:
return DecoderTreeNode(mask=0, children=decoders)
# Compute the mask of common bits that all decoders at this level have set.
commonMask = functools.reduce(lambda a, b: a & b, [d._mask for d in decoders])
if commonMask == 0:
return DecoderTreeNode(mask=commonMask, children=decoders)
# Find all decoders that have the same match values masked by the common mask.
children = defaultdict(list)
for decoder in decoders:
children[decoder._match & commonMask].append(decoder)
# If there is only one element in the children dict, then all decoders at this
# level have the same value under the common mask.
if len(children) == 1:
return DecoderTreeNode(mask=0, children=list(children.values())[0])
# Recursively process each group of children with the same match value at this level.
for k, subdecoders in children.items():
children[k] = self._build_tree(subdecoders)
return DecoderTreeNode(mask=commonMask, children=children)
def dump(self, t=None, depth=0):
if t is None:
print("16-bit instructions:")
self.dump(self._tree16)
print("32-bit instructions:")
self.dump(self._tree32)
else:
mask, nodes = t.mask, t.children
print(" " * depth, hex(mask), "=>")
if type(nodes) is list:
for i,d in enumerate(nodes):
print(" " * (depth + 1), i, ":", d)
else:
                for i, k in enumerate(nodes.keys()):
print(" " * (depth + 1), i, ":", hex(k))
self.dump(nodes[k], depth+2)
DECODER_TREE = DecoderTree()
##
# @brief
class Decoder(object):
def __init__(self, handler, mnemonic, klass, spec, spec2=None, **kwargs):
self._handler = handler
self._mnemonic = mnemonic
self._klass = klass
self.spec = spec
self.spec2 = spec2
self.args = kwargs
fmt = parse_spec(self.spec)
fmt.reverse()
self._mask, self._match, self._attrs = self.process_fmt(fmt)
if self.spec2 is not None:
fmt2 = parse_spec(self.spec2)
fmt2.reverse()
mask2, match2, attrs2 = self.process_fmt(fmt2, offset=16)
self._mask |= mask2
self._match |= match2
self._attrs.update(attrs2)
self.is32bit = True
else:
self.is32bit = False
def check(self, word):
return (word & self._mask) == self._match
def decode(self, word, address=0):
# Read bitfields from the instruction.
attrs = {}
for n,f in self._attrs.items():
attrs[n] = f(word)
# Create instruction object.
i = self._klass(self._mnemonic, word, self.is32bit)
i.address = address
for k, v in self.args.items():
setattr(i, k, v)
# Call handler to further decode instruction.
self._handler(i, **attrs)
return i
def __repr__(self):
return "<Decoder@0x%x %s %x/%x %s>" % (id(self), self._mnemonic, self._mask, self._match, self._attrs.keys())
def process_fmt(self, fmt, offset=0):
i = 0
mask = 0
match = 0
d = {}
for f in fmt:
if f in (0, 1):
# Update mask and match values with fixed bit.
mask |= 1 << i+offset
match |= f << i+offset
i += 1
elif type(f) is tuple:
name, value = f
if isinstance(value, bitstring):
mask |= value.mask << i+offset
match |= value.unsigned << i+offset
size = value.width
else:
size = value
# Put a lambda to extract this named field from the instruction word into the d dict.
d[name] = lambda b,i=i+offset,size=size: bitstring(b >> i, size)
i += size
else:
raise ValueError("unexpected format element in spec: %s" % f)
assert i == 16, "format was not exactly 16 bits (was %d)" % i
return mask, match, d
##
# @brief Decorator to build Decoder object from instruction format strings.
def instr(mnemonic, klass, spec, spec2=None, **kwargs):
def doit(fn):
DECODER_TREE.add_decoder(Decoder(fn, mnemonic, klass, spec, spec2, **kwargs))
return fn
return doit
# Grammar:
#
# start => field*
#
# field => bit | value
#
# bit => '0' | '1'
#
# value => ident ( '(' intlit ')' )?
# => ident '=' bits
#
# bits => bit+ ; terminates on first non-bit char
#
# ident => /[a-zA-Z][a-zA-Z0-9]*/
#
# intlit => /[0-9]+/
#
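# A rough illustration with a made-up 16-bit spec (not taken from any real
# instruction table): parse_spec("0 1 0 0 0 0 Rm(4) imm=101 Rd(3)") yields
# [0, 1, 0, 0, 0, 0, ('Rm', 4), ('imm', bitstring('101')), ('Rd', 3)].
# Decoder reverses this list before calling process_fmt(), so the first element
# of the spec string describes the most significant bits of the halfword.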
def parse_spec(spec):
result = []
i = 0
state = 0
ident = ''
bitcount = ''
bits = ''
expectingBitcount = False
while i < len(spec):
c = spec[i]
# Default state.
if state == 0:
if c in ('0', '1'):
if ident:
result.append((ident, 1))
ident = ''
result.append(int(c))
expectingBitcount = False
elif c in string.ascii_letters:
if ident:
result.append((ident, 1))
ident = ''
ident = c
state = 1
elif c == '(' and expectingBitcount:
state = 2
elif c in string.whitespace:
pass
else:
raise ValueError("unexpected character '%s' at position %d" % (c, i))
# Ident state.
elif state == 1:
if c == '(':
state = 2
elif c == '=':
bits = ''
state = 5
elif c not in string.ascii_letters + string.digits:
# Switch to default state and back up.
state = 0
i -= 1
expectingBitcount = True
else:
ident += c
# Enter bitcount state.
elif state == 2:
if c in string.digits:
bitcount = c
state = 3
elif c == ')':
bitcount = '1'
state = 0
elif c in string.whitespace:
pass
else:
raise ValueError("unexpected character '%s' at position %d" % (c, i))
# Bitcount state.
elif state == 3:
if c == ')':
result.append((ident, int(bitcount)))
ident = ''
bitcount = ''
state = 0
elif c not in string.digits:
state = 4
else:
bitcount += c
# Close bitcount state.
elif state == 4:
if c == ')':
result.append((ident, int(bitcount)))
ident = ''
bitcount = ''
state = 0
elif c in string.whitespace:
pass
else:
raise ValueError("unexpected character '%s' at position %d" % (c, i))
# Fixed value state.
elif state == 5:
if c in ('0', '1'):
bits += c
else:
result.append((ident, bitstring(bits)))
bits = ''
ident = ''
state = 0
i += 1
if ident:
if bits:
result.append((ident, bitstring(bits)))
else:
if not bitcount:
bitcount = '1'
result.append((ident, int(bitcount)))
return result
|
flit/cmdis
|
cmdis/decoder.py
|
Python
|
apache-2.0
| 12,607
|
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zun.api import hooks
# Pecan Application Configurations
app = {
'root': 'zun.api.controllers.root.RootController',
'modules': ['zun'],
'hooks': [
hooks.ContextHook(),
hooks.NoExceptionTracebackHook(),
hooks.RPCHook(),
],
'debug': False,
}
|
kevin-zhaoshuai/zun
|
zun/api/config.py
|
Python
|
apache-2.0
| 873
|
# -*- coding: utf-8 -*-
"""
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------
This file contains code for managing user accounts
"""
# pylint: disable=no-member
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
import urllib.request as urlrequest
except ImportError:
import urllib2 as urlrequest
import logging
import tornado
import json
from models.Theme import Theme
from models.User import User
from libs.EventManager import EventManager
from libs.ValidationError import ValidationError
from libs.SecurityDecorators import authenticated
from libs.XSSImageCheck import IMG_FORMATS
from builtins import str
from .BaseHandlers import BaseHandler
from tornado.options import options
RECAPTCHA_URL = "https://www.google.com/recaptcha/api/siteverify"
class HomeHandler(BaseHandler):
"""Allow for public view of user page if scoreboard set to public"""
def get(self, *args, **kwargs):
""" Display the default user page """
user = self.get_current_user()
if user:
admin = user.is_admin()
else:
admin = False
uuid = self.get_argument("id", None)
display_user = User.by_uuid(uuid)
visitor = False
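        # Anonymous visitors may only view a specific user's page when the scoreboard is public.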
if not user and (options.scoreboard_visibility != "public" or not display_user):
self.redirect("/login")
return
elif display_user and (not user or display_user != user):
user = display_user
visitor = True
if not user:
self.redirect("/login")
return
if uuid is None and user.is_admin():
self.timer()
self.render("admin/home.html", user=user)
else:
game_started = self.application.settings["game_started"] or user.is_admin()
self.render(
"user/home.html", user=user, game_started=game_started, visitor=visitor
)
class SettingsHandler(BaseHandler):
""" Modify user controlled attributes """
@authenticated
def get(self, *args, **kwargs):
""" Display the user settings """
self.render_page()
@authenticated
def post(self, *args, **kwargs):
""" Calls function based on parameter """
post_functions = {
"user_avatar": self.post_avatar,
"team_avatar": self.post_team_avatar,
"password": self.post_password,
"bank_password": self.post_bankpassword,
"theme": self.post_theme,
"motto": self.post_motto,
"email": self.post_email,
}
if len(args) == 1 and args[0] in post_functions:
post_functions[args[0]]()
else:
self.render_page()
def render_page(self, errors=[], success=[]):
""" Small wrap for self.render to cut down on lengthy params """
user = self.get_current_user()
self.add_content_policy("script", "'unsafe-eval'")
current_theme = Theme.by_id(self.session["theme_id"])
self.add_content_policy("script", "www.google.com")
self.add_content_policy("img", "www.google.com")
self.render(
"user/settings.html",
errors=errors,
success=success,
current_theme=current_theme,
user=user,
)
def post_avatar(self, *args, **kwargs):
"""
        Saves avatar - Reads file header and only allows approved formats
"""
user = self.get_current_user()
if self.get_argument("user_avatar_select", None):
avatar = self.get_argument("user_avatar_select", "")
if avatar.lower().endswith(tuple(IMG_FORMATS)):
user._avatar = avatar
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Updated avatar"])
elif hasattr(self.request, "files") and "user_avatar" in self.request.files:
try:
user.avatar = self.request.files["user_avatar"][0]["body"]
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Updated avatar"])
except ValidationError as error:
self.render_page(errors=[str(error)])
else:
self.render_page(errors=["Please provide an image"])
def post_team_avatar(self, *args, **kwargs):
"""
        Saves team avatar - Reads file header and only allows approved formats
"""
user = self.get_current_user()
if not user.team:
self.render_page(errors=["Not assigned to a team"])
elif self.get_argument("team_avatar_select", None):
avatar = self.get_argument("team_avatar_select", "")
if avatar.lower().endswith(tuple(IMG_FORMATS)):
user.team._avatar = avatar
self.dbsession.add(user)
self.dbsession.commit()
if self.config.teams:
self.render_page(success=["Updated team avatar"])
else:
self.render_page(success=["Updated avatar"])
elif hasattr(self.request, "files") and "team_avatar" in self.request.files:
try:
if user.team is None:
self.render_page(errors=["You do not belong to a team!"])
else:
user.team.avatar = self.request.files["team_avatar"][0]["body"]
self.dbsession.add(user)
self.dbsession.commit()
if self.config.teams:
self.render_page(success=["Updated team avatar"])
else:
self.render_page(success=["Updated avatar"])
except ValidationError as error:
self.render_page(errors=[str(error)])
else:
self.render_page(errors=["Please provide an image"])
def post_theme(self, *args, **kwargs):
""" Change per-user theme """
if not options.allow_user_to_change_theme:
self.render_page(errors=["Users are not allowed to change themes"])
return
theme = Theme.by_uuid(self.get_argument("theme_uuid", ""))
if theme is not None:
self.session["theme_id"] = theme.id
self.session["theme"] = [str(f) for f in theme.files]
self.session.save()
user = self.get_current_user()
user.theme_id = theme.id
self.dbsession.add(user)
self.dbsession.commit()
self.render_page()
else:
self.render_page(errors=["Theme does not exist."])
def post_motto(self, *args, **kwargs):
""" Change team motto """
user = self.get_current_user()
if not user.team:
self.render_page(errors=["Not assigned to a team"])
else:
user.team.motto = self.get_argument("motto", "")
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Successfully updated Motto."])
def post_email(self, *args, **kwargs):
""" Change user email """
user = self.get_current_user()
user.email = self.get_argument("email", "")
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Successfully updated email address."])
def post_password(self, *args, **kwargs):
""" Called on POST request for password change """
self.set_password(
self.get_current_user(),
self.get_argument("old_password", ""),
self.get_argument("new_password", ""),
self.get_argument("new_password2", ""),
)
def set_password(self, user, old_password, new_password, new_password2):
""" Sets a users password """
if user.validate_password(old_password):
if new_password == new_password2:
if (
len(new_password) >= options.min_user_password_length
or self.config.debug
):
user.password = new_password
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Successfully updated password"])
else:
self.render_page(
errors=[
"Password must be at least %d characters "
% (options.min_user_password_length,)
]
)
else:
self.render_page(errors=["New password's didn't match"])
else:
self.render_page(errors=["Invalid old password"])
def post_bankpassword(self):
""" Update user's bank password """
old_bankpw = self.get_argument("old_bpassword", "")
user = self.get_current_user()
if user.validate_bank_password(old_bankpw):
if self.config.use_recaptcha:
self.verify_recaptcha()
else:
self.set_bankpassword()
else:
self.render_page(errors=["Invalid old password."])
def set_bankpassword(self):
user = self.get_current_user()
new_bankpw = self.get_argument("new_bpassword", "")
if 0 < len(new_bankpw) <= options.max_password_length:
user.bank_password = new_bankpw
self.dbsession.add(user)
self.dbsession.commit()
self.render_page(success=["Successfully updated bank password"])
else:
self.render_page(
errors=[
"Invalid password - max length %s."
% str(options.max_password_length)
]
)
def verify_recaptcha(self):
""" Checks recaptcha """
recaptcha_response = self.get_argument("g-recaptcha-response", None)
if recaptcha_response:
recaptcha_req_data = {
"secret": self.config.recaptcha_secret_key,
"remoteip": self.request.remote_ip,
"response": recaptcha_response,
}
try:
recaptcha_req_body = urlencode(recaptcha_req_data).encode("utf-8")
                recaptcha_request = urlrequest.Request(RECAPTCHA_URL, recaptcha_req_body)
                response = urlrequest.urlopen(recaptcha_request)
self.recaptcha_callback(response)
except tornado.httpclient.HTTPError:
logging.exception("Recaptcha AsyncHTTP request threw an exception")
self.recaptcha_callback(None)
self.render_page(errors=["Error making backend recaptcha request"])
else:
self.render_page(errors=["Invalid captcha, try again"])
def recaptcha_callback(self, response):
"""
Validates recaptcha response
Recaptcha docs: https://developers.google.com/recaptcha/docs/verify
"""
if response:
result = json.loads(response.read())
if result["success"]:
self.set_bankpassword()
return
self.render_page(errors=["Invalid captcha, try again"])
class LogoutHandler(BaseHandler):
""" Log user out of current session """
def get(self, *args, **kwargs):
""" Redirect """
if self.session is not None:
self.redirect("/user")
else:
self.redirect("/login")
def post(self, *args, **kwargs):
""" Clears cookies and session data """
if self.session is not None:
user = self.get_current_user()
EventManager.instance().deauth(user)
self.session.delete()
self.clear_all_cookies()
self.redirect("/")
|
moloch--/RootTheBox
|
handlers/UserHandlers.py
|
Python
|
apache-2.0
| 12,489
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import requests
from starlette.concurrency import run_in_threadpool
from platformio import util
from platformio.compat import IS_WINDOWS
from platformio.proc import where_is_program
class AsyncSession(requests.Session):
async def request( # pylint: disable=signature-differs,invalid-overridden-method
self, *args, **kwargs
):
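        # Run the blocking requests.Session.request call in a worker thread so it can be awaited.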
func = super(AsyncSession, self).request
return await run_in_threadpool(func, *args, **kwargs)
@util.memoized(expire="60s")
def requests_session():
return AsyncSession()
@util.memoized(expire="60s")
def get_core_fullpath():
return where_is_program("platformio" + (".exe" if IS_WINDOWS else ""))
def is_port_used(host, port):
socket.setdefaulttimeout(1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if IS_WINDOWS:
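        # On Windows, try to bind: a successful bind means nothing is listening on the port.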
try:
s.bind((host, port))
s.close()
return False
except (OSError, socket.error):
pass
else:
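        # Elsewhere, try to connect: a refused connection means the port is free.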
try:
s.connect((host, port))
s.close()
except socket.error:
return False
return True
|
platformio/platformio-core
|
platformio/commands/home/helpers.py
|
Python
|
apache-2.0
| 1,738
|
__author__ = 'Joe Linn'
from pylastica.param import Param
class Suggest(Param):
"""
@see: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html
"""
def __init__(self, suggestion=None):
"""
@param suggestion: optional
@type suggestion: pylastica.suggest.abstract.AbstractSuggestion
"""
super(Suggest, self).__init__()
if suggestion is not None:
self.add_suggestion(suggestion)
def set_global_text(self, text):
"""
Set the global text for this suggester
@param text:
@type text: str
@rtype: self
"""
return self.set_param("text", text)
def add_suggestion(self, suggestion):
"""
Add a suggestion to this suggest clause
@param suggestion:
@type suggestion: pylastica.suggest.abstract.AbstractSuggestion
@return:
@rtype: self
"""
        return self.set_param(suggestion.name, suggestion.to_dict())
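# Minimal usage sketch (hypothetical; assumes `s` is an instance of some
# pylastica.suggest.abstract.AbstractSuggestion subclass):
#   suggest = Suggest(s)
#   suggest.set_global_text("some query text")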
|
jlinn/pylastica
|
pylastica/suggest/suggest.py
|
Python
|
apache-2.0
| 1,028
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-10 07:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project', '0069_auto_20180809_1159'),
]
operations = [
migrations.AddField(
model_name='application',
name='decision_maker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application_decisions', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='application',
name='win_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='application',
name='win_decision_maker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application_selections', to=settings.AUTH_USER_MODEL),
),
]
|
unicef/un-partner-portal
|
backend/unpp_api/apps/project/migrations/0070_auto_20180810_0729.py
|
Python
|
apache-2.0
| 1,173
|
import pytest
import aiohttp
from aiohttp import content_disposition_filename, parse_content_disposition
class TestParseContentDisposition:
# http://greenbytes.de/tech/tc2231/
def test_parse_empty(self) -> None:
disptype, params = parse_content_disposition(None)
assert disptype is None
assert {} == params
def test_inlonly(self) -> None:
disptype, params = parse_content_disposition('inline')
assert 'inline' == disptype
assert {} == params
def test_inlonlyquoted(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"inline"')
assert disptype is None
assert {} == params
def test_semicolon(self) -> None:
disptype, params = parse_content_disposition(
'form-data; name="data"; filename="file ; name.mp4"')
assert disptype == 'form-data'
assert params == {'name': 'data', 'filename': 'file ; name.mp4'}
def test_inlwithasciifilename(self) -> None:
disptype, params = parse_content_disposition(
'inline; filename="foo.html"')
assert 'inline' == disptype
assert {'filename': 'foo.html'} == params
def test_inlwithfnattach(self) -> None:
disptype, params = parse_content_disposition(
'inline; filename="Not an attachment!"')
assert 'inline' == disptype
assert {'filename': 'Not an attachment!'} == params
def test_attonly(self) -> None:
disptype, params = parse_content_disposition('attachment')
assert 'attachment' == disptype
assert {} == params
def test_attonlyquoted(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"attachment"')
assert disptype is None
assert {} == params
def test_attonlyucase(self) -> None:
disptype, params = parse_content_disposition('ATTACHMENT')
assert 'attachment' == disptype
assert {} == params
def test_attwithasciifilename(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_inlwithasciifilenamepdf(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo.pdf"')
assert 'attachment' == disptype
assert {'filename': 'foo.pdf'} == params
def test_attwithasciifilename25(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="0000000000111111111122222"')
assert 'attachment' == disptype
assert {'filename': '0000000000111111111122222'} == params
def test_attwithasciifilename35(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="00000000001111111111222222222233333"')
assert 'attachment' == disptype
assert {'filename': '00000000001111111111222222222233333'} == params
def test_attwithasciifnescapedchar(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename="f\oo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attwithasciifnescapedquote(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="\"quoting\" tested.html"')
assert 'attachment' == disptype
assert {'filename': '"quoting" tested.html'} == params
@pytest.mark.skip('need more smart parser which respects quoted text')
def test_attwithquotedsemicolon(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="Here\'s a semicolon;.html"')
assert 'attachment' == disptype
assert {'filename': 'Here\'s a semicolon;.html'} == params
def test_attwithfilenameandextparam(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foo="bar"; filename="foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html', 'foo': 'bar'} == params
def test_attwithfilenameandextparamescaped(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foo="\"\\";filename="foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html', 'foo': '"\\'} == params
def test_attwithasciifilenameucase(self) -> None:
disptype, params = parse_content_disposition(
'attachment; FILENAME="foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attwithasciifilenamenq(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename=foo.html')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attwithtokfncommanq(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo,bar.html')
assert disptype is None
assert {} == params
def test_attwithasciifilenamenqs(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo.html ;')
assert disptype is None
assert {} == params
def test_attemptyparam(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; ;filename=foo')
assert disptype is None
assert {} == params
def test_attwithasciifilenamenqws(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo bar.html')
assert disptype is None
assert {} == params
def test_attwithfntokensq(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename='foo.html'")
assert 'attachment' == disptype
assert {'filename': "'foo.html'"} == params
def test_attwithisofnplain(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-ä.html'} == params
def test_attwithutf8fnplain(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-ä.html'} == params
def test_attwithfnrawpctenca(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-%41.html'} == params
def test_attwithfnusingpct(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="50%.html"')
assert 'attachment' == disptype
assert {'filename': '50%.html'} == params
def test_attwithfnrawpctencaq(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename="foo-%\41.html"')
assert 'attachment' == disptype
assert {'filename': r'foo-%41.html'} == params
def test_attwithnamepct(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-%41.html'} == params
def test_attwithfilenamepctandiso(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="ä-%41.html"')
assert 'attachment' == disptype
assert {'filename': 'ä-%41.html'} == params
def test_attwithfnrawpctenclong(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-%c3%a4-%e2%82%ac.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-%c3%a4-%e2%82%ac.html'} == params
def test_attwithasciifilenamews1(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename ="foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attwith2filenames(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html"; filename="bar.html"')
assert disptype is None
assert {} == params
def test_attfnbrokentoken(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo[1](2).html')
assert disptype is None
assert {} == params
def test_attfnbrokentokeniso(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo-ä.html')
assert disptype is None
assert {} == params
def test_attfnbrokentokenutf(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo-ä.html')
assert disptype is None
assert {} == params
def test_attmissingdisposition(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html')
assert disptype is None
assert {} == params
def test_attmissingdisposition2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'x=y; filename=foo.html')
assert disptype is None
assert {} == params
def test_attmissingdisposition3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'"foo; filename=bar;baz"; filename=qux')
assert disptype is None
assert {} == params
def test_attmissingdisposition4(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html, filename=bar.html')
assert disptype is None
assert {} == params
def test_emptydisposition(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'; filename=foo.html')
assert disptype is None
assert {} == params
def test_doublecolon(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
': inline; attachment; filename=foo.html')
assert disptype is None
assert {} == params
def test_attandinline(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'inline; attachment; filename=foo.html')
assert disptype is None
assert {} == params
def test_attandinline2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; inline; filename=foo.html')
assert disptype is None
assert {} == params
def test_attbrokenquotedfn(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html".txt')
assert disptype is None
assert {} == params
def test_attbrokenquotedfn2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="bar')
assert disptype is None
assert {} == params
def test_attbrokenquotedfn3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo"bar;baz"qux')
assert disptype is None
assert {} == params
def test_attmultinstances(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo.html, attachment; filename=bar.html')
assert disptype is None
assert {} == params
def test_attmissingdelim(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; foo=foo filename=bar')
assert disptype is None
assert {} == params
def test_attmissingdelim2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=bar foo=foo')
assert disptype is None
assert {} == params
def test_attmissingdelim3(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment filename=bar')
assert disptype is None
assert {} == params
def test_attreversed(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html; attachment')
assert disptype is None
assert {} == params
def test_attconfusedparam(self) -> None:
disptype, params = parse_content_disposition(
'attachment; xfilename=foo.html')
assert 'attachment' == disptype
assert {'xfilename': 'foo.html'} == params
def test_attabspath(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="/foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attabspathwin(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="\\foo.html"')
assert 'attachment' == disptype
assert {'filename': 'foo.html'} == params
def test_attcdate(self) -> None:
disptype, params = parse_content_disposition(
'attachment; creation-date="Wed, 12 Feb 1997 16:29:51 -0500"')
assert 'attachment' == disptype
assert {'creation-date': 'Wed, 12 Feb 1997 16:29:51 -0500'} == params
def test_attmdate(self) -> None:
disptype, params = parse_content_disposition(
'attachment; modification-date="Wed, 12 Feb 1997 16:29:51 -0500"')
assert 'attachment' == disptype
assert {'modification-date':
'Wed, 12 Feb 1997 16:29:51 -0500'} == params
def test_dispext(self) -> None:
disptype, params = parse_content_disposition('foobar')
assert 'foobar' == disptype
assert {} == params
def test_dispextbadfn(self) -> None:
disptype, params = parse_content_disposition(
'attachment; example="filename=example.txt"')
assert 'attachment' == disptype
assert {'example': 'filename=example.txt'} == params
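    # The tests below cover RFC 2231/5987 extended notation, where filename*
    # carries a charset''percent-encoded value (e.g. UTF-8''foo-%c3%a4.html)
    # that the parser decodes into the params dict.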
def test_attwithisofn2231iso(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=iso-8859-1''foo-%E4.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä.html'} == params
def test_attwithfn2231utf8(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-%c3%a4-%e2%82%ac.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä-€.html'} == params
def test_attwithfn2231noc(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=''foo-%c3%a4-%e2%82%ac.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä-€.html'} == params
def test_attwithfn2231utf8comp(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-a%cc%88.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä.html'} == params
@pytest.mark.skip('should raise decoding error: %82 is invalid for latin1')
def test_attwithfn2231utf8_bad(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=iso-8859-1''foo-%c3%a4-%e2%82%ac.html")
assert 'attachment' == disptype
assert {} == params
@pytest.mark.skip('should raise decoding error: %E4 is invalid for utf-8')
def test_attwithfn2231iso_bad(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=utf-8''foo-%E4.html")
assert 'attachment' == disptype
assert {} == params
def test_attwithfn2231ws1(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename *=UTF-8''foo-%c3%a4.html")
assert 'attachment' == disptype
assert {} == params
def test_attwithfn2231ws2(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*= UTF-8''foo-%c3%a4.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä.html'} == params
def test_attwithfn2231ws3(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename* =UTF-8''foo-%c3%a4.html")
assert 'attachment' == disptype
assert {'filename*': 'foo-ä.html'} == params
def test_attwithfn2231quot(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=\"UTF-8''foo-%c3%a4.html\"")
assert 'attachment' == disptype
assert {} == params
def test_attwithfn2231quot2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=\"foo%20bar.html\"")
assert 'attachment' == disptype
assert {} == params
def test_attwithfn2231singleqmissing(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8'foo-%c3%a4.html")
assert 'attachment' == disptype
assert {} == params
    @pytest.mark.skip('urllib.parse.unquote is tolerant of standalone % chars')
def test_attwithfn2231nbadpct1(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo%")
assert 'attachment' == disptype
assert {} == params
    @pytest.mark.skip('urllib.parse.unquote is tolerant of standalone % chars')
def test_attwithfn2231nbadpct2(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''f%oo.html")
assert 'attachment' == disptype
assert {} == params
def test_attwithfn2231dpct(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''A-%2541.html")
assert 'attachment' == disptype
assert {'filename*': 'A-%41.html'} == params
def test_attwithfn2231abspathdisguised(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''%5cfoo.html")
assert 'attachment' == disptype
assert {'filename*': '\\foo.html'} == params
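    # RFC 2231 continuation parameters (filename*0, filename*1, ...) are kept
    # as separate entries here; reassembling them into a single filename is
    # exercised by TestContentDispositionFilename below.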
def test_attfncont(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo."; filename*1="html"')
assert 'attachment' == disptype
assert {'filename*0': 'foo.',
'filename*1': 'html'} == params
def test_attfncontqs(self) -> None:
disptype, params = parse_content_disposition(
r'attachment; filename*0="foo"; filename*1="\b\a\r.html"')
assert 'attachment' == disptype
assert {'filename*0': 'foo',
'filename*1': 'bar.html'} == params
def test_attfncontenc(self) -> None:
disptype, params = parse_content_disposition(
            'attachment; filename*0*=UTF-8\'\'foo-%c3%a4; filename*1=".html"')
assert 'attachment' == disptype
        assert {'filename*0*': "UTF-8''foo-%c3%a4",
'filename*1': '.html'} == params
def test_attfncontlz(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo"; filename*01="bar"')
assert 'attachment' == disptype
assert {'filename*0': 'foo',
'filename*01': 'bar'} == params
def test_attfncontnc(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo"; filename*2="bar"')
assert 'attachment' == disptype
assert {'filename*0': 'foo',
'filename*2': 'bar'} == params
def test_attfnconts1(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*0="foo."; filename*2="html"')
assert 'attachment' == disptype
assert {'filename*0': 'foo.',
'filename*2': 'html'} == params
def test_attfncontord(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename*1="bar"; filename*0="foo"')
assert 'attachment' == disptype
assert {'filename*0': 'foo',
'filename*1': 'bar'} == params
def test_attfnboth(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="foo-ae.html";'
" filename*=UTF-8''foo-%c3%a4.html")
assert 'attachment' == disptype
assert {'filename': 'foo-ae.html',
'filename*': 'foo-ä.html'} == params
def test_attfnboth2(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*=UTF-8''foo-%c3%a4.html;"
' filename="foo-ae.html"')
assert 'attachment' == disptype
assert {'filename': 'foo-ae.html',
'filename*': 'foo-ä.html'} == params
def test_attfnboth3(self) -> None:
disptype, params = parse_content_disposition(
"attachment; filename*0*=ISO-8859-15''euro-sign%3d%a4;"
" filename*=ISO-8859-1''currency-sign%3d%a4")
assert 'attachment' == disptype
assert {'filename*': 'currency-sign=¤',
'filename*0*': "ISO-8859-15''euro-sign%3d%a4"} == params
def test_attnewandfn(self) -> None:
disptype, params = parse_content_disposition(
'attachment; foobar=x; filename="foo.html"')
assert 'attachment' == disptype
assert {'foobar': 'x',
'filename': 'foo.html'} == params
def test_attrfc2047token(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename==?ISO-8859-1?Q?foo-=E4.html?=')
assert disptype is None
assert {} == params
def test_attrfc2047quoted(self) -> None:
disptype, params = parse_content_disposition(
'attachment; filename="=?ISO-8859-1?Q?foo-=E4.html?="')
assert 'attachment' == disptype
assert {'filename': '=?ISO-8859-1?Q?foo-=E4.html?='} == params
def test_bad_continuous_param(self) -> None:
with pytest.warns(aiohttp.BadContentDispositionParam):
disptype, params = parse_content_disposition(
'attachment; filename*0=foo bar')
assert 'attachment' == disptype
assert {} == params
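# content_disposition_filename() selects a filename from the parsed params:
# an extended filename* takes precedence over a plain filename, and consecutive
# continuations (filename*0, filename*1, ...) are joined in order; a missing
# *0 segment yields None.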
class TestContentDispositionFilename:
# http://greenbytes.de/tech/tc2231/
def test_no_filename(self) -> None:
assert content_disposition_filename({}) is None
assert content_disposition_filename({'foo': 'bar'}) is None
def test_filename(self) -> None:
params = {'filename': 'foo.html'}
assert 'foo.html' == content_disposition_filename(params)
def test_filename_ext(self) -> None:
params = {'filename*': 'файл.html'}
assert 'файл.html' == content_disposition_filename(params)
def test_attfncont(self) -> None:
params = {'filename*0': 'foo.', 'filename*1': 'html'}
assert 'foo.html' == content_disposition_filename(params)
def test_attfncontqs(self) -> None:
params = {'filename*0': 'foo', 'filename*1': 'bar.html'}
assert 'foobar.html' == content_disposition_filename(params)
def test_attfncontenc(self) -> None:
params = {'filename*0*': "UTF-8''foo-%c3%a4",
'filename*1': '.html'}
assert 'foo-ä.html' == content_disposition_filename(params)
def test_attfncontlz(self) -> None:
params = {'filename*0': 'foo',
'filename*01': 'bar'}
assert 'foo' == content_disposition_filename(params)
def test_attfncontnc(self) -> None:
params = {'filename*0': 'foo',
'filename*2': 'bar'}
assert 'foo' == content_disposition_filename(params)
def test_attfnconts1(self) -> None:
params = {'filename*1': 'foo',
'filename*2': 'bar'}
assert content_disposition_filename(params) is None
def test_attfnboth(self) -> None:
params = {'filename': 'foo-ae.html',
'filename*': 'foo-ä.html'}
assert 'foo-ä.html' == content_disposition_filename(params)
def test_attfnboth3(self) -> None:
params = {'filename*0*': "ISO-8859-15''euro-sign%3d%a4",
'filename*': 'currency-sign=¤'}
assert 'currency-sign=¤' == content_disposition_filename(params)
def test_attrfc2047quoted(self) -> None:
params = {'filename': '=?ISO-8859-1?Q?foo-=E4.html?='}
assert '=?ISO-8859-1?Q?foo-=E4.html?=' == content_disposition_filename(
params)
|
arthurdarcet/aiohttp
|
tests/test_multipart_helpers.py
|
Python
|
apache-2.0
| 27,327
|