| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (1 class) | license (15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict as odict
from copy import deepcopy
from functools import partial
import sys
import bindings as bi
from custom import get_customizations_for, reformat_block
PY3 = sys.version_info[0] == 3
str_type = str if PY3 else (str, unicode)
get_customizations_for = partial(get_customizations_for, 'R')
def get_customizations_or_defaults_for(algo, prop, default=None):
return get_customizations_for(algo, prop, get_customizations_for('defaults', prop, default))
# ----------------------------------------------------------------------------------------------------------------------
# Generate per-model classes
# ----------------------------------------------------------------------------------------------------------------------
def gen_module(schema, algo, module):
# print(str(schema))
rest_api_version = get_customizations_for(algo, 'rest_api_version', 3)
doc_preamble = get_customizations_for(algo, 'doc.preamble')
doc_returns = get_customizations_for(algo, 'doc.returns')
doc_seealso = get_customizations_for(algo, 'doc.seealso')
doc_references = get_customizations_for(algo, 'doc.references')
doc_examples = get_customizations_for(algo, 'doc.examples')
required_params = get_customizations_or_defaults_for(algo, 'extensions.required_params', [])
extra_params = get_customizations_or_defaults_for(algo, 'extensions.extra_params', [])
ellipsis_param = get_customizations_for(algo, 'extensions.ellipsis_param')
model_name = algo_to_modelname(algo)
update_param_defaults = get_customizations_for('defaults', 'update_param')
update_param = get_customizations_for(algo, 'update_param')
yield "# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py"
yield "# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details) \n#'"
yield "# -------------------------- %s -------------------------- #" % model_name
# start documentation
if doc_preamble:
yield "#'"
yield reformat_block(doc_preamble, prefix="#' ")
yield "#'"
# start doc for signature
required_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
for p in required_params])
schema_params = odict([(p['name'], p)
for p in schema['parameters']])
extra_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
for p in extra_params])
all_params = list(required_params.keys()) + list(schema_params.keys()) + list(extra_params.keys())
def get_schema_params(pname):
param = deepcopy(schema_params[pname])
updates = None
for update_fn in [update_param, update_param_defaults]:
if callable(update_fn):
updates = update_fn(pname, param)
if updates is not None:
param = updates
break
return param if isinstance(param, (list, tuple)) else [param] # always return array to support deprecated aliases
tag = "@param"
pdocs = odict()
for pname in all_params:
if pname in pdocs: # avoid duplicates (esp. if already included in required_params)
continue
if pname in schema_params:
for param in get_schema_params(pname): # retrieve potential aliases
pname = param.get('name')
if pname:
pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname, get_help(param, indent=len(tag)+4))
else:
pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname)
if ellipsis_param is not None:
pdocs['...'] = get_customizations_or_defaults_for(algo, 'doc.params._ellipsis_')
for pname, pdoc in pdocs.items():
if pdoc:
yield reformat_block("%s %s %s" % (tag, pname, pdoc.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_returns:
tag = "@return"
yield reformat_block("%s %s" % (tag, doc_returns.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_seealso:
tag = "@seealso"
yield reformat_block("%s %s" % (tag, doc_seealso.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_references:
tag = "@references"
yield reformat_block("%s %s" % (tag, doc_references.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_examples:
yield "#' @examples"
yield "#' \dontrun{"
yield reformat_block(doc_examples, prefix="#' ")
yield "#' }"
yield "#' @export"
# start function signature
sig_pnames = []
sig_params = []
for k, v in required_params.items():
sig_pnames.append(k)
sig_params.append(k if v is None else '%s = %s' % (k, v))
for pname in schema_params:
params = get_schema_params(pname)
for param in params:
pname = param.get('name') # override local var as param can be an alias of pname
if pname in required_params or not pname: # skip schema params already added by required_params, and those explicitly removed
continue
sig_pnames.append(pname)
sig_params.append("%s = %s" % (pname, get_customizations_or_defaults_for(algo, 'doc.signatures.' + pname, get_sig_default_value(param))))
for k, v in extra_params.items():
sig_pnames.append(k)
sig_params.append("%s = %s" % (k, v))
if ellipsis_param is not None:
sig_params.append("...")
param_indent = len("h2o.%s <- function(" % module)
yield reformat_block("h2o.%s <- function(%s)" % (module, ',\n'.join(sig_params)), indent=param_indent, indent_first=False)
# start function body
yield "{"
yield '\n'.join(gen_set_params(algo, sig_pnames, schema_params, required_params, ellipsis_param=ellipsis_param))
yield ""
yield " # Error check and build model"
verbose = 'verbose' if 'verbose' in extra_params else 'FALSE'
yield " model <- .h2o.modelJob('%s', parms, h2oRestApiVersion=%d, verbose=%s)" % (algo, rest_api_version, verbose)
with_model = get_customizations_for(algo, 'extensions.with_model')
if with_model:
yield ""
yield reformat_block(with_model, indent=2)
yield " return(model)"
yield "}"
bulk_pnames_skip = ["model_id",
"verbose",
"destination_key"] # destination_key is only for SVD
bulk_params = list(zip(*filter(lambda t: not t[0] in bulk_pnames_skip, zip(sig_pnames, sig_params))))
bulk_pnames = list(bulk_params[0])
sig_bulk_params = list(bulk_params[1])
sig_bulk_params.append("segment_columns = NULL")
sig_bulk_params.append("segment_models_id = NULL")
sig_bulk_params.append("parallelism = 1")
if ellipsis_param is not None:
sig_bulk_params.append("...")
if algo != "generic":
#
# Segment model building
#
bulk_param_indent = len(".h2o.train_segments_%s <- function(" % module)
yield reformat_block(".h2o.train_segments_%s <- function(%s)" % (module, ',\n'.join(sig_bulk_params)), indent=bulk_param_indent, indent_first=False)
# start train_segments-function body
yield "{"
yield '\n'.join(gen_set_params(algo, bulk_pnames, schema_params, required_params, skip_params=bulk_pnames_skip, ellipsis_param=ellipsis_param))
yield ""
yield " # Build segment-models specific parameters"
yield " segment_parms <- list()"
yield " if (!missing(segment_columns))"
yield " segment_parms$segment_columns <- segment_columns"
yield " if (!missing(segment_models_id))"
yield " segment_parms$segment_models_id <- segment_models_id"
yield " segment_parms$parallelism <- parallelism"
yield ""
yield " # Error check and build segment models"
yield " segment_models <- .h2o.segmentModelsJob('%s', segment_parms, parms, h2oRestApiVersion=%d)" % (algo, rest_api_version)
yield " return(segment_models)"
yield "}"
#
# Additional functions
#
module_extensions = get_customizations_for(algo, 'extensions.module')
if module_extensions:
yield ""
yield module_extensions
def gen_set_params(algo, pnames, schema_params, required_params, skip_params=None, ellipsis_param=None):
if ellipsis_param:
yield reformat_block(ellipsis_param, indent=2)
if skip_params:
yield " # formally define variables that were excluded from function parameters"
for pname in skip_params:
yield " %s <- NULL" % pname
validate_frames = get_customizations_or_defaults_for(algo, 'extensions.validate_frames')
if validate_frames:
yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
yield reformat_block(validate_frames, indent=2)
else:
frames = get_customizations_or_defaults_for(algo, 'extensions.frame_params', [])
if frames:
yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
for frame in frames:
if frame in pnames:
required_val = str(frame in required_params).upper()
yield " {frame} <- .validate.H2OFrame({frame}, required={required})".format(frame=frame, required=required_val)
validate_required_params = get_customizations_or_defaults_for(algo, 'extensions.validate_required_params')
if validate_required_params:
yield ""
yield " # Validate other required args"
yield reformat_block(validate_required_params, indent=2)
validate_params = get_customizations_or_defaults_for(algo, 'extensions.validate_params')
if validate_params:
yield ""
yield " # Validate other args"
yield reformat_block(validate_params, indent=2)
yield ""
yield " # Build parameter list to send to model builder"
yield " parms <- list()"
set_required_params = get_customizations_or_defaults_for(algo, 'extensions.set_required_params')
if set_required_params:
yield reformat_block(set_required_params, indent=2)
skip_default_set_params = get_customizations_or_defaults_for(algo, 'extensions.skip_default_set_params_for', [])
yield ""
for pname in schema_params:
if pname in skip_default_set_params or (skip_params and pname in skip_params):
continue
# leave the special handling of 'loss' param here for now as it is used by several algos
if pname == "loss":
yield " if(!missing(loss)) {"
yield " if(loss == \"MeanSquare\") {"
yield " warning(\"Loss name 'MeanSquare' is deprecated; please use 'Quadratic' instead.\")"
yield " parms$loss <- \"Quadratic\""
yield " } else "
yield " parms$loss <- loss"
yield " }"
else:
yield " if (!missing(%s))" % pname
yield " parms$%s <- %s" % (pname, pname)
set_params = get_customizations_or_defaults_for(algo, 'extensions.set_params')
if set_params:
yield ""
yield reformat_block(set_params, indent=2)
def algo_to_modelname(algo):
if algo == "aggregator": return "H2O Aggregator Model"
if algo == "deeplearning": return "Deep Learning - Neural Network"
if algo == "xgboost": return "XGBoost"
if algo == "drf": return "Random Forest Model in H2O"
if algo == "upliftdrf": return "Uplift Random Forest Model in H2O"
if algo == "gbm": return "Gradient Boosting Machine"
if algo == "glm": return "H2O Generalized Linear Models"
if algo == "glrm": return "Generalized Low Rank Model"
if algo == "kmeans": return "KMeans Model in H2O"
if algo == "naivebayes": return "Naive Bayes Model in H2O"
if algo == "pca": return "Principal Components Analysis"
if algo == "svd": return "Singular Value Decomposition"
if algo == "stackedensemble": return "H2O Stacked Ensemble"
if algo == "psvm": return "Support Vector Machine"
if algo == "anovaglm": return "ANOVA GLM"
if algo == "targetencoder": return "Target Encoder"
if algo == "gam": return "Generalized Additive Model"
if algo == "modelselection": return "Model Selection"
if algo == "infogram": return "Infogram"
return algo
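# A quick illustration of the mapping above (comments only, not executed; the
# second input is hypothetical):
#   algo_to_modelname("gbm")      -> "Gradient Boosting Machine"
#   algo_to_modelname("someAlgo") -> "someAlgo"   (unknown algos fall through unchanged)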
def get_help(param, indent=0):
pname = param.get('name')
ptype = param.get('type')
pvalues = param.get('values')
pdefault = param.get('default_value')
phelp = param.get('help')
if not phelp:
return
if ptype == 'boolean':
phelp = "\code{Logical}. " + phelp
if pvalues:
phelp += " Must be one of: %s." % ", ".join('"%s"' % v for v in pvalues)
if pdefault is not None:
phelp += " Defaults to %s." % get_doc_default_value(param)
return bi.wrap(phelp, width=120-indent)
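# For illustration (comments only, hypothetical parameter): a boolean schema
# parameter with help text "Build in parallel." and default FALSE would be
# documented as '\code{Logical}. Build in parallel. Defaults to FALSE.',
# wrapped by bi.wrap() to a width of 120 minus the indent.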
def get_doc_default_value(param):
ptype = param['type']
ptype = 'str' if ptype.startswith('enum') else ptype # for doc, default value is actually a str for enum types.
return as_R_repr(ptype, param.get('default_value'))
def get_sig_default_value(param):
ptype = param['type']
value = (param.get('values') if ptype.startswith('enum') # for signature, default value is whole enum (to provide parameter hint).
else param.get('default_value'))
return as_R_repr(ptype, value)
def as_R_repr(ptype, value):
if value is None:
return (0 if ptype in ['short', 'int', 'long', 'double']
else "list()" if ptype == 'list'
else 'NULL')
if ptype == 'boolean':
return str(value).upper()
if ptype == 'double':
return '%.10g' % value
if ptype == 'list':
return "list(%s)" % ', '.join('"%s"' % v for v in value)
if ptype.startswith('enum'):
return "c(%s)" % ', '.join('"%s"' % v for v in value)
if ptype.endswith('[]'):
return "c(%s)" % ', '.join('%s' % v for v in value)
return value
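# Illustrative conversions performed by as_R_repr (comments only; the example
# values are hypothetical, not taken from any schema):
#   as_R_repr('boolean', True)         -> "TRUE"
#   as_R_repr('double', None)          -> 0           (numeric types default to 0)
#   as_R_repr('enum[...]', ['a', 'b']) -> 'c("a", "b")'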
# ----------------------------------------------------------------------------------------------------------------------
# MAIN:
# ----------------------------------------------------------------------------------------------------------------------
def main():
bi.init("R", "../../../h2o-r/h2o-package/R", clear_dir=False)
for name, mb in bi.model_builders().items():
module = name
file_name = name
if name == "drf":
module = "randomForest"
file_name = "randomforest"
if name == "upliftdrf":
module = "upliftRandomForest"
file_name = "upliftrandomforest"
if name == "isolationforest": module = "isolationForest"
if name == "extendedisolationforest": module = "extendedIsolationForest"
if name == "naivebayes": module = "naiveBayes"
if name == "stackedensemble": module = "stackedEnsemble"
if name == "pca": module = "prcomp"
if name == "modelselection": module = "modelSelection"
bi.vprint("Generating model: " + name)
bi.write_to_file("%s.R" % file_name, gen_module(mb, name, module))
if __name__ == "__main__":
main()
| h2oai/h2o-3 | h2o-bindings/bin/gen_R.py | Python | apache-2.0 | 15,516 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.canvas.canvas2D.scene.scene_canvas import SceneCanvas
from pychron.canvas.canvas2D.scene.uv_mask_scene import UVMaskScene
from pychron.paths import paths
class UVMaskCanvas(SceneCanvas):
def load_scene(self):
p = os.path.join(paths.canvas2D_dir, 'uv_mask_canvas.txt')
self.scene = UVMaskScene()
self.scene.load(p)
# ============= EOF =============================================
| UManPychron/pychron | pychron/canvas/canvas2D/uv_mask_canvas.py | Python | apache-2.0 | 1,409 |
__all__ = ["AnalyzeScope"]
| MaxMorgenstern/EmeraldAI | EmeraldAI/Pipelines/ScopeAnalyzer/__init__.py | Python | apache-2.0 | 27 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
import functools
import os
import uuid
import github3
import six
from artman.tasks import task_base
from artman.utils.logger import logger
class CreateGitHubBranch(task_base.TaskBase):
"""Create a new branch on GitHub with the appropriate GAPIC.
This task requires WRITE access to the applicable repository.
"""
default_provides = 'branch_name'
def execute(self, git_repo, api_name, api_version, language, github,
output_dir, gapic_code_dir, grpc_code_dir=None):
"""Clone a repository from GitHub into a temporary location.
Args:
git_repo (dict): Information about the git repository.
api_name (str): The name of the API.
api_version (str): The version of the API.
language (str): The name of the language.
github (dict): github config including username and token.
output_dir (str): The name of the original output directory.
This directory is removed entirely.
gapic_code_dir (str): The location of the GAPIC code.
grpc_code_dir (str): The location of the GRPC code, if any.
Returns:
str: The name of the branch.
"""
# Determine where the repository should go.
tmp = os.environ.get('ARTMAN_TEMP_DIR', '%stmp' % os.path.sep)
repo_temp_dir = os.path.join(tmp, str(uuid.uuid4())[0:8])
# Ensure we know where we are, so we can make this task not
# ultimately mutate the working directory.
original_directory = os.getcwd()
# Track our code directories, and use absolute paths, since we will
# be moving around.
code_dirs = {'gapic': os.path.abspath(gapic_code_dir)}
if grpc_code_dir:
code_dirs['grpc'] = os.path.abspath(grpc_code_dir)
# Check out the code from GitHub.
repo = git_repo['location']
logger.info('Checking out fresh clone of %s.' % repo)
try:
if repo.startswith('git@github.com:'):
repo = 'https://%s:%s@github.com/%s' % (
github['username'], github['token'], repo[15:])
self.exec_command(['git', 'clone', repo, repo_temp_dir])
# Create a new branch for this API.
branch_name = '{api_name}-{language}-{api_version}-{salt}'.format(
api_name=api_name.lower(),
api_version=api_version.lower(),
language=language.lower(),
salt=str(uuid.uuid4())[0:8],
)
os.chdir(repo_temp_dir)
# If there is a base branch, switch to it.
#
# This command naively assumes that the default branch is named
# "master", which is not a guarantee, but this is good enough
# for now.
if git_repo.get('branch', 'master') != 'master':
baseline = git_repo['branch']
logger.info('Checking out the {0} branch to use as a '
'baseline.'.format(baseline))
self.exec_command(['git', 'checkout', '--track', '-b',
baseline, 'origin/%s' % baseline])
# Create the new branch off of the base branch.
logger.info('Creating the {0} branch.'.format(branch_name))
self.exec_command(['git', 'checkout', '-b', branch_name])
# Copy the previously-generated GAPIC into the temporary
# repository.
for path in git_repo.get('paths', ['.']):
# Piece together where we are copying code from and to.
if isinstance(path, (six.text_type, six.binary_type)):
path = {'dest': path}
src = path.get('src', '.')
dest = path.get('dest', '.')
artifact = path.get('artifact', 'gapic')
# We need a full absolute path for the source, based on
# the code's original output location.
src = os.path.abspath(os.path.join(code_dirs[artifact], src))
# Actually copy the code.
self.exec_command(['git', 'rm', '-r', '--force',
'--ignore-unmatch', dest])
self.exec_command(['cp', '-rf', src, dest])
self.exec_command(['git', 'add', dest])
# Commit the GAPIC.
self.exec_command(['git', 'commit', '--allow-empty', '-m',
'{language} GAPIC: {api_name} {api_version}'.format(
api_name=api_name.capitalize(),
api_version=api_version,
language=language.capitalize(),
),
])
# Push the branch to GitHub.
self.exec_command(['git', 'push', 'origin', branch_name])
logger.info('Code pushed to GitHub as `%s` branch.' % branch_name)
# Remove the original output directory.
self.exec_command(['rm', '-rf', output_dir])
# Return the branch name. This is needed in order to create a
# pull request from that branch.
return branch_name
finally:
# Ensure we clean up after ourselves by removing the temporary
# repository directory.
self.exec_command(['rm', '-rf', repo_temp_dir])
# Change the working directory back to where we started.
os.chdir(original_directory)
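# For illustration only (hypothetical inputs, comments only): with
# api_name='pubsub', api_version='v1' and language='python', the branch created
# above would be named roughly 'pubsub-python-v1-1a2b3c4d', where the trailing
# salt is a random 8-character uuid fragment.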
class CreateGitHubPullRequest(task_base.TaskBase):
"""Create a pull request on GitHub to merge the branch.
This task requires WRITE access to the repository.
"""
default_provides = 'pull_request'
def execute(self, git_repo, github, branch_name, api_name, api_version,
language):
"""Create a pull request on GitHub.
Args:
git_repo (dict): Information about the git repository.
github (dict): github config including username and token.
branch_name (str): The name of the branch to create the pull request from.
api_name (str): The name of the API. Used in the title of the pull request.
api_version (str): The version of the API. Used in the title of
the pull request.
language (str): The name of the language. Used in the title
of the pull request.
"""
# Determine the pull request title.
pr_title = '{language} GAPIC: {api_name} {api_version}'.format(
api_name=api_name.capitalize(),
api_version=api_version,
language=language.capitalize(),
)
# Determine the repo owner and name from the location, which is how
# this API expects to receive this data.
repo_loc = git_repo['location'].rstrip('/')
repo_owner, repo_name = repo_loc.split(':')[-1].split('/')[-2:]
if repo_name.endswith('.git'):
repo_name = repo_name[:-4]
# Instantiate the repo object.
gh = github3.login(github['username'], github['token'])
repo = gh.repository(repo_owner, repo_name)
# Create the pull request.
pr = repo.create_pull(
base=git_repo.get('branch', 'master'),
body='This pull request was generated by artman. '
'Please review it thoroughly before merging.',
head=branch_name,
title=pr_title,
)
# If we did not successfully create a pull request, this is an
# error.
if not pr:
logger.error('Failed to create a pull request. You will need to '
'create a PR manually.')
raise RuntimeError('Pull request creation failed.')
# Log that the PR was created.
logger.success('Pull request created: {url}'.format(
url=pr.html_url,
))
# Return back the pull request object.
return pr
TASKS = (
CreateGitHubBranch,
CreateGitHubPullRequest,
)
| shinfan/artman | artman/tasks/publish/github.py | Python | apache-2.0 | 8,365 |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_benchmark."""
import os
import unittest
import mock
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import t2t_benchmark
from perfkitbenchmarker.sample import Sample
class Tensor2TensorBenchmarkTestCase(unittest.TestCase,
test_util.SamplesTestMixin):
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testT2TTpuOutput(self):
self.maxDiff = None
path = os.path.join(
os.path.dirname(__file__), '..', 'data', 't2t_tpu_output.txt')
with open(path) as fp:
t2t_contents = fp.read()
samples = t2t_benchmark._MakeSamplesFromOutput({
'use_tpu': True
}, t2t_contents)
golden = [
Sample(
metric='Global Steps Per Second',
value=1.85777,
unit='global_steps/sec',
metadata={
'use_tpu': True,
'index': 0
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=5.06989,
unit='global_steps/sec',
metadata={
'use_tpu': True,
'index': 1
},
timestamp=0),
Sample(
metric='Examples Per Second',
value=118.897,
unit='examples/sec',
metadata={
'use_tpu': True,
'index': 0
},
timestamp=0),
Sample(
metric='Examples Per Second',
value=324.473,
unit='examples/sec',
metadata={
'use_tpu': True,
'index': 1
},
timestamp=0),
Sample(
metric='Eval Loss',
value=3.9047337,
unit='',
metadata={
'use_tpu': True,
'step': 1000
},
timestamp=0),
Sample(
metric='Accuracy',
value=32.064167,
unit='%',
metadata={
'use_tpu': True,
'step': 1000
},
timestamp=0),
Sample(
metric='Accuracy Per Sequence',
value=0.0,
unit='%',
metadata={
'use_tpu': True,
'step': 1000
},
timestamp=0),
Sample(
metric='Negative Log Perplexity',
value=-4.501835,
unit='perplexity',
metadata={
'use_tpu': True,
'step': 1000
},
timestamp=0),
Sample(
metric='Top 5 Accuracy',
value=50.96436,
unit='%',
metadata={
'use_tpu': True,
'step': 1000
},
timestamp=0),
Sample(
metric='Eval Loss',
value=3.7047337,
unit='',
metadata={
'use_tpu': True,
'step': 1200
},
timestamp=0),
Sample(
metric='Accuracy',
value=33.064167,
unit='%',
metadata={
'use_tpu': True,
'step': 1200
},
timestamp=0),
Sample(
metric='Accuracy Per Sequence',
value=0.0,
unit='%',
metadata={
'use_tpu': True,
'step': 1200
},
timestamp=0),
Sample(
metric='Negative Log Perplexity',
value=-4.101835,
unit='perplexity',
metadata={
'use_tpu': True,
'step': 1200
},
timestamp=0),
Sample(
metric='Top 5 Accuracy',
value=55.96436,
unit='%',
metadata={
'use_tpu': True,
'step': 1200
},
timestamp=0)
]
self.assertEqual(samples, golden)
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testT2TGpuOutput(self):
self.maxDiff = None
path = os.path.join(
os.path.dirname(__file__), '..', 'data', 't2t_gpu_output.txt')
with open(path) as fp:
t2t_contents = fp.read()
samples = t2t_benchmark._MakeSamplesFromOutput({
'use_tpu': False
}, t2t_contents)
golden = [
Sample(
metric='Global Steps Per Second',
value=3.04983,
unit='global_steps/sec',
metadata={
'index': 0,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.12771,
unit='global_steps/sec',
metadata={
'index': 1,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.11027,
unit='global_steps/sec',
metadata={
'index': 2,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.10924,
unit='global_steps/sec',
metadata={
'index': 3,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.12186,
unit='global_steps/sec',
metadata={
'index': 4,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.08434,
unit='global_steps/sec',
metadata={
'index': 5,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.10174,
unit='global_steps/sec',
metadata={
'index': 6,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.11809,
unit='global_steps/sec',
metadata={
'index': 7,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Global Steps Per Second',
value=4.10496,
unit='global_steps/sec',
metadata={
'index': 8,
'use_tpu': False
},
timestamp=0),
Sample(
metric='Eval Loss',
value=7.2263174,
unit='',
metadata={
'use_tpu': False,
'step': 1000
},
timestamp=0),
Sample(
metric='Accuracy',
value=13.972055999999998,
unit='%',
metadata={
'use_tpu': False,
'step': 1000
},
timestamp=0),
Sample(
metric='Accuracy Per Sequence',
value=0.0,
unit='%',
metadata={
'use_tpu': False,
'step': 1000
},
timestamp=0),
Sample(
metric='Negative Log Perplexity',
value=-7.2263174,
unit='perplexity',
metadata={
'use_tpu': False,
'step': 1000
},
timestamp=0),
Sample(
metric='Top 5 Accuracy',
value=24.800399000000002,
unit='%',
metadata={
'use_tpu': False,
'step': 1000
},
timestamp=0)
]
self.assertEqual(samples, golden)
if __name__ == '__main__':
unittest.main()
| GoogleCloudPlatform/PerfKitBenchmarker | tests/linux_benchmarks/t2t_benchmark_test.py | Python | apache-2.0 | 8,587 |
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
from qgl2.qgl2 import qgl2decl, qreg, QRegister
from qgl2.qgl1 import Id, X, MEAS, X90, flat_top_gaussian_edge, echoCR, Y90m, Barrier  # Barrier is used in the sequences below
from qgl2.basic_sequences.helpers import create_cal_seqs, measConcurrently, cal_descriptor, delay_descriptor
from qgl2.util import init
from itertools import product
import numpy as np
@qgl2decl
def PiRabi(controlQ: qreg, targetQ: qreg, lengths, riseFall=40e-9, amp=1, phase=0, calRepeats=2):
"""
Variable length CX experiment.
Parameters
----------
controlQ : logical channel for the control qubit (LogicalChannel)
targetQ: logical channel for the target qubit (LogicalChannel)
lengths : pulse lengths of the CR pulse to sweep over (iterable)
riseFall : rise/fall time of the CR pulse (s)
amp : amplitude of the CR pulse
phase : phase of the CR pulse (rad)
calRepeats : number of repetitions of calibration sequences (int)
"""
# Rather than do EdgeFactory and regular flat_top_gaussian,
# define a new QGL2 stub where the QGL1 implementation does that,
# so QGL2 can avoid dealing with the edge
# CRchan = EdgeFactory(controlQ, targetQ)
# flat_top_gaussian is an addition of 3 UTheta pulses
cNt = QRegister(controlQ, targetQ)
# Sequence 1: Id(control), gaussian(l), measure both
for l in lengths:
init(cNt)
Id(controlQ)
flat_top_gaussian_edge(controlQ, targetQ, riseFall, length=l, amp=amp, phase=phase)
measConcurrently(cNt)
# Sequence 2: X(control), gaussian(l), X(control), measure both
for l in lengths:
init(cNt)
X(controlQ)
flat_top_gaussian_edge(controlQ, targetQ, riseFall, length=l, amp=amp, phase=phase)
X(controlQ)
measConcurrently(cNt)
# Then do calRepeats calibration sequences
create_cal_seqs(cNt, calRepeats)
# metafile = compile_to_hardware(seqs, 'PiRabi/PiRabi',
# axis_descriptor=[
# delay_descriptor(np.concatenate((lengths, lengths))),
# cal_descriptor((controlQ, targetQ), calRepeats)
# ])
@qgl2decl
def EchoCRLen(controlQ: qreg, targetQ: qreg, lengths, riseFall=40e-9, amp=1, phase=0, calRepeats=2, canc_amp=0, canc_phase=np.pi/2):
"""
Variable length CX experiment, with echo pulse sandwiched between two CR opposite-phase pulses.
Parameters
----------
controlQ : logical channel for the control qubit (LogicalChannel)
targetQ: logical channel for the target qubit (LogicalChannel)
lengths : pulse lengths of the CR pulse to sweep over (iterable)
riseFall : rise/fall time of the CR pulse (s)
amp : amplitude of the CR pulse
phase : phase of the CR pulse (rad)
calRepeats : number of repetitions of readout calibrations for each 2-qubit state
"""
# Original:
# seqs = [[Id(controlQ)] + echoCR(controlQ, targetQ, length=l, phase=phase, riseFall=riseFall) + [Id(controlQ), MEAS(targetQ)*MEAS(controlQ)] \
# for l in lengths]+ [[X(controlQ)] + echoCR(controlQ, targetQ, length=l, phase= phase, riseFall=riseFall) + [X(controlQ), MEAS(targetQ)*MEAS(controlQ)] \
# for l in lengths] + create_cal_seqs((targetQ,controlQ), calRepeats, measChans=(targetQ,controlQ))
cNt = QRegister(controlQ, targetQ)
# Sequence1:
for l in lengths:
init(cNt)
Id(controlQ)
echoCR(controlQ, targetQ, length=l, phase=phase, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase)
Id(controlQ)
measConcurrently(cNt)
# Sequence 2
for l in lengths:
init(cNt)
X(controlQ)
echoCR(controlQ, targetQ, length=l, phase=phase, amp=amp,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase)
X(controlQ)
measConcurrently(cNt)
# Then do calRepeats calibration sequences
create_cal_seqs(cNt, calRepeats)
# metafile = compile_to_hardware(seqs, 'EchoCR/EchoCR',
# axis_descriptor=[
# delay_descriptor(np.concatenate((lengths, lengths))),
# cal_descriptor((controlQ, targetQ), calRepeats)
# ])
@qgl2decl
def EchoCRPhase(controlQ: qreg, targetQ: qreg, phases, riseFall=40e-9, amp=1, length=100e-9, calRepeats=2, canc_amp=0, canc_phase=np.pi/2):
"""
Variable phase CX experiment, with echo pulse sandwiched between two CR opposite-phase pulses.
Parameters
----------
controlQ : logical channel for the control qubit (LogicalChannel)
targetQ : logical channel for the cross-resonance pulse (LogicalChannel)
phases : pulse phases of the CR pulse to sweep over (iterable)
riseFall : rise/fall time of the CR pulse (s)
amp : amplitude of the CR pulse
length : duration of each of the two flat parts of the CR pulse (s)
calRepeats : number of repetitions of readout calibrations for each 2-qubit state
"""
# Original:
# seqs = [[Id(controlQ)] + echoCR(controlQ, targetQ, length=length, phase=ph, riseFall=riseFall) + [X90(targetQ)*Id(controlQ), MEAS(targetQ)*MEAS(controlQ)] \
# for ph in phases]+[[X(controlQ)] + echoCR(controlQ, targetQ, length=length, phase= ph, riseFall = riseFall) + [X90(targetQ)*X(controlQ), MEAS(targetQ)*MEAS(controlQ)] \
# for ph in phases]+create_cal_seqs((targetQ,controlQ), calRepeats, measChans=(targetQ,controlQ))
cNt = QRegister(controlQ, targetQ)
# Sequence 1
for ph in phases:
init(cNt)
Id(controlQ)
echoCR(controlQ, targetQ, length=length, phase=ph,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase)
Barrier(cNt)
X90(targetQ)
Id(controlQ)
measConcurrently(cNt)
# Sequence 2
for ph in phases:
init(cNt)
X(controlQ)
echoCR(controlQ, targetQ, length=length, phase=ph,
riseFall=riseFall, canc_amp=canc_amp, canc_phase=canc_phase)
Barrier(cNt)
X90(targetQ)
X(controlQ)
measConcurrently(cNt)
# Then do calRepeats calibration sequences
create_cal_seqs(cNt, calRepeats)
# axis_descriptor = [
# {
# 'name': 'phase',
# 'unit': 'radians',
# 'points': list(phases)+list(phases),
# 'partition': 1
# },
# cal_descriptor((controlQ, targetQ), calRepeats)
# ]
#
# metafile = compile_to_hardware(seqs, 'EchoCR/EchoCR',
# axis_descriptor=axis_descriptor)
@qgl2decl
def EchoCRAmp(controlQ: qreg,
targetQ: qreg,
amps,
riseFall=40e-9,
length=50e-9,
phase=0,
calRepeats=2):
"""
Variable amplitude CX experiment, with echo pulse sandwiched between two CR opposite-phase pulses.
Parameters
----------
controlQ : logical channel for the control qubit (LogicalChannel)
targetQ: logical channel for the target qubit (LogicalChannel)
amps : pulse amplitudes of the CR pulse to sweep over (iterable)
riseFall : rise/fall time of the CR pulse (s)
length : duration of each of the two flat parts of the CR pulse (s)
phase : phase of the CR pulse (rad)
calRepeats : number of repetitions of readout calibrations for each 2-qubit state
"""
cNt = QRegister(controlQ, targetQ)
# Sequence 1
for a in amps:
init(cNt)
Id(controlQ)
echoCR(controlQ, targetQ, length=length, phase=phase, riseFall=riseFall,amp=a)
Id(controlQ)
measConcurrently(cNt)
# Sequence 2
for a in amps:
init(cNt)
X(controlQ)
echoCR(controlQ, targetQ, length=length, phase= phase, riseFall=riseFall,amp=a)
X(controlQ)
measConcurrently(cNt)
# Then do calRepeats calibration sequences
create_cal_seqs(cNt, calRepeats)
# axis_descriptor = [
# {
# 'name': 'amplitude',
# 'unit': None,
# 'points': list(amps)+list(amps),
# 'partition': 1
# },
# cal_descriptor((controlQ, targetQ), calRepeats)
# ]
# metafile = compile_to_hardware(seqs, 'EchoCR/EchoCR',
# axis_descriptor=axis_descriptor)
@qgl2decl
def CRtomo_seq(controlQ: qreg, targetQ: qreg, lengths, ph, amp=0.8, riseFall=20e-9):
"""
Variable length CX experiment, for Hamiltonian tomography.
Parameters
----------
controlQ : logical channel for the control qubit (LogicalChannel)
targetQ: logical channel for the target qubit (LogicalChannel)
lengths : pulse lengths of the CR pulse to sweep over (iterable)
riseFall : rise/fall time of the CR pulse (s)
ph : phase of the CR pulse (rad)
"""
# Rather than do EdgeFactory and regular flat_top_gaussian,
# define a new QGL2 stub where the QGL1 implementation does that,
# so QGL2 can avoid dealing with the edge
# CRchan = EdgeFactory(controlQ, targetQ)
# flat_top_gaussian is an addition of 3 UTheta pulses
cNt = QRegister(controlQ, targetQ)
tomo_pulses = [Y90m, X90, Id]
# Sequence 1
for l, tomo_pulse in product(lengths, tomo_pulses):
init(cNt)
Id(controlQ)
flat_top_gaussian_edge(controlQ, targetQ, riseFall=riseFall, length=l, amp=amp, phase=ph, label="CR")
Barrier(cNt)
Id(controlQ)
tomo_pulse(targetQ)
MEAS(targetQ)
# Sequence 2
for l, tomo_pulse in product(lengths, tomo_pulses):
init(cNt)
X(controlQ)
flat_top_gaussian_edge(controlQ, targetQ, riseFall=riseFall, length=l, amp=amp, phase=ph, label="CR")
Barrier(cNt)
X(controlQ)
tomo_pulse(targetQ)
MEAS(targetQ)
create_cal_seqs(targetQ, 2)
# metafile = compile_to_hardware(seqs, 'CR/CR',
# axis_descriptor=[
# delay_descriptor(np.concatenate((np.repeat(lengths,3), np.repeat(lengths,3)))),
# cal_descriptor((targetQ,), 2)
# ])
# A main for running the sequences here with some typical argument values
# Here it runs all of them; could do a parse_args like main.py
def main():
from pyqgl2.qreg import QRegister
import pyqgl2.test_cl
from pyqgl2.main import compile_function, qgl2_compile_to_hardware
toHW = True
plotPulses = False
pyqgl2.test_cl.create_default_channelLibrary(toHW, True)
# # To turn on verbose logging in compile_function
# from pyqgl2.ast_util import NodeError
# from pyqgl2.debugmsg import DebugMsg
# NodeError.MUTE_ERR_LEVEL = NodeError.NODE_ERROR_NONE
# DebugMsg.set_level(0)
# Now compile the QGL2 to produce the function that would generate the expected sequence.
# Supply the path to the QGL2, the main function in that file, and a list of the args to that function.
# Can optionally supply saveOutput=True to save the qgl1.py
# file,
# and intermediate_output="path-to-output-file" to save
# intermediate products
# Pass in QRegister(s) NOT real Qubits
q1 = QRegister("q1")
q2 = QRegister("q2")
# Axis Descriptor generator functions here
# This is ugly; they're method dependent, but I can't do them in the QGL2 itself
# Additionally, each uses values from the args to the function
# So here we make those arguments be constants so we can use them twice
# without rewriting the values
pirlengths = np.linspace(0, 4e-6, 11) # Lengths arg to PiRabi
eclLengths = np.linspace(0, 2e-6, 11) # Lengths arg to EchoCRLen
trisefall = 40e-9 # riseFall arg for many
tamp = 1 # amp arg for many
t2amp = 0.8 # amp arg for CRTomo
tphase = 0 # phase arg for many
tcalr = 2 # calRepeats arg for many
ecpPhases = np.linspace(0, np.pi/2, 11) # phases arg for EchoCRPhase
ecaAmps = np.linspace(0, 5e-6, 11) # amps arg for echoCRAmp
crtLengths = np.linspace(0, 2e-6, 11) # lengths arg for CRtomo_seq
def getPRAxisDesc(lengths, calRepeats):
return [
delay_descriptor(np.concatenate((lengths, lengths))),
# Hard code there are 2 qubits
cal_descriptor(('c', 't'), calRepeats)
]
def getECLAxisDesc(lengths, calRepeats):
return [
delay_descriptor(np.concatenate((lengths, lengths))),
# Hard code there are 2 qubits
cal_descriptor(('controlQ', 'targetQ'), calRepeats)
]
def getECPAxisDesc(phases, calRepeats):
return [
{
'name': 'phase',
'unit': 'radians',
'points': list(phases)+list(phases),
'partition': 1
},
cal_descriptor(('controlQ', 'targetQ'), calRepeats)
]
def getECAAxisDesc(amps, calRepeats):
return [
{
'name': 'amplitude',
'unit': None,
'points': list(amps)+list(amps),
'partition': 1
},
cal_descriptor(('controlQ', 'targetQ'), calRepeats)
]
def getCRtAxisDesc(lengths):
return [
delay_descriptor(np.concatenate((np.repeat(lengths,3), np.repeat(lengths,3)))),
cal_descriptor(('targetQ',), 2)
]
# FIXME: See issue #44: Must supply all args to qgl2main for now
# for func, args, label, axisDesc in [("PiRabi", (q1, q2, pirlengths), "PiRabi", getPRAxisDesc(pirlengths, tcalr)),
# ("EchoCRLen", (q1, q2, np.linspace(0, 2e-6, 11)), "EchoCR", getECLAxisDesc(eclLengths, tcalr)),
# ("EchoCRPhase", (q1, q2, np.linspace(0, np.pi/2, 11)), "EchoCR", getECPAxisDesc(ecpPhases, tcalr)),
# ("EchoCRAmp", (q1, q2, np.linspace(0, 5e-6, 11)), "EchoCR", getECAAxisDesc(ecaAmps, tcalr)), # FIXME: Right values?
# ("CRtomo_seq", (q1, q2, np.linspace(0, 2e-6, 11), 0), "CR", getCRtAxisDesc(crtLengths)) # FIXME: Right values?
# ]:
for func, args, label, axisDesc in [("PiRabi", (q1, q2, pirlengths, trisefall,tamp,tphase,tcalr), "PiRabi", getPRAxisDesc(pirlengths, tcalr)),
("EchoCRLen", (q1, q2, eclLengths, trisefall, tamp, tphase, tcalr, 0, np.pi/2), "EchoCR", getECLAxisDesc(eclLengths, tcalr)),
("EchoCRPhase", (q1, q2, ecpPhases, trisefall,tamp,100e-9,tcalr,0,np.pi/2), "EchoCR", getECPAxisDesc(ecpPhases, tcalr)),
("EchoCRAmp", (q1, q2, ecaAmps, trisefall,50e-9,tphase,tcalr), "EchoCR", getECAAxisDesc(ecaAmps, tcalr)), # FIXME: Right values?
("CRtomo_seq", (q1, q2, crtLengths, 0, t2amp,20e-9), "CR", getCRtAxisDesc(crtLengths)) # FIXME: Right values?
]:
print(f"\nRun {func}...")
# Here we know the function is in the current file
# You could use os.path.dirname(os.path.realpath(__file__)) to find files relative to this script,
# Or os.getcwd() to get files relative to where you ran from. Or always use absolute paths.
resFunc = compile_function(__file__, func, args)
# Run the QGL2. Note that the generated function takes no arguments itself
seq = resFunc()
if toHW:
print(f"Compiling {func} sequences to hardware\n")
fileNames = qgl2_compile_to_hardware(seq, filename=f'{label}/{label}', axis_descriptor=axisDesc)
print(f"Compiled sequences; metafile = {fileNames}")
if plotPulses:
from QGL.PulseSequencePlotter import plot_pulse_files
# FIXME: As called, this returns a graphical object to display
plot_pulse_files(fileNames)
else:
print(f"\nGenerated {func} sequences:\n")
from QGL.Scheduler import schedule
scheduled_seq = schedule(seq)
from IPython.lib.pretty import pretty
print(pretty(scheduled_seq))
if __name__ == "__main__":
main()
| BBN-Q/pyqgl2 | src/python/qgl2/basic_sequences/CR.py | Python | apache-2.0 | 15,939 |
from functools import wraps
from iso8601 import parse_date
from munch import munchify
from restkit import BasicAuth, errors, request, Resource
from retrying import retry
from simplejson import dumps, loads
from urlparse import parse_qs, urlparse
import logging
logger = logging.getLogger(__name__)
IGNORE_PARAMS = ('uri', 'path')
def verify_file(fn):
@wraps(fn)
def wrapper(self, file_, *args, **kwargs):
if isinstance(file_, str):
file_ = open(file_, 'rb')
if hasattr(file_, 'read'):
# A file-like object must have 'read' method
return fn(self, file_, *args, **kwargs)
else:
raise TypeError('Expected either a string '
'containing a path to file or a '
'file-like object, got {}'.format(type(file_)))
return wrapper
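# A minimal usage sketch of the decorator above (comments only; the file path
# and ids are placeholders): any method decorated with @verify_file accepts
# either a path string or an open file-like object as its first argument, e.g.
#   client.upload_document('/path/to/spec.pdf', tender_id, access_token)
#   client.upload_document(open('/path/to/spec.pdf', 'rb'), tender_id, access_token)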
class InvalidResponse(Exception):
pass
class NoToken(Exception):
pass
class Client(Resource):
"""docstring for API"""
def __init__(self, key,
host_url="https://api-sandbox.openprocurement.org",
api_version='0.8',
resource='tenders',
params=None,
**kwargs):
super(Client, self).__init__(
host_url,
filters=[BasicAuth(key, "")],
**kwargs
)
self.prefix_path = '/api/{}/{}'.format(api_version, resource)
if not isinstance(params, dict):
params = {"mode": "_all_"}
self.params = params
self.headers = {"Content-Type": "application/json"}
# To perform some operations (e.g. create a tender)
# we first need to obtain a cookie. For that reason,
# here we send a HEAD request to a neutral URL.
self.head('/api/{}/spore'.format(api_version))
def request(self, method, path=None, payload=None, headers=None,
params_dict=None, **params):
_headers = dict(self.headers)
_headers.update(headers or {})
try:
response = super(Client, self).request(
method, path=path, payload=payload, headers=_headers,
params_dict=params_dict, **params
)
if 'Set-Cookie' in response.headers:
self.headers['Cookie'] = response.headers['Set-Cookie']
return response
except errors.ResourceNotFound as e:
if 'Set-Cookie' in e.response.headers:
self.headers['Cookie'] = e.response.headers['Set-Cookie']
raise e
def patch(self, path=None, payload=None, headers=None,
params_dict=None, **params):
""" HTTP PATCH
- payload: string passed to the body of the request
- path: string, additional path appended to the uri
- headers: dict, optional headers that will
be added to the HTTP request.
- params: optional parameters added to the request
"""
return self.request("PATCH", path=path, payload=payload,
headers=headers, params_dict=params_dict, **params)
def delete(self, path=None, headers=None):
""" HTTP DELETE
- path: string, additional path appended to the uri
- headers: dict, optional headers that will
be added to the HTTP request.
"""
return self.request("DELETE", path=path, headers=headers)
def _update_params(self, params):
for key in params:
if key not in IGNORE_PARAMS:
self.params[key] = params[key]
###########################################################################
# GET ITEMS LIST API METHODS
###########################################################################
@retry(stop_max_attempt_number=5)
def get_tenders(self, params={}, feed='changes'):
params['feed'] = feed
try:
self._update_params(params)
response = self.get(
self.prefix_path,
params_dict=self.params)
if response.status_int == 200:
tender_list = munchify(loads(response.body_string()))
self._update_params(tender_list.next_page)
return tender_list.data
except errors.ResourceNotFound:
del self.params['offset']
raise
raise InvalidResponse
def get_latest_tenders(self, date, tender_id):
iso_dt = parse_date(date)
dt = iso_dt.strftime("%Y-%m-%d")
tm = iso_dt.strftime("%H:%M:%S")
response = self._get_resource_item(
'{}?offset={}T{}&opt_fields=tender_id&mode=test'.format(
self.prefix_path,
dt,
tm
)
)
if response.status_int == 200:
tender_list = munchify(loads(response.body_string()))
self._update_params(tender_list.next_page)
return tender_list.data
raise InvalidResponse
def _get_tender_resource_list(self, tender_id, items_name, access_token=None):
if not access_token:
access_token = ""
return self._get_resource_item(
'{}/{}/{}'.format(self.prefix_path, tender_id, items_name),
headers={'X-Access-Token':access_token}
)
def get_questions(self, tender_id, params={}, access_token=None):
return self._get_tender_resource_list(tender_id, "questions", access_token)
def get_documents(self, tender_id, params={}, access_token=None):
return self._get_tender_resource_list(tender_id, "documents", access_token)
def get_awards(self, tender_id, params={}, access_token=None):
return self._get_tender_resource_list(tender_id, "awards", access_token)
def get_lots(self, tender_id, params={}, access_token=None):
return self._get_tender_resource_list(tender_id, "lots", access_token)
###########################################################################
# CREATE ITEM API METHODS
###########################################################################
def _create_resource_item(self, url, payload, headers={}):
headers.update(self.headers)
response_item = self.post(
url, headers=headers, payload=dumps(payload)
)
if response_item.status_int == 201:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _create_tender_resource_item(self, tender_id, item_obj, items_name, access_token=None):
if not access_token:
access_token = ""
return self._create_resource_item(
'{}/{}/{}'.format(self.prefix_path, tender_id, items_name),
item_obj,
headers={'X-Access-Token':access_token}
)
def create_tender(self, tender):
return self._create_resource_item(self.prefix_path, tender)
def create_question(self, tender_id, question, access_token=None):
return self._create_tender_resource_item(tender_id, question, "questions", access_token)
def create_bid(self, tender_id, bid, access_token=None):
return self._create_tender_resource_item(tender_id, bid, "bids", access_token)
def create_lot(self, tender_id, lot, access_token=None):
return self._create_tender_resource_item(tender_id, lot, "lots", access_token)
def create_award(self, tender_id, award, access_token=None):
return self._create_tender_resource_item(tender_id, award, "awards", access_token)
def create_cancellation(self, tender_id, cancellation, access_token=None):
return self._create_tender_resource_item(tender_id, cancellation, "cancellations", access_token)
###########################################################################
# GET ITEM API METHODS
###########################################################################
def _get_resource_item(self, url, headers={}):
headers.update(self.headers)
response_item = self.get(url, headers=headers)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def get_tender(self, id):
return self._get_resource_item('{}/{}'.format(self.prefix_path, id))
def _get_tender_resource_item(self, tender_id, item_id, items_name,
access_token=None):
if not access_token:
access_token = ""
return self._get_resource_item(
'{}/{}/{}/{}'.format(self.prefix_path,
tender_id,
items_name,
item_id),
headers={'X-Access-Token': access_token}
)
def get_question(self, tender_id, question_id, access_token=None):
return self._get_tender_resource_item(tender_id, question_id, "questions", access_token)
def get_bid(self, tender_id, bid_id, access_token=None):
return self._get_tender_resource_item(tender_id, bid_id, "bids", access_token)
def get_lot(self, tender_id, lot_id, access_token=None):
return self._get_tender_resource_item(tender_id, lot_id, "lots", access_token)
def get_file(self, tender, url, access_token):
logger.info("get_file is deprecated. In next update this function will no takes tender.")
parsed_url = urlparse(url)
if access_token:
headers = {'X-Access-Token': access_token}
else:
raise NoToken
headers.update(self.headers)
response_item = self.get(parsed_url.path,
headers=headers,
params_dict=parse_qs(parsed_url.query))
if response_item.status_int == 302:
response_obj = request(response_item.headers['location'])
if response_obj.status_int == 200:
return response_obj.body_string(), \
response_obj.headers['Content-Disposition'] \
.split(";")[1].split('"')[1]
raise InvalidResponse
###########################################################################
# PATCH ITEM API METHODS
###########################################################################
def _patch_resource_item(self, url, payload, headers={}):
headers.update(self.headers)
response_item = self.patch(
url, headers=headers, payload=dumps(payload)
)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def _patch_tender_resource_item(self, tender_id, item_obj, items_name, access_token):
return self._patch_resource_item(
'{}/{}/{}/{}'.format(
self.prefix_path, tender_id, items_name, item_obj['data']['id']
),
payload=item_obj,
headers={'X-Access-Token':access_token}
)
def patch_tender(self, tender):
return self._patch_resource_item(
'{}/{}'.format(self.prefix_path, tender["data"]["id"]),
payload=tender,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_question(self, tender_id, question, access_token):
return self._patch_tender_resource_item(tender_id, question, "questions", access_token)
def patch_bid(self, tender_id, bid, access_token):
return self._patch_tender_resource_item(tender_id, bid, "bids", access_token)
def patch_qualification(self, tender_id, qualification, access_token):
return self._patch_tender_resource_item(tender_id, qualification, "qualifications", access_token)
def patch_award(self, tender_id, award, access_token):
return self._patch_tender_resource_item(tender_id, award, "awards", access_token)
def patch_cancellation(self, tender_id, cancellation, access_token):
return self._patch_tender_resource_item(tender_id, cancellation, "cancellations", access_token)
def patch_cancellation_document(self, tender, cancellation_data, cancel_num, doc_num):
cancel_num = int(cancel_num)
doc_num = int(doc_num)
return self._patch_resource_item(
'{}/{}/{}/{}/documents/{}'.format(
self.prefix_path, tender.data.id, "cancellations", tender['data']['cancellations'][cancel_num]['id'], tender['data']['cancellations'][cancel_num]['documents'][doc_num]['id']
),
payload=cancellation_data,
headers={'X-Access-Token':
getattr(getattr(tender, 'access', ''), 'token', '')}
)
def patch_lot(self, tender_id, lot, access_token):
return self._patch_tender_resource_item(tender_id, lot, "lots", access_token)
def patch_document(self, tender_id, document, access_token):
return self._patch_tender_resource_item(tender_id, document, "documents", access_token)
def patch_contract(self, tender_id, contract, access_token):
return self._patch_tender_resource_item(tender_id, contract, "contracts", access_token)
###########################################################################
# UPLOAD FILE API METHODS
###########################################################################
def _upload_resource_file(self, url, data, headers={}, method='post'):
file_headers = {}
file_headers.update(self.headers)
file_headers.update(headers)
file_headers['Content-Type'] = "multipart/form-data"
response_item = getattr(self, method)(
url, headers=file_headers, payload=data
)
if response_item.status_int in (201, 200):
return munchify(loads(response_item.body_string()))
raise InvalidResponse
@verify_file
def upload_document(self, file_, tender_id, access_token):
return self._upload_resource_file(
'{}/{}/documents'.format(
self.prefix_path,
tender_id
),
data={"file": file_},
headers={'X-Access-Token':access_token}
)
@verify_file
def upload_bid_document(self, file_, tender_id, bid_id, access_token):
return self._upload_resource_file(
'{}/{}/bids/{}/documents'.format(
self.prefix_path,
tender_id,
bid_id
),
data={"file": file_},
headers={'X-Access-Token':access_token}
)
@verify_file
def update_bid_document(self, file_, tender_id, bid_id, document_id, access_token):
return self._upload_resource_file(
'{}/{}/bids/{}/documents/{}'.format(
self.prefix_path,
tender_id,
bid_id,
document_id
),
data={"file": file_},
headers={'X-Access-Token':access_token},
method='put'
)
@verify_file
def upload_cancellation_document(self, file_, tender_id, cancellation_id, access_token):
return self._upload_resource_file(
'{}/{}/cancellations/{}/documents'.format(
self.prefix_path,
tender_id,
cancellation_id
),
data={"file": file_},
headers={'X-Access-Token':access_token}
)
@verify_file
def update_cancellation_document(self, file_, tender_id, cancellation_id, document_id, access_token):
return self._upload_resource_file(
'{}/{}/cancellations/{}/documents/{}'.format(
self.prefix_path,
tender_id,
cancellation_id,
document_id
),
data={"file": file_},
headers={'X-Access-Token':access_token},
method='put'
)
###########################################################################
# DELETE ITEMS LIST API METHODS
###########################################################################
def _delete_resource_item(self, url, headers={}):
response_item = self.delete(url, headers=headers)
if response_item.status_int == 200:
return munchify(loads(response_item.body_string()))
raise InvalidResponse
def delete_bid(self, tender_id, bid_id, access_token):
return self._delete_resource_item(
'{}/{}/bids/{}'.format(
self.prefix_path,
tender_id,
bid_id
),
headers={'X-Access-Token': access_token}
)
def delete_lot(self, tender_id, lot_id, access_token):
return self._delete_resource_item(
'{}/{}/lots/{}'.format(
self.prefix_path,
tender_id,
lot_id
),
headers={'X-Access-Token':access_token}
)
###########################################################################
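# A minimal, hedged usage sketch of the Client above (the API key and tender id
# are placeholders, not real credentials):
#   client = Client('my-api-key', host_url='https://api-sandbox.openprocurement.org',
#                   api_version='0.8', resource='tenders')
#   tenders = client.get_tenders()             # paged list; the next call continues from the stored offset
#   tender = client.get_tender('<tender-id>')  # full record for a single tender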
| Leits/openprocurement.client.python | openprocurement_client/client.py | Python | apache-2.0 | 17,177 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demonstrate Keras preprocessing layers applied inside a Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.integration_test import preprocessing_test_utils as utils
ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations
# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed.
STRATEGIES = [
ds_combinations.default_strategy,
ds_combinations.mirrored_strategy_with_cpu_1_and_2,
ds_combinations.mirrored_strategy_with_two_gpus,
# TODO(b/183044870) TPU strategies with soft placement do not yet work.
# ds_combinations.tpu_strategy,
# ds_combinations.cloud_tpu_strategy,
ds_combinations.parameter_server_strategy_3worker_2ps_cpu,
ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
@ds_combinations.generate(
test_combinations.combine(strategy=STRATEGIES, mode="eager"))
class PreprocessingAppliedInModelTest(tf.test.TestCase):
"""Demonstrate Keras preprocessing layers applied inside a Model."""
def testDistributedModelFit(self, strategy):
if (not tf.__internal__.tf2.enabled()
and isinstance(strategy,
tf.distribute.experimental.ParameterServerStrategy)):
self.skipTest(
"Parameter Server strategy with dataset creator need to be run when "
"eager execution is enabled.")
with strategy.scope():
preprocessing_model = utils.make_preprocessing_model(self.get_temp_dir())
training_model = utils.make_training_model()
# Merge the two separate models into a single model for training.
inputs = preprocessing_model.inputs
outputs = training_model(preprocessing_model(inputs))
merged_model = tf.keras.Model(inputs, outputs)
merged_model.compile(optimizer="sgd", loss="binary_crossentropy")
def dataset_fn(input_context):
dataset = utils.make_dataset()
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
batch_size = input_context.get_per_replica_batch_size(
global_batch_size=utils.BATCH_SIZE)
return dataset.batch(batch_size).repeat().prefetch(2)
dataset_creator = tf.keras.utils.experimental.DatasetCreator(dataset_fn)
merged_model.fit(dataset_creator, epochs=2, steps_per_epoch=utils.STEPS)
if __name__ == "__main__":
multi_process_runner.test_main()
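# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test): the same "merge a
# preprocessing model with a training model" pattern used above, without a
# distribution strategy. The input shape and layers are assumptions for
# illustration; the real models come from preprocessing_test_utils.
#
#     raw_inputs = tf.keras.Input(shape=(10,))
#     preprocessing_model = tf.keras.Model(
#         raw_inputs, tf.keras.layers.Rescaling(1.0 / 255)(raw_inputs))
#     training_model = tf.keras.Sequential([
#         tf.keras.layers.Dense(8, activation="relu"),
#         tf.keras.layers.Dense(1, activation="sigmoid")])
#     outputs = training_model(preprocessing_model(raw_inputs))
#     merged = tf.keras.Model(preprocessing_model.inputs, outputs)
#     merged.compile(optimizer="sgd", loss="binary_crossentropy")
# ---------------------------------------------------------------------------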
|
keras-team/keras
|
keras/integration_test/preprocessing_applied_in_model_test.py
|
Python
|
apache-2.0
| 3,512
|
"""HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution
"""
from concurrent.futures import Future
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import *
from parsl.executors.base import ParslExecutor
from parsl.dataflow.error import ConfigurationError
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(ParslExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
    3. The multiprocessing-based worker pool, which coordinates task execution over several
       cores on a node.
    4. ZeroMQ pipes that connect the HighThroughputExecutor, the Interchange and the process_worker_pool
    Here is a diagram:
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
        will be formatted with appropriate values for the following fields (debug, task_url, result_url,
        cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes. Some trial and error might be
        necessary to identify which addresses are reachable from the compute nodes.
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
        Cores to be assigned to each worker. Oversubscription is possible
        by setting cores_per_worker < 1.0. Default: 1
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: False
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
        (interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
        counterpart (interchange, manager). Default: 30s
"""
def __init__(self,
label='HighThroughputExecutor',
provider=LocalProvider(),
launch_cmd=None,
address="127.0.0.1",
worker_ports=None,
worker_port_range=(54000, 55000),
interchange_port_range=(55000, 56000),
storage_access=None,
working_dir=None,
worker_debug=False,
cores_per_worker=1.0,
max_workers=float('inf'),
heartbeat_threshold=120,
heartbeat_period=30,
suppress_failure=False,
managed=True):
logger.debug("Initializing HighThroughputExecutor")
self.label = label
self.launch_cmd = launch_cmd
self.provider = provider
self.worker_debug = worker_debug
self.storage_access = storage_access if storage_access is not None else []
if len(self.storage_access) > 1:
raise ConfigurationError('Multiple storage access schemes are not supported')
self.working_dir = working_dir
self.managed = managed
self.blocks = []
self.tasks = {}
self.cores_per_worker = cores_per_worker
self.max_workers = max_workers
self._task_counter = 0
self.address = address
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.suppress_failure = suppress_failure
self.run_dir = '.'
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-c {cores_per_worker} "
"--task_url={task_url} "
"--result_url={result_url} "
"--logdir={logdir} "
"--hb_period={heartbeat_period} "
"--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
l_cmd = self.launch_cmd.format(debug=debug_opts,
task_url=self.worker_task_url,
result_url=self.worker_result_url,
cores_per_worker=self.cores_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
logdir="{}/{}".format(self.run_dir, self.label))
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = self.provider.scaling_enabled
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
if hasattr(self.provider, 'init_blocks'):
try:
self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._executor_bad_state = threading.Event()
self._executor_exception = None
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
if self.provider:
self.initialize_scaling()
else:
self._scaling_enabled = False
logger.debug("Starting HighThroughputExecutor with no provider")
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
        Depending on the message, tasks will be updated with results or
        exceptions. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self._executor_bad_state.is_set():
try:
msgs = self.incoming_q.get(timeout=1)
# logger.debug("[MTHREAD] get has returned {}".format(len(msgs)))
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to version mismatch in interchange")
self._executor_exception, _ = deserialize_object(msg['exception'])
logger.exception("Exception: {}".format(self._executor_exception))
# Set bad state to prevent new tasks from being submitted
self._executor_bad_state.set()
# We set all current tasks to this exception to make sure that
# this is raised in the main context.
for task in self.tasks:
self.tasks[task].set_exception(self._executor_exception)
break
task_fut = self.tasks[tid]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s, _ = deserialize_object(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"logdir": "{}/{}".format(self.run_dir, self.label),
"suppress_failure": self.suppress_failure,
"heartbeat_threshold": self.heartbeat_threshold,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
)
self.queue_proc.start()
try:
(worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(self.address, worker_task_port)
self.worker_result_url = "tcp://{}:{}".format(self.address, worker_result_port)
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
        If a management thread does not already exist, it is created and started.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to worker: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
logger.debug("Got outstanding count: {}".format(outstanding_c))
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("MANAGERS")
logger.debug("Got managers: {}".format(workers))
return workers
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if self._executor_bad_state.is_set():
raise self._executor_exception
self._task_counter += 1
task_id = self._task_counter
logger.debug("Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post the task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
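    # Editor's illustrative sketch: once start() has been called and a worker
    # pool is connected, submit() hands back a concurrent.futures.Future.
    # The `executor` name is a placeholder for a started instance of this class:
    #
    #     fut = executor.submit(pow, 2, 10)
    #     fut.result()   # -> 1024 once a worker completes the task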
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of blocks by "blocks"
Raises:
NotImplementedError
"""
r = []
for i in range(blocks):
if self.provider:
block = self.provider.submit(self.launch_cmd, 1, 1)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.blocks.extend([block])
else:
logger.error("No execution provider available")
r = None
return r
def scale_in(self, blocks):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Raises:
NotImplementedError
"""
to_kill = self.blocks[:blocks]
if self.provider:
r = self.provider.cancel(to_kill)
return r
def status(self):
"""Return status of all blocks."""
status = []
if self.provider:
status = self.provider.status(self.blocks)
return status
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
This is not implemented.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of block id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
logger.warning("Attempting HighThroughputExecutor shutdown")
# self.outgoing_q.close()
# self.incoming_q.close()
self.queue_proc.terminate()
logger.warning("Finished HighThroughputExecutor shutdown attempt")
return True
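# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of Parsl): a minimal way one might
# configure and start this executor with the default LocalProvider. All
# parameter values below are assumptions for illustration only.
#
#     htex = HighThroughputExecutor(label="htex_local",
#                                   address="127.0.0.1",
#                                   cores_per_worker=1.0,
#                                   max_workers=4,
#                                   provider=LocalProvider())
#     htex.start()
#     future = htex.submit(max, 1, 2)
#     print(future.result())
# ---------------------------------------------------------------------------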
|
swift-lang/swift-e-lab
|
parsl/executors/high_throughput/executor.py
|
Python
|
apache-2.0
| 21,673
|
# -*- coding: utf-8 -*-
from model.group import Group
import random
import pytest
def test_delete_random_group(app, db, check_ui):
    with pytest.allure.step('Precondition: check that the group page has at least one group that can be deleted; create one if there are none'):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="group for delete"))
    with pytest.allure.step('Oracle: get the list of groups before the action is performed'):
old_groups = db.get_group_list()
    with pytest.allure.step('Delete a randomly chosen group'):
        group = random.choice(old_groups)
        app.group.delete_by_id(group.id)
    with pytest.allure.step('Check that the group has been deleted'):
        new_groups = db.get_group_list()
        old_groups.remove(group)  # Oracle: drop the group deleted through the UI from the expected list
assert old_groups == new_groups
        if check_ui:
            # Compare the database state with the group list shown in the UI.
            assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
|
MaxDavion/Python_traning
|
test/test_del_group.py
|
Python
|
apache-2.0
| 1,298
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import sys
from cliff import command
from oslo_log import log as logging
from six import moves
LOG = logging.getLogger(__name__)
TESTR_CONF = """[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \\
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \\
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \\
${PYTHON:-python} -m subunit.run discover -t %s %s $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=([^\.]*\.)*
"""
def get_tempest_default_config_dir():
"""Returns the correct default config dir to support both cases of
tempest being or not installed in a virtualenv.
Cases considered:
- no virtual env, python2: real_prefix and base_prefix not set
- no virtual env, python3: real_prefix not set, base_prefix set and
identical to prefix
- virtualenv, python2: real_prefix and prefix are set and different
- virtualenv, python3: real_prefix not set, base_prefix and prefix are
set and identical
- pyvenv, any python version: real_prefix not set, base_prefix and prefix
are set and different
:return: default config dir
"""
real_prefix = getattr(sys, 'real_prefix', None)
base_prefix = getattr(sys, 'base_prefix', None)
prefix = sys.prefix
if (real_prefix is None and
(base_prefix is None or base_prefix == prefix)):
# Probably not running in a virtual environment.
# NOTE(andreaf) we cannot distinguish this case from the case of
# a virtual environment created with virtualenv, and running python3.
# Also if it appears we are not in virtual env and fail to find
# global config: '/etc/tempest', fall back to
# '[sys.prefix]/etc/tempest'
global_conf_dir = '/etc/tempest'
if os.path.isdir(global_conf_dir):
return global_conf_dir
else:
return os.path.join(prefix, 'etc/tempest')
else:
return os.path.join(prefix, 'etc/tempest')
class TempestInit(command.Command):
"""Setup a local working environment for running tempest"""
def get_parser(self, prog_name):
parser = super(TempestInit, self).get_parser(prog_name)
parser.add_argument('dir', nargs='?', default=os.getcwd())
parser.add_argument('--config-dir', '-c', default=None)
return parser
def generate_testr_conf(self, local_path):
testr_conf_path = os.path.join(local_path, '.testr.conf')
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf = TESTR_CONF % (top_level_path, discover_path)
with open(testr_conf_path, 'w+') as testr_conf_file:
testr_conf_file.write(testr_conf)
    def update_local_conf(self, conf_path, lock_dir, log_dir):
        config_parse = moves.configparser.SafeConfigParser()
        config_parse.optionxform = str
        # Read the existing config, update it in memory, then write it back.
        config_parse.read(conf_path)
        # Set local lock_dir in tempest conf
        if not config_parse.has_section('oslo_concurrency'):
            config_parse.add_section('oslo_concurrency')
        config_parse.set('oslo_concurrency', 'lock_path', lock_dir)
        # Set local log_dir in tempest conf
        config_parse.set('DEFAULT', 'log_dir', log_dir)
        # Set default log filename to tempest.log
        config_parse.set('DEFAULT', 'log_file', 'tempest.log')
        with open(conf_path, 'w+') as conf_file:
            config_parse.write(conf_file)
def copy_config(self, etc_dir, config_dir):
shutil.copytree(config_dir, etc_dir)
def generate_sample_config(self, local_dir, config_dir):
conf_generator = os.path.join(config_dir,
'config-generator.tempest.conf')
subprocess.call(['oslo-config-generator', '--config-file',
conf_generator],
cwd=local_dir)
def create_working_dir(self, local_dir, config_dir):
# Create local dir if missing
if not os.path.isdir(local_dir):
LOG.debug('Creating local working dir: %s' % local_dir)
os.mkdir(local_dir)
else:
raise OSError("Directory you are trying to initialize already "
"exists: %s" % local_dir)
lock_dir = os.path.join(local_dir, 'tempest_lock')
etc_dir = os.path.join(local_dir, 'etc')
config_path = os.path.join(etc_dir, 'tempest.conf')
log_dir = os.path.join(local_dir, 'logs')
testr_dir = os.path.join(local_dir, '.testrepository')
# Create lock dir
if not os.path.isdir(lock_dir):
LOG.debug('Creating lock dir: %s' % lock_dir)
os.mkdir(lock_dir)
# Create log dir
if not os.path.isdir(log_dir):
LOG.debug('Creating log dir: %s' % log_dir)
os.mkdir(log_dir)
# Create and copy local etc dir
self.copy_config(etc_dir, config_dir)
# Generate the sample config file
self.generate_sample_config(local_dir, config_dir)
# Update local confs to reflect local paths
self.update_local_conf(config_path, lock_dir, log_dir)
# Generate a testr conf file
self.generate_testr_conf(local_dir)
# setup local testr working dir
if not os.path.isdir(testr_dir):
subprocess.call(['testr', 'init'], cwd=local_dir)
def take_action(self, parsed_args):
config_dir = parsed_args.config_dir or get_tempest_default_config_dir()
self.create_working_dir(parsed_args.dir, config_dir)
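# ---------------------------------------------------------------------------
# Editor's note (illustrative): when wired into the tempest CLI via cliff,
# this command is typically invoked as, for example:
#
#     tempest init cloud-01
#
# which, per create_working_dir() above, creates cloud-01/etc (copied from the
# config dir), cloud-01/logs, cloud-01/tempest_lock and a .testr.conf, and
# initializes a testr repository in the new working directory.
# ---------------------------------------------------------------------------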
|
izadorozhna/tempest
|
tempest/cmd/init.py
|
Python
|
apache-2.0
| 6,245
|
from armstrong.apps.embeds.backends import get_backend
from .._utils import TestCase
class GetBackendTestCase(TestCase):
def test_load_no_backend_raises_error(self):
with self.assertRaises(ImportError):
get_backend('')
def test_load_missing_backend_raises_error(self):
with self.assertRaises(ImportError):
get_backend('fake')
|
armstrong/armstrong.apps.embeds
|
tests/backends/_init.py
|
Python
|
apache-2.0
| 377
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def __init__(self, apidict):
if 'admin_state_up' in apidict:
if apidict['admin_state_up']:
apidict['admin_state'] = 'UP'
else:
apidict['admin_state'] = 'DOWN'
# Django cannot handle a key name with ':', so use '__'.
apidict.update({
key.replace(':', '__'): value
for key, value in apidict.items()
if ':' in key
})
super(NeutronAPIDictWrapper, self).__init__(apidict)
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name']:
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name') or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
def to_dict(self):
d = dict(super(NeutronAPIDictWrapper, self).to_dict())
d['subnets'] = [s.to_dict() for s in d['subnets']]
return d
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apidict):
apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
"""Wrapper for neutron subnetpools."""
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apidict):
if 'mac_learning_enabled' in apidict:
apidict['mac_state'] = \
ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
super(Port, self).__init__(apidict)
class Profile(NeutronAPIDictWrapper):
"""Wrapper for neutron profiles."""
_attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
'sub_type', 'multicast_ip_index', 'multicast_ip_range']
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
class RouterStaticRoute(NeutronAPIDictWrapper):
"""Wrapper for neutron routes extra route."""
def __init__(self, route):
super(RouterStaticRoute, self).__init__(route)
# Horizon references id property for table operations
self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
            # If the sg name is not found in sg_dict,
            # the first two parts of the UUID are used as the sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __str__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
'remote': remote,
'direction': direction})
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'neutron'
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def _list(self, **filters):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
def _sg_name_dict(self, sg_id, rules):
"""Create a mapping dict from secgroup id to its name."""
related_ids = set([sg_id])
related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
related_sgs = self.client.list_security_groups(id=related_ids,
fields=['id', 'name'])
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
'tenant_id': self.request.user.project_id}}
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
if not cidr:
cidr = None
if from_port < 0:
from_port = None
if to_port < 0:
to_port = None
if isinstance(ip_protocol, int) and ip_protocol < 0:
ip_protocol = None
body = {'security_group_rule':
{'security_group_id': parent_group_id,
'direction': direction,
'ethertype': ethertype,
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': group_id}}
rule = self.client.create_security_group_rule(body)
rule = rule.get('security_group_rule')
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
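    # Editor's illustrative sketch: allowing inbound SSH from anywhere with
    # the rule_create() call above (sg_manager and sg_id are placeholders):
    #
    #     sg_manager.rule_create(parent_group_id=sg_id, direction='ingress',
    #                            ethertype='IPv4', ip_protocol='tcp',
    #                            from_port=22, to_port=22, cidr='0.0.0.0/0')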
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
for p in ports:
params = {'security_groups': new_security_group_ids}
port_update(self.request, p.id, **params)
class FloatingIp(base.APIDictWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip['ip'] = fip['floating_ip_address']
fip['fixed_ip'] = fip['fixed_ip_address']
fip['pool'] = fip['floating_network_id']
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
pass
class FloatingIpTarget(base.APIDictWrapper):
pass
class FloatingIpManager(network_base.FloatingIpManager):
device_owner_map = {
'compute:': 'compute',
'neutron:LOADBALANCER': 'loadbalancer',
}
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
def _get_instance_type_from_device_owner(self, device_owner):
for key, value in self.device_owner_map.items():
if device_owner.startswith(key):
return value
return device_owner
def _set_instance_info(self, fip, port=None):
if fip['port_id']:
if not port:
port = port_get(self.request, fip['port_id'])
fip['instance_id'] = port.device_id
fip['instance_type'] = self._get_instance_type_from_device_owner(
port.device_owner)
else:
fip['instance_id'] = None
fip['instance_type'] = None
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
# In Neutron, list_floatingips returns Floating IPs from
# all tenants when the API is called with admin role, so
# we need to filter them with tenant_id.
search_opts['tenant_id'] = tenant_id
port_search_opts = {'tenant_id': tenant_id}
else:
port_search_opts = {}
fips = self.client.list_floatingips(**search_opts)
fips = fips.get('floatingips')
# Get port list to add instance_id to floating IP list
# instance_id is stored in device_id attribute
ports = port_list(self.request, **port_search_opts)
port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
for fip in fips:
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def allocate(self, pool):
body = {'floatingip': {'floating_network_id': pool,
'tenant_id': self.request.user.project_id}}
fip = self.client.create_floatingip(body).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# NOTE: In Neutron Horizon floating IP support, port_id is
# "<port_id>_<ip_address>" format to identify multiple ports.
pid, ip_address = port_id.split('_', 1)
update_dict = {'port_id': pid,
'fixed_ip_address': ip_address}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def _get_reachable_subnets(self, ports):
if not is_enabled_by_config('enable_fip_topology_check', True):
# All subnets are reachable from external network
return set(
p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
)
# Retrieve subnet list reachable from external network
ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
gw_routers = [r.id for r in router_list(self.request)
if (r.external_gateway_info and
r.external_gateway_info.get('network_id')
in ext_net_ids)]
reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
if ((p.device_owner in
ROUTER_INTERFACE_OWNERS)
and (p.device_id in gw_routers))])
# we have to include any shared subnets as well because we may not
# have permission to see the router interface to infer connectivity
shared = set([s.id for n in network_list(self.request, shared=True)
for s in n.subnets])
return reachable_subnets | shared
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request)
server_dict = collections.OrderedDict(
[(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
if is_service_enabled(self.request,
config_name='enable_lb',
ext_name='lbaas'):
# Also get the loadbalancer VIPs
vip_dict = {v['port_id']: v['name']
for v in self.client.list_vips().get('vips', [])}
else:
vip_dict = {}
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
port_id = p.id
server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
target = {'name': '%s: %s' % (server_name, ip['ip_address']),
'id': '%s_%s' % (port_id, ip['ip_address']),
'port_id': port_id,
'instance_id': p.device_id}
targets.append(FloatingIpTarget(target))
return targets
def _target_ports_by_instance(self, instance_id):
if not instance_id:
return None
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
if target['instance_id'] == instance_id]
if not targets:
return None
return targets[0]['id']
else:
# In Neutron one port can have multiple ip addresses, so this
            # method picks up the first one and generates a target id.
ports = self._target_ports_by_instance(instance_id)
if not ports:
return None
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
for p in ports]
def is_simple_associate_supported(self):
        # NOTE: There are two reasons that simple association support
        # needs more consideration. (1) Neutron does not support the
# default floating IP pool at the moment. It can be avoided
# in case where only one floating IP pool exists.
# (2) Neutron floating IP is associated with each VIF and
# we need to check whether such VIF is only one for an instance
# to enable simple association support.
return False
def is_supported(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
def get_ipver_str(ip_version):
"""Convert an ip version number to a human-friendly string."""
return IP_VERSION_DICT.get(ip_version, '')
@memoized
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
        If filter_values are too long and the total URI length exceeds the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
"""
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many filter values.
# Use the excess attribute of the exception to know how many
# filter values can be inserted into a single request.
# We consider only the filter condition from (filter_attr,
# filter_values) and do not consider other filter conditions
# which may be specified in **params.
if type(filter_values) != list:
filter_values = [filter_values]
# Length of each query filter is:
# <key>=<value>& (e.g., id=<uuid>)
# The length will be key_len + value_maxlen + 2
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
        # Integer division so chunk_size can be used with range() and slicing.
        chunk_size = allowed_filter_len // filter_maxlen
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources
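# Editor's illustrative sketch: this helper is typically used as in
# servers_update_addresses() below, e.g. to list ports for a potentially
# long list of device ids (names are placeholders):
#
#     ports = list_resources_with_long_filters(
#         port_list, 'device_id', [s.id for s in servers], request=request)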
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
# Get subnet list to expand subnet info in network list.
subnets = subnet_list(request)
subnet_dict = dict([(s['id'], s) for s in subnets])
# Expand subnet list from subnet_id to values.
for n in networks:
# Due to potential timing issues, we can't assume the subnet_dict data
# is in sync with the network data.
n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
s in subnet_dict]
return [Network(n) for n in networks]
def network_list_for_tenant(request, tenant_id, **params):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
"""
LOG.debug("network_list_for_tenant(): tenant_id=%s, params=%s"
% (tenant_id, params))
# If a user has admin role, network list returned by Neutron API
# contains networks that do not belong to that tenant.
# So we need to specify tenant_id when calling network_list().
networks = network_list(request, tenant_id=tenant_id,
shared=False, **params)
# In the current Neutron API, there is no way to retrieve
# both owner networks and public networks in a single API call.
networks += network_list(request, shared=True, **params)
return networks
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%s, params=%s" % (network_id, params))
network = neutronclient(request).show_network(network_id,
**params).get('network')
if expand_subnet:
if request.user.tenant_id == network['tenant_id'] or network['shared']:
# Since the number of subnets per network must be small,
# call subnet_get() for each subnet instead of calling
# subnet_list() once.
network['subnets'] = [subnet_get(request, sid)
for sid in network['subnets']]
return Network(network)
def network_create(request, **kwargs):
"""Create a network object.
:param request: request context
:param tenant_id: (optional) tenant id of the network created
:param name: (optional) name of the network created
:returns: Network object
"""
LOG.debug("network_create(): kwargs = %s" % kwargs)
# In the case network profiles are being used, profile id is needed.
if 'net_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('net_profile_id')
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body = {'network': kwargs}
network = neutronclient(request).create_network(body=body).get('network')
return Network(network)
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
body = {'network': kwargs}
network = neutronclient(request).update_network(network_id,
body=body).get('network')
return Network(network)
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s" % network_id)
neutronclient(request).delete_network(network_id)
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s" % (params))
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
subnet = neutronclient(request).show_subnet(subnet_id,
**params).get('subnet')
return Subnet(subnet)
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: (optional) subnet IP address range
:param ip_version: (optional) IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:param subnetpool_id: (optional) subnetpool to allocate prefix from
:param prefixlen: (optional) length of prefix to allocate
:returns: Subnet object
    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional, you MUST pass along one of the combinations to get a successful
result.
"""
LOG.debug("subnet_create(): netid=%s, kwargs=%s"
% (network_id, kwargs))
body = {'subnet': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet)
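# Editor's illustrative sketch: one valid parameter combination for
# subnet_create() per its docstring (all values are placeholders):
#
#     subnet = subnet_create(request, network_id, name='subnet-1',
#                            cidr='192.168.0.0/24', ip_version=4,
#                            gateway_ip='192.168.0.1')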
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
body = {'subnet': kwargs}
subnet = neutronclient(request).update_subnet(subnet_id,
body=body).get('subnet')
return Subnet(subnet)
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
neutronclient(request).delete_subnet(subnet_id)
def subnetpool_list(request, **params):
LOG.debug("subnetpool_list(): params=%s" % (params))
subnetpools = \
neutronclient(request).list_subnetpools(**params).get('subnetpools')
return [SubnetPool(s) for s in subnetpools]
def subnetpool_get(request, subnetpool_id, **params):
LOG.debug("subnetpool_get(): subnetpoolid=%s, params=%s" %
(subnetpool_id, params))
subnetpool = \
neutronclient(request).show_subnetpool(subnetpool_id,
**params).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):
min_prefixlen -- Minimum prefix length for allocations from pool
max_prefixlen -- Maximum prefix length for allocations from pool
default_prefixlen -- Default prefix length for allocations from pool
default_quota -- Default quota for allocations from pool
shared -- Subnetpool should be shared (Admin-only)
tenant_id -- Owner of subnetpool
Returns:
SubnetPool object
"""
LOG.debug("subnetpool_create(): name=%s, prefixes=%s, kwargs=%s"
% (name, prefixes, kwargs))
body = {'subnetpool':
{'name': name,
'prefixes': prefixes,
}
}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnetpool'].update(kwargs)
subnetpool = \
neutronclient(request).create_subnetpool(body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_update(request, subnetpool_id, **kwargs):
LOG.debug("subnetpool_update(): subnetpoolid=%s, kwargs=%s" %
(subnetpool_id, kwargs))
body = {'subnetpool': kwargs}
subnetpool = \
neutronclient(request).update_subnetpool(subnetpool_id,
body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_delete(request, subnetpool_id):
LOG.debug("subnetpool_delete(): subnetpoolid=%s" % subnetpool_id)
return neutronclient(request).delete_subnetpool(subnetpool_id)
def port_list(request, **params):
LOG.debug("port_list(): params=%s" % (params))
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
port = neutronclient(request).show_port(port_id, **params).get('port')
return Port(port)
def unescape_port_kwargs(**kwargs):
    # Iterate over a copy of the keys since entries may be replaced in place.
    for key in list(kwargs):
        if '__' in key:
            kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
    return kwargs
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
"""
LOG.debug("port_create(): netid=%s, kwargs=%s" % (network_id, kwargs))
# In the case policy profiles are being used, profile id is needed.
if 'policy_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id')
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port)
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s" % port_id)
neutronclient(request).delete_port(port_id)
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': kwargs}
port = neutronclient(request).update_port(port_id, body=body).get('port')
return Port(port)
def profile_list(request, type_p, **params):
LOG.debug("profile_list(): "
"profile_type=%(profile_type)s, params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
profiles = neutronclient(request).list_network_profiles(
**params).get('network_profiles')
elif type_p == 'policy':
profiles = neutronclient(request).list_policy_profiles(
**params).get('policy_profiles')
return [Profile(n) for n in profiles]
def profile_get(request, profile_id, **params):
LOG.debug("profile_get(): "
"profileid=%(profileid)s, params=%(params)s",
{'profileid': profile_id, 'params': params})
profile = neutronclient(request).show_network_profile(
profile_id, **params).get('network_profile')
return Profile(profile)
def profile_create(request, **kwargs):
LOG.debug("profile_create(): kwargs=%s", kwargs)
body = {'network_profile': {}}
body['network_profile'].update(kwargs)
profile = neutronclient(request).create_network_profile(
body=body).get('network_profile')
return Profile(profile)
def profile_delete(request, profile_id):
LOG.debug("profile_delete(): profile_id=%s", profile_id)
neutronclient(request).delete_network_profile(profile_id)
def profile_update(request, profile_id, **kwargs):
LOG.debug("profile_update(): "
"profileid=%(profileid)s, kwargs=%(kwargs)s",
{'profileid': profile_id, 'kwargs': kwargs})
body = {'network_profile': kwargs}
profile = neutronclient(request).update_network_profile(
profile_id, body=body).get('network_profile')
return Profile(profile)
def profile_bindings_list(request, type_p, **params):
LOG.debug("profile_bindings_list(): "
"profile_type=%(profile_type)s params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
bindings = neutronclient(request).list_network_profile_bindings(
**params).get('network_profile_bindings')
elif type_p == 'policy':
bindings = neutronclient(request).list_policy_profile_bindings(
**params).get('policy_profile_bindings')
return [Profile(n) for n in bindings]
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s" % kwargs)
body = {'router': {}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['router'].update(kwargs)
router = neutronclient(request).create_router(body=body).get('router')
return Router(router)
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
body = {'router': {}}
body['router'].update(kwargs)
router = neutronclient(request).update_router(r_id, body=body)
return Router(router['router'])
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
client = neutronclient(request)
return client.add_interface_router(router_id, body)
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
neutronclient(request).remove_interface_router(router_id, body)
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
def router_static_route_list(request, router_id=None):
router = router_get(request, router_id)
try:
routes = [RouterStaticRoute(r) for r in router.routes]
except AttributeError:
LOG.debug("router_static_route_list(): router_id=%s, "
"router=%s", (router_id, router))
return []
return routes
def router_static_route_remove(request, router_id, route_ids):
currentroutes = router_static_route_list(request, router_id=router_id)
newroutes = []
for oldroute in currentroutes:
if oldroute.id not in route_ids:
newroutes.append({'nexthop': oldroute.nexthop,
'destination': oldroute.destination})
body = {'routes': newroutes}
new = router_update(request, router_id, **body)
return new
def router_static_route_add(request, router_id, newroute):
body = {}
currentroutes = router_static_route_list(request, router_id=router_id)
body['routes'] = [newroute] + [{'nexthop': r.nexthop,
'destination': r.destination}
for r in currentroutes]
new = router_update(request, router_id, **body)
return new
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
Should be used when up to date networking information is required,
and Nova's networking info caching mechanism is not fast enough.
"""
# Get all (filtered for relevant servers) information from Neutron
try:
ports = list_resources_with_long_filters(
port_list, 'device_id', [instance.id for instance in servers],
request=request)
fips = FloatingIpManager(request)
if fips.is_supported():
floating_ips = list_resources_with_long_filters(
fips.list, 'port_id', [port.id for port in ports],
all_tenants=all_tenants)
else:
floating_ips = []
networks = list_resources_with_long_filters(
network_list, 'id', set([port.network_id for port in ports]),
request=request)
except Exception:
error_message = _('Unable to connect to Neutron.')
LOG.error(error_message)
messages.error(request, error_message)
return
# Map instance to its ports
instances_ports = collections.defaultdict(list)
for port in ports:
instances_ports[port.device_id].append(port)
# Map port to its floating ips
ports_floating_ips = collections.defaultdict(list)
for fip in floating_ips:
ports_floating_ips[fip.port_id].append(fip)
# Map network id to its name
network_names = dict(((network.id, network.name) for network in networks))
for server in servers:
try:
addresses = _server_get_addresses(
request,
server,
instances_ports,
ports_floating_ips,
network_names)
except Exception as e:
LOG.error(six.text_type(e))
else:
server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
def _format_address(mac, ip, type):
try:
version = netaddr.IPAddress(ip).version
except Exception as e:
error_message = _('Unable to parse IP address %s.') % ip
LOG.error(error_message)
messages.error(request, error_message)
raise e
return {u'OS-EXT-IPS-MAC:mac_addr': mac,
u'version': version,
u'addr': ip,
u'OS-EXT-IPS:type': type}
addresses = collections.defaultdict(list)
instance_ports = ports.get(server.id, [])
for port in instance_ports:
network_name = network_names.get(port.network_id)
if network_name is not None:
for fixed_ip in port.fixed_ips:
addresses[network_name].append(
_format_address(port.mac_address,
fixed_ip['ip_address'],
u'fixed'))
port_fips = floating_ips.get(port.id, [])
for fip in port_fips:
addresses[network_name].append(
_format_address(port.mac_address,
fip.floating_ip_address,
u'floating'))
return dict(addresses)
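# --- Hedged usage sketch (editor's addition, not part of Horizon) ---
# servers_update_addresses() overlays fresh Neutron addressing data on Nova
# server objects in place. A rough call site, assuming 'request' and a Nova
# server list from the enclosing view:
#
#   servers, _more = nova.server_list(request)
#   servers_update_addresses(request, servers)
#   for server in servers:
#       LOG.debug("Addresses for %s: %s", server.id, server.addresses)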
@memoized
def list_extensions(request):
extensions_list = neutronclient(request).list_extensions()
if 'extensions' in extensions_list:
return extensions_list['extensions']
else:
return {}
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)
    for extension in extensions:
        if extension['alias'] == extension_alias:
            return True
    return False
def is_enabled_by_config(name, default=True):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get(name, default)
@memoized
def is_service_enabled(request, config_name, ext_name):
return (is_enabled_by_config(config_name) and
is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
return (is_enabled_by_config('enable_quotas', False) and
is_extension_supported(request, 'quotas'))
# Using this mechanism till a better plugin/sub-plugin detection
# mechanism is available.
# When using specific plugins the profile_support can be
# turned on if needed to configure and/or use profiles.
# Since this is a temporary mechanism used to detect profile_support
# @memorize is not being used.
# TODO(absubram): Change this config variable check with
# subplugin/plugin detection API when it becomes available.
def is_port_profiles_supported():
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
# Can be used to check for vendor specific plugin
profile_support = network_config.get('profile_support', None)
    if str(profile_support).lower() == 'cisco':
        return True
    return False
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "policies")
# If a key is not contained, the corresponding permission check is skipped.
FEATURE_MAP = {
'dvr': {
'extension': 'dvr',
'config': {
'name': 'enable_distributed_router',
'default': False,
},
'policies': {
'get': 'get_router:distributed',
'create': 'create_router:distributed',
'update': 'update_router:distributed',
}
},
'l3-ha': {
'extension': 'l3-ha',
'config': {'name': 'enable_ha_router',
'default': False},
'policies': {
'get': 'get_router:ha',
'create': 'create_router:ha',
'update': 'update_router:ha',
}
},
}
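# Editor's note: a hypothetical entry showing the expected shape of a new
# FEATURE_MAP item ('ext-example' is not a real Neutron extension). Any of the
# three keys may be omitted, in which case that check is skipped:
#
#   'ext-example': {
#       'extension': 'ext-example',
#       'config': {'name': 'enable_example_feature', 'default': False},
#       'policies': {'get': 'get_router:example',
#                    'create': 'create_router:example',
#                    'update': 'update_router:example'},
#   },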
def get_feature_permission(request, feature, operation=None):
"""Check if a feature-specific field can be displayed.
    This method checks the permission for a feature-specific field. Such a
    field is usually provided through a Neutron extension.
    :param request: Request Object
    :param feature: feature name defined in FEATURE_MAP
    :param operation (optional): Operation type. The valid values are
        defined in FEATURE_MAP[feature]['policies'].
        It must be specified if FEATURE_MAP[feature] has 'policies'.
"""
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
feature_info = FEATURE_MAP.get(feature)
if not feature_info:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The requested feature '%(feature)s' is unknown. "
"Please make sure to specify a feature defined "
"in FEATURE_MAP."))
# Check dashboard settings
feature_config = feature_info.get('config')
if feature_config:
if not network_config.get(feature_config['name'],
feature_config['default']):
return False
# Check policy
feature_policies = feature_info.get('policies')
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if feature_policies and policy_check:
policy_name = feature_policies.get(operation)
if not policy_name:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The 'operation' parameter for "
"get_feature_permission '%(feature)s' "
"is invalid. It should be one of %(allowed)s")
% {'feature': feature,
'allowed': ' '.join(feature_policies.keys())})
role = (('network', policy_name),)
if not policy.check(role, request):
return False
# Check if a required extension is enabled
feature_extension = feature_info.get('extension')
if feature_extension:
try:
return is_extension_supported(request, feature_extension)
except Exception:
msg = (_("Failed to check Neutron '%s' extension is not supported")
% feature_extension)
LOG.info(msg)
return False
# If all checks are passed, now a given feature is allowed.
return True
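# --- Hedged usage sketch (editor's addition, not part of Horizon) ---
# A router create/update form would typically guard its HA and DVR fields with
# this helper; 'request' and 'fields' belong to the enclosing form class:
#
#   if get_feature_permission(request, "l3-ha", "create"):
#       fields["ha"] = forms.BooleanField(label=_("High Availability Mode"),
#                                         required=False)
#   if get_feature_permission(request, "dvr", "create"):
#       fields["distributed"] = forms.BooleanField(label=_("Distributed"),
#                                                  required=False)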
|
redhat-cip/horizon
|
openstack_dashboard/api/neutron.py
|
Python
|
apache-2.0
| 48,033
|
# -*- coding: utf-8 -*-
import wx
from wx.lib.scrolledpanel import ScrolledPanel
from classes.ui import MultiSplitterWindow
class WellPlotInternal(wx.SplitterWindow):
SASH_POSITION = 100
def __init__(self, parent):
super().__init__(parent, style=wx.SP_THIN_SASH)
self._top_panel = BaseScrolled(self)
self._bottom_panel = BaseScrolled(self)
self.SplitHorizontally(self._top_panel, self._bottom_panel)
self.SetSashPosition(self._top_panel._splitter.GetSize().GetHeight()) # + 2)
self.SetMinimumPaneSize(self.SASH_POSITION)
self.Bind(wx.EVT_SIZE, self._on_size)
def _on_size(self, event):
event.Skip()
def _on_sash_changing(self, event):
if isinstance(event, wx.SplitterEvent):
event.SetSashPosition(-1)
else:
event.Skip()
def __len__(self):
if len(self.top_splitter) != len(self.bottom_splitter):
raise Exception('ERROR: Top panel and bottom panel must have same size.')
return len(self.top_splitter)
def insert(self, pos, top_window, bottom_window, width):
self.top_splitter.InsertWindow(pos, top_window, width)
self.bottom_splitter.InsertWindow(pos, bottom_window, width)
    def append(self, top_window, bottom_window, width):
        pos = len(self.top_splitter)
        self.insert(pos, top_window, bottom_window, width)
@property
def top_splitter(self):
return self._top_panel._splitter
@property
def bottom_splitter(self):
return self._bottom_panel._splitter
class BaseScrolled(ScrolledPanel):
def __init__(self, parent, **kwargs):
ScrolledPanel.__init__(self, parent, -1, style=wx.BORDER_STATIC)
self._splitter = MultiSplitterWindow(self)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.item = hbox.Add(self._splitter, 0, wx.EXPAND)
hbox.Layout()
self.SetSizer(hbox)
self.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_ALWAYS)
self.SetupScrolling()
def fit(self, fit=True):
self._splitter._SetFit(fit)
self.Layout()
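# --- Hedged usage sketch (editor's addition, not part of GRIPy) ---
# Rough shape of how WellPlotInternal might be embedded; the track panels
# passed to insert() are placeholders and the width value is illustrative:
#
#   app = wx.App(False)
#   frame = wx.Frame(None, title='WellPlot')
#   plot = WellPlotInternal(frame)
#   plot.insert(0, wx.Panel(plot.top_splitter),
#               wx.Panel(plot.bottom_splitter), width=120)
#   frame.Show()
#   app.MainLoop()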
|
giruenf/GRIPy
|
classes/ui/wellplot_internal.py
|
Python
|
apache-2.0
| 2,105
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from sqlalchemy import DateTime
from superset import db
from superset.utils import core as utils
from .helpers import config, get_example_data, get_slice_json, merge_slice, Slice, TBL
def load_random_time_series_data(only_metadata=False, force=False):
"""Loading random time series data from a zip file in the repo"""
tbl_name = "random_time_series"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("random_time_series.json.gz")
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit="s")
pdf.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={"ds": DateTime},
index=False,
)
print("Done loading table!")
print("-" * 80)
print(f"Creating table [{tbl_name}] reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "ds"
obj.database = database
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "day",
"row_limit": config["ROW_LIMIT"],
"since": "1 year ago",
"until": "now",
"metric": "count",
"viz_type": "cal_heatmap",
"domain_granularity": "month",
"subdomain_granularity": "day",
}
print("Creating a slice")
slc = Slice(
slice_name="Calendar Heatmap",
viz_type="cal_heatmap",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
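# Editor's note (hedged): example loaders like this one are normally driven by
# the Superset examples CLI rather than imported directly; a direct call would
# look roughly like the following, with force=True rebuilding the table:
#
#   load_random_time_series_data(only_metadata=False, force=True)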
|
zhouyao1994/incubator-superset
|
superset/examples/random_time_series.py
|
Python
|
apache-2.0
| 2,622
|
from common_fixtures import * # NOQA
logger = logging.getLogger(__name__)
def create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port="22", isnetworkModeHost_svc=False,
isnetworkModeHost_consumed_svc=False):
if not isnetworkModeHost_svc and not isnetworkModeHost_consumed_svc:
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
else:
env, service, consumed_service = create_env_with_2_svc_hostnetwork(
client, service_scale, consumed_service_scale, port, ssh_port,
isnetworkModeHost_svc, isnetworkModeHost_consumed_svc)
service.activate()
consumed_service.activate()
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
service = client.wait_success(service, 120)
consumed_service = client.wait_success(consumed_service, 120)
assert service.state == "active"
assert consumed_service.state == "active"
validate_add_service_link(super_client, service, consumed_service)
return env, service, consumed_service
def test_link_activate_svc_activate_consumed_svc_link(super_client, client):
port = "301"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_activate_consumed_svc_link_activate_svc(super_client, client):
port = "302"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
consumed_service = activate_svc(client, consumed_service)
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
service = activate_svc(client, service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_activate_svc_link_activate_consumed_svc(super_client, client):
port = "303"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service = activate_svc(client, service)
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
consumed_service = activate_svc(client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_link_activate_consumed_svc_activate_svc(super_client, client):
port = "304"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
consumed_service = activate_svc(client, consumed_service)
service = activate_svc(client, service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_link_activate_svc_activate_consumed_svc(super_client, client):
port = "305"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
service = activate_svc(client, service)
consumed_service = activate_svc(client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_link_when_services_still_activating(super_client, client):
port = "306"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
service.activate()
consumed_service.activate()
service.setservicelinks(serviceLinks=[{"serviceId": consumed_service.id,
"name": "mylink"}])
service = client.wait_success(service, 120)
consumed_service = client.wait_success(consumed_service, 120)
assert service.state == "active"
assert consumed_service.state == "active"
validate_add_service_link(super_client, service, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_service_scale_up(super_client, client):
port = "307"
service_scale = 1
consumed_service_scale = 2
final_service_scale = 3
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_services_scale_down(super_client, client):
port = "308"
service_scale = 3
consumed_service_scale = 2
final_service_scale = 1
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_scale_up(super_client, client):
port = "309"
service_scale = 1
consumed_service_scale = 2
final_consumed_service_scale = 4
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
consumed_service = client.update(consumed_service,
scale=final_consumed_service_scale,
name=consumed_service.name)
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "active"
assert consumed_service.scale == final_consumed_service_scale
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_scale_down(super_client, client):
port = "310"
service_scale = 2
consumed_service_scale = 3
final_consumed_service_scale = 1
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
consumed_service = client.update(consumed_service,
scale=final_consumed_service_scale,
name=consumed_service.name)
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "active"
assert consumed_service.scale == final_consumed_service_scale
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_stop_start_instance(super_client, client):
port = "311"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + consumed_service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Stop instance
container = client.wait_success(container.stop(), 120)
service = client.wait_success(service)
wait_for_scale_to_adjust(super_client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_restart_instance(super_client, client):
port = "312"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + consumed_service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Restart instance
container = client.wait_success(container.restart(), 120)
assert container.state == 'running'
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_delete_instance(super_client, client):
port = "313"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + consumed_service.name + "_1"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Delete instance
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
wait_for_scale_to_adjust(super_client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_consumed_services_deactivate_activate(super_client, client):
port = "314"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
consumed_service = consumed_service.deactivate()
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "inactive"
wait_until_instances_get_stopped(super_client, consumed_service)
consumed_service = consumed_service.activate()
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_service_deactivate_activate(super_client, client):
port = "315"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
service = service.deactivate()
service = client.wait_success(service, 120)
assert service.state == "inactive"
wait_until_instances_get_stopped(super_client, service)
service = service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_deactivate_activate_environment(super_client, client):
port = "316"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
env = env.deactivateservices()
service = client.wait_success(service, 120)
assert service.state == "inactive"
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "inactive"
wait_until_instances_get_stopped(super_client, consumed_service)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
consumed_service = client.wait_success(consumed_service, 120)
assert consumed_service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_add_remove_servicelinks(super_client, client):
port = "317"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
# Add another service to environment
launch_config = {"imageUuid": WEB_IMAGE_UUID}
random_name = random_str()
consumed_service_name = random_name.replace("-", "")
consumed_service1 = client.create_service(name=consumed_service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
consumed_service1 = client.wait_success(consumed_service1)
assert consumed_service1.state == "inactive"
consumed_service1 = consumed_service1.activate()
consumed_service1 = client.wait_success(consumed_service1, 120)
assert consumed_service1.state == "active"
# Add another service link
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"},
{"serviceId": consumed_service1.id, "name": "mylink2"}])
validate_add_service_link(super_client, service, consumed_service)
validate_add_service_link(super_client, service, consumed_service1)
validate_linked_service(super_client, service,
[consumed_service], port,
linkName="mylink")
validate_linked_service(super_client, service,
[consumed_service1], port,
linkName="mylink2")
# Remove existing service link to the service
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service1.id, "name": "mylink2"}])
validate_linked_service(super_client, service, [consumed_service1], port,
linkName="mylink2")
delete_all(client, [env])
def test_link_services_delete_service_add_service(super_client, client):
port = "318"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
# Delete Service
service = client.wait_success(client.delete(service))
assert service.state == "removed"
validate_remove_service_link(super_client, service, consumed_service)
port1 = "3180"
# Add another service and link to consumed service
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port1+":22/tcp"]}
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = service1.activate()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
service1.setservicelinks(
serviceLinks=[{"serviceId": consumed_service.id, "name": "mylink"}])
validate_add_service_link(super_client, service1, consumed_service)
validate_linked_service(super_client, service1, [consumed_service], port1,
linkName="mylink")
delete_all(client, [env])
def test_link_services_delete_and_add_consumed_service(super_client, client):
port = "319"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
# Delete consume service
consumed_service = client.wait_success(client.delete(consumed_service))
assert consumed_service.state == "removed"
validate_remove_service_link(super_client, service, consumed_service)
# Add another consume service and link the service to this newly created
# service
launch_config = {"imageUuid": WEB_IMAGE_UUID}
random_name = random_str()
service_name = random_name.replace("-", "")
consumed_service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=1)
consumed_service1 = client.wait_success(consumed_service1)
assert consumed_service1.state == "inactive"
consumed_service1 = consumed_service1.activate()
consumed_service1 = client.wait_success(consumed_service1, 120)
assert consumed_service1.state == "active"
service.setservicelinks(
serviceLinks=[{"serviceId": consumed_service1.id,
"name": "mylink1"}])
validate_add_service_link(super_client, service, consumed_service1)
validate_linked_service(super_client, service, [consumed_service1], port,
linkName="mylink1")
delete_all(client, [env])
def test_link_services_stop_start_instance(super_client, client):
port = "320"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Stop service instance
service_instance = client.wait_success(service_instance.stop(), 120)
service = client.wait_success(service)
wait_for_scale_to_adjust(super_client, service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_services_restart_instance(super_client, client):
port = "321"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
    # Restart service instance
service_instance = client.wait_success(service_instance.restart(), 120)
assert service_instance.state == 'running'
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_link_services_delete_instance(super_client, client):
port = "322"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Delete instance
container = client.wait_success(client.delete(service_instance))
assert container.state == 'removed'
wait_for_scale_to_adjust(super_client, service)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_links_with_hostnetwork_1(super_client, client):
port = "323"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=False,
isnetworkModeHost_consumed_svc=True)
validate_linked_service(super_client, service, [consumed_service], port,
linkName="mylink")
delete_all(client, [env])
def test_links_with_hostnetwork_2(super_client, client):
port = "324"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=True,
isnetworkModeHost_consumed_svc=True)
validate_linked_service(
super_client, service, [consumed_service], ssh_port, linkName="mylink")
delete_all(client, [env])
def test_links_with_hostnetwork_3(super_client, client):
port = "325"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_linked_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=True,
isnetworkModeHost_consumed_svc=False)
validate_linked_service(
super_client, service, [consumed_service], ssh_port, linkName="mylink")
delete_all(client, [env])
|
aruneli/validation-tests
|
tests/validation/cattlevalidationtest/core/test_services_links.py
|
Python
|
apache-2.0
| 24,550
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for VNC Proxying."""
from nova import flags
from nova.openstack.common import cfg
vnc_opts = [
cfg.StrOpt('novncproxy_base_url',
default='http://127.0.0.1:6080/vnc_auto.html',
help='location of vnc console proxy, in the form '
'"http://127.0.0.1:6080/vnc_auto.html"'),
cfg.StrOpt('xvpvncproxy_base_url',
default='http://127.0.0.1:6081/console',
help='location of nova xvp vnc console proxy, in the form '
'"http://127.0.0.1:6081/console"'),
cfg.StrOpt('vncserver_listen',
default='127.0.0.1',
               help='IP address on which instance VNC servers should listen'),
cfg.StrOpt('vncserver_proxyclient_address',
default='127.0.0.1',
help='the address to which proxy clients '
'(like nova-xvpvncproxy) should connect'),
cfg.BoolOpt('vnc_enabled',
default=True,
help='enable vnc related features'),
cfg.StrOpt('vnc_keymap',
default='en-us',
help='keymap for vnc'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vnc_opts)
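# Editor's note (hedged): once registered, these options are read through the
# global FLAGS object elsewhere in Nova; the token variable below is assumed
# to come from the calling console code:
#
#   if FLAGS.vnc_enabled:
#       access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)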
|
russellb/nova
|
nova/vnc/__init__.py
|
Python
|
apache-2.0
| 1,887
|
# coding: utf-8
__author__ = 'Administrator'
import re
import os
from bs4 import BeautifulSoup
import urllib
import urllib2
def getHtml(url):
html = urllib.urlopen(url)
scode = html.read()
return scode
def getImage(source):
#images = re.findall(r'input src=.*?img.picuphost.com/img/upload/image/.*?\.jpeg type="image"',source)
#images = re.findall(r'input src="(.*\.jpeg)" type=',source)
re2 = r'input src=.*?/.*?\.jp.g'
imgre = re.compile(re2)
images = re.findall(imgre,source)
#print(images)
x = 0
pic=[]
for i in images:
picurl=i.replace("input src='",'')
pic.append(picurl)
#print(picurl)
#print(picurl,type(picurl))
# save_path = "c://downloads"
# fileName = save_path + "\\{0}.jpg".format(str(x))
# imgData = urllib2.urlopen(picurl).read()
x+=1
return pic
def getImage2(source):
re2=r'src="http://.+?.jpeg"'
imgre = re.compile(re2)
images = re.findall(imgre,source)
return images
def saveImg(imageURL):
    file = 0
    for i in imageURL:
        # save each image as <index>.jpg in the current directory
        urllib.urlretrieve(i, "{0}.jpg".format(file))
        print(i)
        file += 1
if __name__=='__main__':
#source=getHtml('http://www.cl529.com/htm_data/16/1512/1775897.html')
source=getHtml('http://www.cl547.com/htm_data/16/1512/1767606.html')
pic=getImage(source)
print(pic)
#saveImg(pic)
#input src="http://ipoock.com/img/g4/20151222040151e4u6a.jpeg" type="image"
# t="input src='http://img.picuphost.com/img/upload/image/20151130/113000016301.jpeg"
# url=t.replace("input src='",'')
# print(url,type(url))
# source=getHtml('http://www.cl529.com/htm_data/16/1512/1775897.html')
# print(source)
# print getImage2(source)
# #src='http://img.picuphost.com/img/upload/image/20151130/113000016308.jpeg'
# x=re.findall(r'input src=.*?img.picuphost.com/img/upload/image/.*?\.jpeg',source)
# #x=re.findall(r'src=.*?img.picuphost.com/img/upload/image/.\d*?\.jpeg',source)
# for i in x:
# print(i)
|
wangfengfighting/Caoliu-master
|
DecodeHTML.py
|
Python
|
apache-2.0
| 2,125
|
"""
Prints ExifTags from an image using PIL
"""
from yapsy.IPlugin import IPlugin
try:
from PIL import Image, ExifTags
except ImportError:
    Image = ExifTags = None
import os
from flask import render_template_string
TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
<style>
table {
overflow-y: scroll;
width: 100%;
}
table, th, td {
border: 0px;
border-collapse: collapse;
}
th, td {
text-align: left;
padding: 10px;
}
table#t01 tr:nth-child(even) {
background-color: #fff;
}
table#t01 tr:nth-child(odd) {
background-color:#eee;
}
table#t01 th {
background-color: #444;
color: white;
}
html{
height: 100%;
}
body {
min-height: 100%;
margin: 0px;
}
</style>
</head>
<body>
<table id="t01" class="display">
<thead>
<tr>
<th>Name</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for key, value in table %}
<tr><td>{{ key }}</td><td>{{ value }}</td></tr>
{% endfor %}
{% for key, value in safe_table %}
<tr><td>{{ key }}</td><td>{{ value | safe }}</td></tr>
{% endfor %}
    </tbody>
</table>
</body>
</html>
"""
class FaExif(IPlugin):
def __init__(self):
self.display_name = 'EXIF'
self.popularity = 6
self.category = 'multimedia'
self.cache = True
self.fast = False
self.action = False
self.icon = 'fa-file-image-o'
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def check(self, evidence, path_on_disk):
"""Checks if the file is compatible with this plugin"""
allowed = [ 'image/jpeg', 'image/tiff', 'image/x-tiff' ]
return evidence['mimetype'].lower() in allowed and Image
def mimetype(self, mimetype):
"""Returns the mimetype of this plugins get command"""
return "text/plain"
def get(self, evidence, helper, path_on_disk, request):
"""Returns the result of this plugin to be displayed in a browser"""
cached = os.path.isfile(evidence['file_cache_path'])
if cached:
image = Image.open(evidence['file_cache_path'])
else:
image = Image.open(self._open_file_object(evidence['pathspec']))
if not image._getexif():
return '<xmp>No Exif data found</xmp>'
exif_data = {
ExifTags.TAGS[k]: v
for k, v in image._getexif().items()
if k in ExifTags.TAGS
}
table = []
safe_table = []
for key, value in exif_data.iteritems():
table.append((key, value))
if key == 'GPSInfo':
lat_long = get_lat_lon(value)
safe_table.append(('Google Maps', '<a target="_blank" href="https://maps.google.com/?q=loc:'
+ str(lat_long[0]) + ',' + str(lat_long[1]) + '">Link to Google Maps</a>'))
return render_template_string(TEMPLATE, table=table, safe_table=safe_table)
def get_exif_data(image):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = ExifTags.TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = ExifTags.GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degrees(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(gps_info_value):
"""Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)"""
lat = 0
lon = 0
gps_info = {}
for t in gps_info_value:
sub_decoded = ExifTags.GPSTAGS.get(t, t)
gps_info[sub_decoded] = gps_info_value[t]
gps_latitude = _get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degrees(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degrees(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
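# Editor's worked example (hedged, values are made up): with EXIF rationals
#   GPSLatitude  = ((40, 1), (26, 1), (4614, 100)),  GPSLatitudeRef  = 'N'
#   GPSLongitude = ((79, 1), (58, 1), (5640, 100)),  GPSLongitudeRef = 'W'
# _convert_to_degrees() yields 40 + 26/60 + 46.14/3600 ~= 40.4462 and
# 79 + 58/60 + 56.40/3600 ~= 79.9823, so get_lat_lon() returns roughly
# (40.4462, -79.9823); the 'W' reference flips the longitude sign.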
|
maurermj08/efetch
|
efetch_server/plugins/fa_exif/fa_exif.py
|
Python
|
apache-2.0
| 5,586
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mini Fantasy War
# https://www.acmicpc.net/problem/12790
import sys
num = int(sys.stdin.readline().strip())
for cnt in range(num):
power_list = str(sys.stdin.readline().strip()).split(' ')
HP = 1 if (int(power_list[0]) + int(power_list[4])) < 1 else (int(power_list[0]) + int(power_list[4]))
MP = 1 if (int(power_list[1]) + int(power_list[5])) < 1 else (int(power_list[1]) + int(power_list[5]))
PO = 0 if (int(power_list[2]) + int(power_list[6])) < 0 else (int(power_list[2]) + int(power_list[6]))
DE = int(power_list[3]) + int(power_list[7])
power = 1 * (HP) + 5 * (MP) + 2 * (PO) + 2 * (DE)
print(power)
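# Editor's worked example (hedged): for an input line "1 2 3 4 5 6 7 8",
# HP = max(1, 1+5) = 6, MP = max(1, 2+6) = 8, PO = max(0, 3+7) = 10, DE = 4+8 = 12,
# so power = 1*6 + 5*8 + 2*10 + 2*12 = 90.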
|
youngjun0528/moheeto
|
BaekjoonOnlineJudge/acmicpc_12790.py
|
Python
|
apache-2.0
| 688
|
import abc
from lab.with_log import WithLogMixIn
class TestCaseWorker(WithLogMixIn):
STATUS_CREATED = 'created'
STATUS_SETUP_RUNING = 'status=NonParallelSetupRunning'
STATUS_SETUP_FINISHED = 'status=NonParallelSetupFinished ' + 47 * '-'
STATUS_DELAYED = 'delayed'
STATUS_LOOPING = 'looping'
STATUS_FINISHED = 'finished'
STATUS_FAILED = 'status=FAILED message='
STATUS_PASSED = 'status=PASSED message='
ARG_MANDATORY_DELAY = 'delay'
ARG_MANDATORY_RUN = 'run'
ARG_OPTIONAL_PAUSE_AT_START = 'pause_at_start'
ARG_OPTIONAL_PAUSE_AT_END = 'pause_at_end'
ARG_OPTIONAL_TIMEOUT = 'timeout'
def __repr__(self):
return u'TCW={}.{}'.format(self.test_case.path.split('-')[0], self.name)
def __init__(self, test_case, args_dict):
""" Executed before subprocesses start in the context of RunnerHA.execute()
:param args_dict: dictionry with custom worker arguments, they are checked in check_config()
"""
self.test_case = test_case
self.name = args_dict.pop('name')
        self.successes = []  # successes will be collected here in self.passed()
        self.failures = []  # failures (problems in software under test) will be collected here in self.failed()
self.errors = [] # errors (problems in this code) will be collected here self.start_worker_parallel()
self.status_dict = None # will be set just before running multiprocessing.Pool.map() to multiprocessing.Manager().dict()
self.cloud = None
self.args = {} # all arguments will be kept in this dict
self.loop_counter = 0 # counts loops executed
args_dict.pop('class')
all_arg_names = set([x for x in dir(self) if x.startswith('ARG')])
correct_names = set([x for x in all_arg_names if x.startswith('ARG_MANDATORY_') or x.startswith('ARG_OPTIONAL_')])
assert len(all_arg_names - correct_names) == 0, '{}: has wrong argument definitions: {}, all must start with either ARG_MANDATORY_ or ARG_OPTIONAL_'.format(self.__class__, all_arg_names - correct_names)
optional_arguments = set([getattr(self, x) for x in correct_names if x.startswith('ARG_OPTIONAL_')])
required_arguments = set([getattr(self, x) for x in correct_names if x.startswith('ARG_MANDATORY_')])
all_arguments = optional_arguments ^ required_arguments
assert all_arguments.issubset(dir(self)), '{}: please define method(s) decorated with @property {}'.format(self.__class__, all_arguments - set(dir(self)))
for x in sorted(required_arguments):
assert x in args_dict, '{}: no mandatory argument "{}"'.format(self, x)
self.args[x] = args_dict.pop(x)
for x in sorted(optional_arguments):
if x in args_dict:
self.args[x] = args_dict.pop(x)
assert len(args_dict) == 0, '{}: argument dict contains not known key(s): {}'.format(self, args_dict)
if type(self.run) is not list:
assert type(self.run) is int and self.run > 0
if type(self.delay) is not list:
assert type(self.delay) is int and self.delay >= 0, '{}: wrong delay "{}". should be list of names or int'.format(self, self.delay)
self.check_arguments()
@property
def description(self):
return ' '.join(['{}={}'.format(x[0], x[1]) for x in self.args.items()])
@property
def delay(self): # delay: 3 means delay by 3 secs after common start, delay: [name1, name2] means delay until workers name1, name2 go to self.STATUS_FINISHED
return self.args[self.ARG_MANDATORY_DELAY]
@property
def run(self): # run: 3 means repeat 3 times, run: [name1, name2] means run until workers name1, name2 go to self.STATUS_FINISHED
return self.args[self.ARG_MANDATORY_RUN]
@property
def timeout(self): # if operation will not successfully finished in that time, the worker will be forced to quit with exception
return self.args.get(self.ARG_OPTIONAL_TIMEOUT, 1000)
@property
def pause_at_start(self): # wait this time at the start of each loop
return self.args.get(self.ARG_OPTIONAL_PAUSE_AT_START, 0)
@property
def pause_at_end(self): # wait this time at the end of each loop
return self.args.get(self.ARG_OPTIONAL_PAUSE_AT_END, 0)
@property
def pod(self):
return self.cloud.pod
@property
def mgm(self):
return self.pod.mgm
def set_status(self, status):
self.status_dict[self.name] = status
@abc.abstractmethod
def check_arguments(self):
raise NotImplementedError(self, 'check_arguments')
@abc.abstractmethod
def setup_worker(self):
raise NotImplementedError(self)
@abc.abstractmethod
def loop_worker(self):
raise NotImplementedError(self)
def teardown_worker(self):
pass
def is_ready_to_finish(self):
if type(self.run) is list:
return all([self.status_dict[x] == self.STATUS_FINISHED for x in self.run])
else:
return self.run == self.loop_counter
def delay_execution(self):
import time
self.set_status(status=self.STATUS_DELAYED)
time_passed = 0
if type(self.delay) is list:
            self.log('status=delayed until other={} finish'.format(self.delay))
while all([self.status_dict[x] != self.STATUS_FINISHED for x in self.delay]):
time.sleep(1)
time_passed += 1
if time_passed == self.timeout:
raise RuntimeError('{} not finished in {} secs'.format(self.delay, self.timeout))
self.log('status=active since other={} finished'.format(self.delay))
else:
self.log('status=delayed for time={} secs...'.format(self.delay))
time.sleep(1 if self.test_case.is_debug else self.delay)
def start_worker_parallel(self):
"""This code is executed once when subprocess starts. This is the only entry point to the worker loop."""
import os
import time
import sys
import fabric.network
worker_parameters = 'parameters ppid={} pid={} {}'.format(os.getppid(), os.getpid(), self.description)
self.log(worker_parameters)
time.sleep(1)
try:
self.delay_execution()
self.set_status(status=self.STATUS_LOOPING)
while not self.is_ready_to_finish():
if self.pause_at_start > 0:
self.log('status=pause_loop{}_at_start time={} sec ...'.format(self.loop_counter + 1, self.pause_at_start))
time.sleep(1 if self.test_case.is_debug else self.pause_at_start)
self.log('status=looping{} until={} other={}'.format(self.loop_counter + 1, self.run, self.status_dict))
if not self.test_case.is_debug:
self.loop_worker()
if self.pause_at_end > 0:
self.log('status=pause_loop{}_at_end time={} sec ...'.format(self.loop_counter + 1, self.pause_at_end))
time.sleep(1 if self.test_case.is_debug else self.pause_at_end)
self.log('status=finish_loop{} until={} {} ...'.format(self.loop_counter + 1, self.run, self.status_dict))
self.loop_counter += 1
except RuntimeError as ex:
self.log_exception()
except Exception as ex:
frame = sys.exc_traceback
while frame.tb_next:
frame = frame.tb_next
self.errors.append(str(self) + ': ' + str(ex).replace('\\', '') + ' ' + frame.tb_frame.f_code.co_filename + ':' + str(frame.tb_lineno))
self.log_exception()
fabric.network.disconnect_all()
finally:
time.sleep(1) # sleep to align log output
self.set_status(status=self.STATUS_FINISHED)
self.log('status=finish after loop={} until={} {}'.format(self.loop_counter, self.run, self.status_dict))
self.log(80*'-')
return self
def failed(self, message, is_stop_running):
self.log(self.STATUS_FAILED + str(message))
self.failures.append('{}: {}'.format(self, message))
if is_stop_running:
raise RuntimeError(str(message))
def passed(self, message):
self.log(self.STATUS_PASSED + message)
self.successes.append('{}: {}'.format(self, message))
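# --- Hedged sketch (editor's addition, not part of os-sqe) ---
# Minimal shape of a concrete worker: the three abstract methods must be
# implemented, while delays, looping and status tracking are all handled by
# start_worker_parallel() above. The class and message below are invented:
#
#   class SleepWorker(TestCaseWorker):
#       def check_arguments(self):
#           pass  # validate self.args here
#
#       def setup_worker(self):
#           pass  # one-time setup before looping starts
#
#       def loop_worker(self):
#           self.passed('slept through loop {}'.format(self.loop_counter + 1))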
|
CiscoSystems/os-sqe
|
lab/test_case_worker.py
|
Python
|
apache-2.0
| 8,762
|
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
from lxml import etree
import webob
import nova
from nova.api.openstack.compute.contrib import volumes
from nova.compute import instance_types
from nova import context
import nova.db
from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
from nova import volume
from webob import exc
FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
global _block_device_mapping_seen
_block_device_mapping_seen = kwargs.get('block_device_mapping')
inst_type = instance_types.get_instance_type_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0,
'fixed_ips': []
}], resv_id)
def fake_get_instance(self, context, instance_id):
return({'uuid': instance_id})
def fake_attach_volume(self, context, instance, volume_id, device):
return()
def fake_detach_volume(self, context, volume_id):
return()
def fake_get_instance_bdms(self, context, instance):
return([{'id': 1,
'instance_uuid': instance['uuid'],
'device_name': '/dev/fake0',
'delete_on_termination': 'False',
'virtual_name': 'MyNamesVirtual',
'snapshot_id': None,
'volume_id': FAKE_UUID_A,
'volume_size': 1},
{'id': 2,
'instance_uuid':instance['uuid'],
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'virtual_name': 'MyNamesVirtual',
'snapshot_id': None,
'volume_id': FAKE_UUID_B,
'volume_size': 1}])
class BootFromVolumeTest(test.TestCase):
def setUp(self):
super(BootFromVolumeTest, self).setUp()
self.stubs.Set(nova.compute.API, 'create', fake_compute_api_create)
fakes.stub_out_nw_api(self.stubs)
def test_create_root_volume(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping=[dict(
volume_id=1,
device_name='/dev/vda',
virtual='root',
delete_on_termination=False,
)]
))
global _block_device_mapping_seen
_block_device_mapping_seen = None
req = webob.Request.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
server = json.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(len(_block_device_mapping_seen), 1)
self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
self.assertEqual(_block_device_mapping_seen[0]['device_name'],
'/dev/vda')
def return_volume(context, volume_id):
return {'id': volume_id}
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(nova.db, 'volume_get', return_volume)
self.stubs.Set(volume.api.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get)
self.stubs.Set(volume.api.API, "get_all", fakes.stub_volume_get_all)
self.context = context.get_admin_context()
def test_volume_create(self):
self.stubs.Set(volume.api.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = webob.Request.blank('/v2/fake/os-volumes')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
resp_dict = json.loads(resp.body)
self.assertTrue('volume' in resp_dict)
self.assertEqual(resp_dict['volume']['size'],
vol['size'])
self.assertEqual(resp_dict['volume']['displayName'],
vol['display_name'])
self.assertEqual(resp_dict['volume']['displayDescription'],
vol['display_description'])
self.assertEqual(resp_dict['volume']['availabilityZone'],
vol['availability_zone'])
def test_volume_create_no_body(self):
req = webob.Request.blank('/v2/fake/os-volumes')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
def test_volume_index(self):
req = webob.Request.blank('/v2/fake/os-volumes')
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
def test_volume_detail(self):
req = webob.Request.blank('/v2/fake/os-volumes/detail')
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
def test_volume_show(self):
req = webob.Request.blank('/v2/fake/os-volumes/123')
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
def test_volume_show_no_volume(self):
self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get_notfound)
req = webob.Request.blank('/v2/fake/os-volumes/456')
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 404)
def test_volume_delete(self):
req = webob.Request.blank('/v2/fake/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get_notfound)
req = webob.Request.blank('/v2/fake/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 404)
class VolumeAttachTests(test.TestCase):
def setUp(self):
super(VolumeAttachTests, self).setUp()
self.stubs.Set(nova.compute.API,
'get_instance_bdms',
fake_get_instance_bdms)
self.stubs.Set(nova.compute.API, 'get', fake_get_instance)
self.context = context.get_admin_context()
self.expected_show = {'volumeAttachment':
{'device': '/dev/fake0',
'serverId': FAKE_UUID,
'id': FAKE_UUID_A,
'volumeId': FAKE_UUID_A
}}
def test_show(self):
attachments = volumes.VolumeAttachmentController()
req = webob.Request.blank('/v2/fake/os-volumes/show')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.show(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual(self.expected_show, result)
def test_delete(self):
self.stubs.Set(nova.compute.API, 'detach_volume', fake_detach_volume)
attachments = volumes.VolumeAttachmentController()
req = webob.Request.blank('/v2/fake/os-volumes/delete')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual('202 Accepted', result.status)
def test_delete_vol_not_found(self):
self.stubs.Set(nova.compute.API, 'detach_volume', fake_detach_volume)
attachments = volumes.VolumeAttachmentController()
req = webob.Request.blank('/v2/fake/os-volumes/delete')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_C)
def test_attach_volume(self):
self.stubs.Set(nova.compute.API, 'attach_volume', fake_attach_volume)
attachments = volumes.VolumeAttachmentController()
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = webob.Request.blank('/v2/fake/os-volumes/attach')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = attachments.create(req, FAKE_UUID, body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volumeId', 'serverId', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(tree.tag, 'volume')
for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
'displayName', 'displayDescription', 'volumeType',
'snapshotId'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
self.assertTrue(child.tag in ('attachments', 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertTrue(gr_child.tag in not_seen)
self.assertEqual(str(vol['metadata'][gr_child.tag]),
gr_child.text)
not_seen.remove(gr_child.tag)
self.assertEqual(0, len(not_seen))
def test_attach_show_create_serializer(self):
serializer = volumes.VolumeAttachmentTemplate()
raw_attach = dict(
id='vol_id',
volumeId='vol_id',
serverId='instance_uuid',
device='/foo')
text = serializer.serialize(dict(volumeAttachment=raw_attach))
print text
tree = etree.fromstring(text)
self.assertEqual('volumeAttachment', tree.tag)
self._verify_volume_attachment(raw_attach, tree)
def test_attach_index_serializer(self):
serializer = volumes.VolumeAttachmentsTemplate()
raw_attaches = [dict(
id='vol_id1',
volumeId='vol_id1',
serverId='instance1_uuid',
device='/foo1'),
dict(
id='vol_id2',
volumeId='vol_id2',
serverId='instance2_uuid',
device='/foo2')]
text = serializer.serialize(dict(volumeAttachments=raw_attaches))
print text
tree = etree.fromstring(text)
self.assertEqual('volumeAttachments', tree.tag)
self.assertEqual(len(raw_attaches), len(tree))
for idx, child in enumerate(tree):
self.assertEqual('volumeAttachment', child.tag)
self._verify_volume_attachment(raw_attaches[idx], child)
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availabilityZone='vol_availability',
createdAt=datetime.datetime.now(),
attachments=[dict(
id='vol_id',
volumeId='vol_id',
serverId='instance_uuid',
device='/foo')],
displayName='vol_name',
displayDescription='vol_desc',
volumeType='vol_type',
snapshotId='snap_id',
metadata=dict(
foo='bar',
baz='quux',
),
)
text = serializer.serialize(dict(volume=raw_volume))
print text
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(
id='vol1_id',
status='vol1_status',
size=1024,
availabilityZone='vol1_availability',
createdAt=datetime.datetime.now(),
attachments=[dict(
id='vol1_id',
volumeId='vol1_id',
serverId='instance_uuid',
device='/foo1')],
displayName='vol1_name',
displayDescription='vol1_desc',
volumeType='vol1_type',
snapshotId='snap1_id',
metadata=dict(
foo='vol1_foo',
bar='vol1_bar',
),
),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availabilityZone='vol2_availability',
createdAt=datetime.datetime.now(),
attachments=[dict(
id='vol2_id',
volumeId='vol2_id',
serverId='instance_uuid',
device='/foo2')],
displayName='vol2_name',
displayDescription='vol2_desc',
volumeType='vol2_type',
snapshotId='snap2_id',
metadata=dict(
foo='vol2_foo',
bar='vol2_bar',
),
)]
text = serializer.serialize(dict(volumes=raw_volumes))
print text
tree = etree.fromstring(text)
self.assertEqual('volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
|
usc-isi/extra-specs
|
nova/tests/api/openstack/compute/contrib/test_volumes.py
|
Python
|
apache-2.0
| 16,379
|
#!/usr/bin/python
#-----------------------------------------------------------------------------------------------
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------'''
from setuptools import setup
setup( name = 'SBusPythonFacade',
version = '1.0',
package_dir={'SBusPythonFacade':''},
packages=['SBusPythonFacade'] )
|
Open-I-Beam/swift-storlets
|
Engine/SBus/SBusPythonFacade/setup.py
|
Python
|
apache-2.0
| 1,009
|
#!/usr/bin/python
"""
Setup script for Gittle.
"""
import platform
windows = platform.system() == 'Windows'
try:
from setuptools import setup
except ImportError:
has_setuptools = False
from distutils.core import setup
else:
has_setuptools = True
version_string = '0.4.0'
setup_kwargs = {
'name': 'gittle',
'description': 'A high level pure python git implementation',
'keywords': 'git dulwich pure python gittle',
'version': version_string,
'url': 'https://github.com/FriendCode/gittle',
'license': 'MIT',
'author': "Aaron O'Mullan",
'author_email': 'aaron@friendco.de',
'long_description': """
Gittle is a wrapper around dulwich. It provides an easy and familiar interface to git.
It's pure python (no dependency on the git binary) and has no other dependencies besides
the python stdlib, dulwich and paramiko (optional).
""",
'packages': ['gittle', 'gittle.utils'],
'install_requires': [
# PyPI
'paramiko==1.10.0',
'pycrypto==2.6',
'dulwich==0.9.7',
'funky==0.0.2',
],
}
try:
# Run setup with C extensions
setup(**setup_kwargs)
except SystemExit as exc:
import logging
logging.exception(exc)
logging.info("retrying installation without VisualStudio...")
# Remove C dependencies
install_requires = [r for r in setup_kwargs['install_requires']
if r.split('=')[0] not in ('paramiko', 'pycrypto')]
# Install dulwich as pure Python
if windows and has_setuptools:
from setuptools.command.easy_install import easy_install
run_setup = easy_install.run_setup
def _run_setup(self, setup_script, setup_base, args):
"""Alternate run_setup function to pass '--pure' to the
Dulwich installer on Windows.
"""
if 'dulwich' in setup_script:
args.insert(0, '--pure')
run_setup(self, setup_script, setup_base, args)
easy_install.run_setup = _run_setup
# Run setup without C extensions
setup_kwargs['install_requires'] = install_requires
setup(**setup_kwargs)
|
0asa/gittle
|
setup.py
|
Python
|
apache-2.0
| 2,136
|
import asyncio
import ipaddress
import socket
from unittest.mock import Mock, patch
import pytest
from aiohttp.resolver import AsyncResolver, DefaultResolver, ThreadedResolver
try:
import aiodns
gethostbyname = hasattr(aiodns.DNSResolver, 'gethostbyname')
except ImportError:
aiodns = None
gethostbyname = False
class FakeResult:
def __init__(self, addresses):
self.addresses = addresses
class FakeQueryResult:
def __init__(self, host):
self.host = host
@asyncio.coroutine
def fake_result(addresses):
return FakeResult(addresses=tuple(addresses))
@asyncio.coroutine
def fake_query_result(result):
return [FakeQueryResult(host=h)
for h in result]
def fake_addrinfo(hosts):
@asyncio.coroutine
def fake(*args, **kwargs):
if not hosts:
raise socket.gaierror
        return [(None, None, None, None, [h, 0])
                for h in hosts]
return fake
@pytest.mark.skipif(not gethostbyname, reason="aiodns 1.1 required")
async def test_async_resolver_positive_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
mock().gethostbyname.return_value = fake_result(['127.0.0.1'])
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.python.org')
ipaddress.ip_address(real[0]['host'])
mock().gethostbyname.assert_called_with('www.python.org',
socket.AF_INET)
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_async_resolver_query_positive_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
del mock().gethostbyname
mock().query.return_value = fake_query_result(['127.0.0.1'])
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.python.org')
ipaddress.ip_address(real[0]['host'])
mock().query.assert_called_with('www.python.org', 'A')
@pytest.mark.skipif(not gethostbyname, reason="aiodns 1.1 required")
async def test_async_resolver_multiple_replies(loop):
with patch('aiodns.DNSResolver') as mock:
ips = ['127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4']
mock().gethostbyname.return_value = fake_result(ips)
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.google.com')
ips = [ipaddress.ip_address(x['host']) for x in real]
assert len(ips) > 3, "Expecting multiple addresses"
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_async_resolver_query_multiple_replies(loop):
with patch('aiodns.DNSResolver') as mock:
del mock().gethostbyname
ips = ['127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4']
mock().query.return_value = fake_query_result(ips)
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.google.com')
        ips = [ipaddress.ip_address(x['host']) for x in real]
        assert len(ips) > 3, "Expecting multiple addresses"
@pytest.mark.skipif(not gethostbyname, reason="aiodns 1.1 required")
async def test_async_resolver_negative_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
mock().gethostbyname.side_effect = aiodns.error.DNSError()
resolver = AsyncResolver(loop=loop)
with pytest.raises(OSError):
await resolver.resolve('doesnotexist.bla')
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_async_resolver_query_negative_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
del mock().gethostbyname
mock().query.side_effect = aiodns.error.DNSError()
resolver = AsyncResolver(loop=loop)
with pytest.raises(OSError):
await resolver.resolve('doesnotexist.bla')
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_async_resolver_no_hosts_in_query(loop):
with patch('aiodns.DNSResolver') as mock:
del mock().gethostbyname
mock().query.return_value = fake_query_result([])
resolver = AsyncResolver(loop=loop)
with pytest.raises(OSError):
await resolver.resolve('doesnotexist.bla')
@pytest.mark.skipif(not gethostbyname, reason="aiodns 1.1 required")
async def test_async_resolver_no_hosts_in_gethostbyname(loop):
with patch('aiodns.DNSResolver') as mock:
mock().gethostbyname.return_value = fake_result([])
resolver = AsyncResolver(loop=loop)
with pytest.raises(OSError):
await resolver.resolve('doesnotexist.bla')
async def test_threaded_resolver_positive_lookup():
loop = Mock()
loop.getaddrinfo = fake_addrinfo(["127.0.0.1"])
resolver = ThreadedResolver(loop=loop)
real = await resolver.resolve('www.python.org')
ipaddress.ip_address(real[0]['host'])
async def test_threaded_resolver_multiple_replies():
loop = Mock()
ips = ['127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4']
loop.getaddrinfo = fake_addrinfo(ips)
resolver = ThreadedResolver(loop=loop)
real = await resolver.resolve('www.google.com')
ips = [ipaddress.ip_address(x['host']) for x in real]
assert len(ips) > 3, "Expecting multiple addresses"
async def test_threaded_negative_lookup():
loop = Mock()
ips = []
loop.getaddrinfo = fake_addrinfo(ips)
resolver = ThreadedResolver(loop=loop)
with pytest.raises(socket.gaierror):
await resolver.resolve('doesnotexist.bla')
async def test_close_for_threaded_resolver(loop):
resolver = ThreadedResolver(loop=loop)
await resolver.close()
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_close_for_async_resolver(loop):
resolver = AsyncResolver(loop=loop)
await resolver.close()
def test_default_loop_for_threaded_resolver(loop):
asyncio.set_event_loop(loop)
resolver = ThreadedResolver()
assert resolver._loop is loop
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
def test_default_loop_for_async_resolver(loop):
asyncio.set_event_loop(loop)
resolver = AsyncResolver()
assert resolver._loop is loop
@pytest.mark.skipif(not gethostbyname, reason="aiodns 1.1 required")
async def test_async_resolver_ipv6_positive_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
mock().gethostbyname.return_value = fake_result(['::1'])
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.python.org',
family=socket.AF_INET6)
ipaddress.ip_address(real[0]['host'])
mock().gethostbyname.assert_called_with('www.python.org',
socket.AF_INET6)
@pytest.mark.skipif(aiodns is None, reason="aiodns required")
async def test_async_resolver_query_ipv6_positive_lookup(loop):
with patch('aiodns.DNSResolver') as mock:
del mock().gethostbyname
mock().query.return_value = fake_query_result(['::1'])
resolver = AsyncResolver(loop=loop)
real = await resolver.resolve('www.python.org',
family=socket.AF_INET6)
ipaddress.ip_address(real[0]['host'])
mock().query.assert_called_with('www.python.org', 'AAAA')
def test_async_resolver_aiodns_not_present(loop, monkeypatch):
monkeypatch.setattr("aiohttp.resolver.aiodns", None)
with pytest.raises(RuntimeError):
AsyncResolver(loop=loop)
def test_default_resolver():
# if gethostbyname:
# assert DefaultResolver is AsyncResolver
# else:
# assert DefaultResolver is ThreadedResolver
assert DefaultResolver is ThreadedResolver
|
playpauseandstop/aiohttp
|
tests/test_resolver.py
|
Python
|
apache-2.0
| 7,611
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1ComponentCondition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
V1ComponentCondition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'type': 'str',
'status': 'str',
'message': 'str',
'error': 'str'
}
self.attribute_map = {
'type': 'type',
'status': 'status',
'message': 'message',
'error': 'error'
}
self._type = None
self._status = None
self._message = None
self._error = None
@property
def type(self):
"""
Gets the type of this V1ComponentCondition.
Type of condition for a component. Valid value: \"Healthy\"
:return: The type of this V1ComponentCondition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1ComponentCondition.
Type of condition for a component. Valid value: \"Healthy\"
:param type: The type of this V1ComponentCondition.
:type: str
"""
self._type = type
@property
def status(self):
"""
Gets the status of this V1ComponentCondition.
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".
:return: The status of this V1ComponentCondition.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1ComponentCondition.
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".
:param status: The status of this V1ComponentCondition.
:type: str
"""
self._status = status
@property
def message(self):
"""
Gets the message of this V1ComponentCondition.
Message about the condition for a component. For example, information about a health check.
:return: The message of this V1ComponentCondition.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this V1ComponentCondition.
Message about the condition for a component. For example, information about a health check.
:param message: The message of this V1ComponentCondition.
:type: str
"""
self._message = message
@property
def error(self):
"""
Gets the error of this V1ComponentCondition.
Condition error code for a component. For example, a health check error code.
:return: The error of this V1ComponentCondition.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this V1ComponentCondition.
Condition error code for a component. For example, a health check error code.
:param error: The error of this V1ComponentCondition.
:type: str
"""
self._error = error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
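# Editor's note: the block below is an illustrative usage sketch added for clarity and is
# not part of the generated client; the field values are invented.
if __name__ == '__main__':
    condition = V1ComponentCondition()
    condition.type = 'Healthy'
    condition.status = 'True'
    condition.message = 'health check passed'
    condition.error = ''
    # to_dict() walks swagger_types and returns a plain dict of the current values.
    print(condition.to_dict())
    # __repr__ delegates to to_str(), which pretty-prints the dict form.
    print(repr(condition))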
|
danielfrg/jupyterhub-kubernetes_spawner
|
kubernetes_spawner/swagger_client/models/v1_component_condition.py
|
Python
|
apache-2.0
| 5,391
|
# -*- coding: utf-8 -*-
from apps.bigcz.clients.usgswqp.models import USGSResource
from apps.bigcz.clients.usgswqp.serializers import USGSResourceSerializer
# Import catalog name and search function, so it can be exported from here
from apps.bigcz.clients.usgswqp.search import CATALOG_NAME, search # NOQA
model = USGSResource
serializer = USGSResourceSerializer
|
WikiWatershed/model-my-watershed
|
src/mmw/apps/bigcz/clients/usgswqp/__init__.py
|
Python
|
apache-2.0
| 366
|
#!/usr/bin/env python
# Licensed to Pioneers in Engineering under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Pioneers in Engineering licenses
# this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
from __future__ import print_function
import eagle_util_funcs
import os
import os.path
import subprocess
import sys
import shutil
def run_script(file_name, script_name):
ret = eagle_util_funcs.run_eagle([
file_name,
'-S' + script_name,
]
)
if ret != 0:
print("Eagle returned error!")
sys.exit(ret)
def copy_and_replace(src, dst, pattern, replacement):
with open(src) as src_file:
with open(dst, "w") as dst_file:
dst_file.write(src_file.read().replace(pattern, replacement))
def compile_pdf(inputs, output):
ret = subprocess.call(["pdftk"] + inputs + [
"cat",
"output",
output
]
)
if ret != 0:
print("pdftk returned error!")
sys.exit(ret)
def main():
if len(sys.argv) < 3:
print("Usage: %s in.sch|in.brd out.pdf" % (sys.argv[0]))
sys.exit(1)
scr_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
base_name = os.path.splitext(os.path.abspath(sys.argv[1]))[0]
out_name = os.path.join(os.getcwd(), os.path.abspath(sys.argv[2]))
sch_name = os.path.join(os.getcwd(), base_name + ".sch")
brd_name = os.path.join(os.getcwd(), base_name + ".brd")
have_sch = os.path.isfile(sch_name)
have_brd = os.path.isfile(brd_name)
# Start xvfb
xvfb, display_num = eagle_util_funcs.start_xvfb()
# Create temporary directory
tmp_dir = eagle_util_funcs.setup_tmp_dir()
# Copy scripts to the temporary directory
# Eagle's default location for saving exported images is unrelated to the
# current working directory, so the scripts must be modified to hardcode
# the output file paths
copy_and_replace(os.path.join(scr_dir, "docu-packet-schematic.scr"),
os.path.join(tmp_dir, "schematic.scr"),
"%PATH%",
tmp_dir)
copy_and_replace(os.path.join(scr_dir, "docu-packet-board.scr"),
os.path.join(tmp_dir, "board.scr"),
"%PATH%",
tmp_dir)
inputs = []
# Generate schematic image
if have_sch:
dst_sch_name = os.path.join(tmp_dir, "file.sch")
shutil.copy(sch_name, dst_sch_name)
run_script(dst_sch_name, "schematic.scr")
os.remove(dst_sch_name)
inputs.append(os.path.join(tmp_dir, "schematic.pdf"))
# Generate board images
if have_brd:
dst_brd_name = os.path.join(tmp_dir, "file.brd")
shutil.copy(brd_name, dst_brd_name)
run_script(dst_brd_name, "board.scr")
os.remove(dst_brd_name)
inputs.append(os.path.join(tmp_dir, "top.pdf"))
inputs.append(os.path.join(tmp_dir, "bottom.pdf"))
# Compile final pdf
compile_pdf(inputs, out_name)
# Clean up
eagle_util_funcs.remove_tmp_dir(tmp_dir)
eagle_util_funcs.kill_xvfb(xvfb)
if __name__ == '__main__':
main()
|
zentner-kyle/tenshi-old
|
tools/docu-packet-gen.py
|
Python
|
apache-2.0
| 3,744
|
"""Test the Minecraft Server config flow."""
from asynctest import patch
from mcstatus.pinger import PingResponse
from homeassistant.components.minecraft_server.const import (
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.common import MockConfigEntry
STATUS_RESPONSE_RAW = {
"description": {"text": "Dummy Description"},
"version": {"name": "Dummy Version", "protocol": 123},
"players": {
"online": 3,
"max": 10,
"sample": [
{"name": "Player 1", "id": "1"},
{"name": "Player 2", "id": "2"},
{"name": "Player 3", "id": "3"},
],
},
}
USER_INPUT = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com",
CONF_PORT: DEFAULT_PORT,
}
USER_INPUT_IPV4 = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "1.1.1.1",
CONF_PORT: DEFAULT_PORT,
}
USER_INPUT_IPV6 = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "::ffff:0101:0101",
CONF_PORT: DEFAULT_PORT,
}
USER_INPUT_PORT_TOO_SMALL = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com",
CONF_PORT: 1023,
}
USER_INPUT_PORT_TOO_LARGE = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com",
CONF_PORT: 65536,
}
async def test_show_config_form(hass: HomeAssistantType) -> None:
"""Test if initial configuration form is shown."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_invalid_ip(hass: HomeAssistantType) -> None:
"""Test error in case of an invalid IP address."""
with patch("getmac.get_mac_address", return_value=None):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV4
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_ip"}
async def test_same_host(hass: HomeAssistantType) -> None:
"""Test abort in case of same host name."""
unique_id = f"{USER_INPUT[CONF_HOST]}-{USER_INPUT[CONF_PORT]}"
mock_config_entry = MockConfigEntry(
domain=DOMAIN, unique_id=unique_id, data=USER_INPUT
)
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_port_too_small(hass: HomeAssistantType) -> None:
"""Test error in case of a too small port."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_PORT_TOO_SMALL
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_port"}
async def test_port_too_large(hass: HomeAssistantType) -> None:
"""Test error in case of a too large port."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_PORT_TOO_LARGE
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_port"}
async def test_connection_failed(hass: HomeAssistantType) -> None:
"""Test error in case of a failed connection."""
with patch("mcstatus.server.MinecraftServer.ping", side_effect=OSError):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_connection_succeeded_with_host(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with a host name."""
with patch("mcstatus.server.MinecraftServer.ping", return_value=50):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"{USER_INPUT[CONF_HOST]}:{USER_INPUT[CONF_PORT]}"
assert result["data"][CONF_NAME] == USER_INPUT[CONF_NAME]
assert result["data"][CONF_HOST] == USER_INPUT[CONF_HOST]
assert result["data"][CONF_PORT] == USER_INPUT[CONF_PORT]
async def test_connection_succeeded_with_ip4(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with an IPv4 address."""
with patch("getmac.get_mac_address", return_value="01:23:45:67:89:ab"):
with patch("mcstatus.server.MinecraftServer.ping", return_value=50):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV4
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert (
result["title"]
== f"{USER_INPUT_IPV4[CONF_HOST]}:{USER_INPUT_IPV4[CONF_PORT]}"
)
assert result["data"][CONF_NAME] == USER_INPUT_IPV4[CONF_NAME]
assert result["data"][CONF_HOST] == USER_INPUT_IPV4[CONF_HOST]
assert result["data"][CONF_PORT] == USER_INPUT_IPV4[CONF_PORT]
async def test_connection_succeeded_with_ip6(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with an IPv6 address."""
with patch("getmac.get_mac_address", return_value="01:23:45:67:89:ab"):
with patch("mcstatus.server.MinecraftServer.ping", return_value=50):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV6
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert (
result["title"]
== f"{USER_INPUT_IPV6[CONF_HOST]}:{USER_INPUT_IPV6[CONF_PORT]}"
)
assert result["data"][CONF_NAME] == USER_INPUT_IPV6[CONF_NAME]
assert result["data"][CONF_HOST] == USER_INPUT_IPV6[CONF_HOST]
assert result["data"][CONF_PORT] == USER_INPUT_IPV6[CONF_PORT]
|
postlund/home-assistant
|
tests/components/minecraft_server/test_config_flow.py
|
Python
|
apache-2.0
| 7,108
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import contextlib
import os
import platform
import random
import subprocess
import sys
from contextlib import contextmanager
from textwrap import dedent
from pex.common import atomic_directory, open_zip, safe_mkdir, safe_mkdtemp, temporary_dir
from pex.compatibility import to_unicode
from pex.distribution_target import DistributionTarget
from pex.executor import Executor
from pex.interpreter import PythonInterpreter
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pex.pip import get_pip
from pex.third_party.pkg_resources import Distribution
from pex.typing import TYPE_CHECKING
from pex.util import DistributionHelper, named_temporary_file
from pex.variables import ENV
if TYPE_CHECKING:
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Text,
Tuple,
Union,
)
PY_VER = sys.version_info[:2]
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_PYPY2 = IS_PYPY and sys.version_info[0] == 2
IS_PYPY3 = IS_PYPY and sys.version_info[0] == 3
NOT_CPYTHON27 = IS_PYPY or PY_VER != (2, 7)
NOT_CPYTHON36 = IS_PYPY or PY_VER != (3, 6)
IS_LINUX = platform.system() == "Linux"
IS_NOT_LINUX = not IS_LINUX
NOT_CPYTHON27_OR_OSX = NOT_CPYTHON27 or IS_NOT_LINUX
NOT_CPYTHON36_OR_LINUX = NOT_CPYTHON36 or IS_LINUX
@contextlib.contextmanager
def temporary_filename():
# type: () -> Iterator[str]
"""Creates a temporary filename.
This is useful when you need to pass a filename to an API. Windows requires all handles to a
file be closed before deleting/renaming it, so this makes it a bit simpler.
"""
with named_temporary_file() as fp:
fp.write(b"")
fp.close()
yield fp.name
def random_bytes(length):
# type: (int) -> bytes
return "".join(map(chr, (random.randint(ord("a"), ord("z")) for _ in range(length)))).encode(
"utf-8"
)
def get_dep_dist_names_from_pex(pex_path, match_prefix=""):
# type: (str, str) -> Set[str]
"""Given an on-disk pex, extract all of the unique first-level paths under `.deps`."""
with open_zip(pex_path) as pex_zip:
dep_gen = (f.split(os.sep)[1] for f in pex_zip.namelist() if f.startswith(".deps/"))
return set(item for item in dep_gen if item.startswith(match_prefix))
@contextlib.contextmanager
def temporary_content(content_map, interp=None, seed=31337, perms=0o644):
# type: (Mapping[str, Union[int, str]], Optional[Dict[str, Any]], int, int) -> Iterator[str]
"""Write content to disk where content is map from string => (int, string).
If target is int, write int random bytes. Otherwise write contents of string.
"""
random.seed(seed)
interp = interp or {}
with temporary_dir() as td:
for filename, size_or_content in content_map.items():
dest = os.path.join(td, filename)
safe_mkdir(os.path.dirname(dest))
with open(dest, "wb") as fp:
if isinstance(size_or_content, int):
fp.write(random_bytes(size_or_content))
else:
fp.write((size_or_content % interp).encode("utf-8"))
os.chmod(dest, perms)
yield td
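# Editor's note: a hypothetical usage sketch for temporary_content, added for clarity; the
# helper name, file names, and interpolation values below are invented.
def _example_temporary_content_usage():
    # type: () -> None
    content = {
        "README.txt": "project %(project_name)s\n",  # string -> %-interpolated text
        "data/blob.bin": 64,  # int -> 64 random bytes
    }
    with temporary_content(content, interp={"project_name": "demo"}) as td:
        assert os.path.isfile(os.path.join(td, "README.txt"))
        assert os.path.isfile(os.path.join(td, "data", "blob.bin"))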
@contextlib.contextmanager
def make_project(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
entry_points=None, # type: Optional[Union[str, Dict[str, List[str]]]]
python_requires=None, # type: Optional[str]
):
# type: (...) -> Iterator[str]
project_content = {
"setup.py": dedent(
"""
from setuptools import setup
setup(
name=%(project_name)r,
version=%(version)r,
zip_safe=%(zip_safe)r,
packages=[%(project_name)r],
scripts=[
'scripts/hello_world',
'scripts/shell_script',
],
package_data={%(project_name)r: ['package_data/*.dat']},
install_requires=%(install_requires)r,
extras_require=%(extras_require)r,
entry_points=%(entry_points)r,
python_requires=%(python_requires)r,
)
"""
),
"scripts/hello_world": '#!/usr/bin/env python\nprint("hello world!")\n',
"scripts/shell_script": "#!/usr/bin/env bash\necho hello world\n",
os.path.join(name, "__init__.py"): 0,
os.path.join(name, "my_module.py"): 'def do_something():\n print("hello world!")\n',
os.path.join(name, "package_data/resource1.dat"): 1000,
os.path.join(name, "package_data/resource2.dat"): 1000,
} # type: Dict[str, Union[str, int]]
interp = {
"project_name": name,
"version": version,
"zip_safe": zip_safe,
"install_requires": install_reqs or [],
"extras_require": extras_require or {},
"entry_points": entry_points or {},
"python_requires": python_requires,
}
with temporary_content(project_content, interp=interp) as td:
yield td
class WheelBuilder(object):
"""Create a wheel distribution from an unpacked setup.py-based project."""
class BuildFailure(Exception):
pass
def __init__(
self,
source_dir, # type: str
interpreter=None, # type: Optional[PythonInterpreter]
wheel_dir=None, # type: Optional[str]
verify=True, # type: bool
):
# type: (...) -> None
"""Create a wheel from an unpacked source distribution in source_dir."""
self._source_dir = source_dir
self._wheel_dir = wheel_dir or safe_mkdtemp()
self._interpreter = interpreter or PythonInterpreter.get()
self._verify = verify
def bdist(self):
# type: () -> str
get_pip().spawn_build_wheels(
distributions=[self._source_dir],
wheel_dir=self._wheel_dir,
interpreter=self._interpreter,
verify=self._verify,
).wait()
dists = os.listdir(self._wheel_dir)
if len(dists) == 0:
raise self.BuildFailure("No distributions were produced!")
if len(dists) > 1:
raise self.BuildFailure("Ambiguous source distributions found: %s" % (" ".join(dists)))
return os.path.join(self._wheel_dir, dists[0])
@contextlib.contextmanager
def built_wheel(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
entry_points=None, # type: Optional[Union[str, Dict[str, List[str]]]]
interpreter=None, # type: Optional[PythonInterpreter]
python_requires=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterator[str]
with make_project(
name=name,
version=version,
zip_safe=zip_safe,
install_reqs=install_reqs,
extras_require=extras_require,
entry_points=entry_points,
python_requires=python_requires,
) as td:
builder = WheelBuilder(td, interpreter=interpreter, **kwargs)
yield builder.bdist()
@contextlib.contextmanager
def make_source_dir(
name="my_project", # type: str
version="0.0.0", # type: str
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
):
# type: (...) -> Iterator[str]
with make_project(
name=name, version=version, install_reqs=install_reqs, extras_require=extras_require
) as td:
yield td
@contextlib.contextmanager
def make_bdist(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
**kwargs # type: Any
):
# type: (...) -> Iterator[Distribution]
with built_wheel(
name=name, version=version, zip_safe=zip_safe, interpreter=interpreter, **kwargs
) as dist_location:
install_dir = os.path.join(safe_mkdtemp(), os.path.basename(dist_location))
get_pip().spawn_install_wheel(
wheel=dist_location,
install_dir=install_dir,
target=DistributionTarget(interpreter=interpreter),
).wait()
dist = DistributionHelper.distribution_from_path(install_dir)
assert dist is not None
yield dist
COVERAGE_PREAMBLE = """
try:
from coverage import coverage
cov = coverage(auto_data=True, data_suffix=True)
cov.start()
except ImportError:
pass
"""
def write_simple_pex(
td, # type: str
exe_contents=None, # type: Optional[str]
dists=None, # type: Optional[Iterable[Distribution]]
sources=None, # type: Optional[Iterable[Tuple[str, str]]]
coverage=False, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
pex_info=None, # type: Optional[PexInfo]
):
# type: (...) -> PEXBuilder
"""Write a pex file that optionally contains an executable entry point.
:param td: temporary directory path
:param exe_contents: entry point python file
:param dists: distributions to include, typically sdists or bdists
:param sources: sources to include, as a list of pairs (env_filename, contents)
:param coverage: include coverage header
:param interpreter: a custom interpreter to use to build the pex
:param pex_info: a custom PexInfo to use to build the pex.
"""
dists = dists or []
sources = sources or []
safe_mkdir(td)
pb = PEXBuilder(
path=td,
preamble=COVERAGE_PREAMBLE if coverage else None,
interpreter=interpreter,
pex_info=pex_info,
)
for dist in dists:
pb.add_dist_location(dist.location if isinstance(dist, Distribution) else dist)
for env_filename, contents in sources:
src_path = os.path.join(td, env_filename)
safe_mkdir(os.path.dirname(src_path))
with open(src_path, "w") as fp:
fp.write(contents)
pb.add_source(src_path, env_filename)
if exe_contents:
with open(os.path.join(td, "exe.py"), "w") as fp:
fp.write(exe_contents)
pb.set_executable(os.path.join(td, "exe.py"))
pb.freeze()
return pb
# TODO(#1041): use `typing.NamedTuple` once we require Python 3.
class IntegResults(object):
"""Convenience object to return integration run results."""
def __init__(self, output, error, return_code):
# type: (str, str, int) -> None
super(IntegResults, self).__init__()
self.output = output
self.error = error
self.return_code = return_code
def assert_success(self):
# type: () -> None
assert (
self.return_code == 0
), "integration test failed: return_code={}, output={}, error={}".format(
self.return_code, self.output, self.error
)
def assert_failure(self):
# type: () -> None
assert self.return_code != 0
def run_pex_command(args, env=None, python=None, quiet=False):
# type: (Iterable[str], Optional[Dict[str, str]], Optional[str], bool) -> IntegResults
"""Simulate running pex command for integration testing.
This is different from run_simple_pex in that it calls the pex command rather than running a
generated pex. This is useful for testing end to end runs with specific command line arguments
or env options.
"""
cmd = [python or sys.executable, "-mpex"]
if not quiet:
cmd.append("-vvvvv")
cmd.extend(args)
process = Executor.open_process(
cmd=cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output, error = process.communicate()
return IntegResults(output.decode("utf-8"), error.decode("utf-8"), process.returncode)
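# Editor's note: a hypothetical sketch of driving run_pex_command from a test, added for
# clarity; it assumes the standard pex `-o` output flag, and the helper name and output
# path are invented.
def _example_run_pex_command():
    # type: () -> None
    with temporary_dir() as td:
        pex_file = os.path.join(td, "empty.pex")
        results = run_pex_command(args=["-o", pex_file])
        results.assert_success()
        assert os.path.isfile(pex_file)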
def run_simple_pex(
pex, # type: str
args=(), # type: Iterable[str]
interpreter=None, # type: Optional[PythonInterpreter]
stdin=None, # type: Optional[bytes]
**kwargs # type: Any
):
# type: (...) -> Tuple[bytes, int]
p = PEX(pex, interpreter=interpreter)
process = p.run(
args=args,
blocking=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs
)
stdout, _ = process.communicate(input=stdin)
return stdout.replace(b"\r", b""), process.returncode
def run_simple_pex_test(
body, # type: str
args=(), # type: Iterable[str]
env=None, # type: Optional[Mapping[str, str]]
dists=None, # type: Optional[Iterable[Distribution]]
coverage=False, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
):
# type: (...) -> Tuple[bytes, int]
with temporary_dir() as td1, temporary_dir() as td2:
pb = write_simple_pex(td1, body, dists=dists, coverage=coverage, interpreter=interpreter)
pex = os.path.join(td2, "app.pex")
pb.build(pex)
return run_simple_pex(pex, args=args, env=env, interpreter=interpreter)
def bootstrap_python_installer(dest):
# type: (str) -> None
for _ in range(3):
try:
subprocess.check_call(["git", "clone", "https://github.com/pyenv/pyenv.git", dest])
except subprocess.CalledProcessError as e:
print("caught exception: %r" % e)
continue
else:
break
else:
raise RuntimeError("Helper method could not clone pyenv from git after 3 tries")
# NB: We keep the pool of bootstrapped interpreters as small as possible to avoid timeouts in CI
# otherwise encountered when fetching and building too many on a cache miss. In the past we had
# issues with the combination of 7 total unique interpreter versions and a Travis-CI timeout of 50
# minutes for a shard.
PY27 = "2.7.15"
PY35 = "3.5.6"
PY36 = "3.6.6"
_ALL_PY_VERSIONS = (PY27, PY35, PY36)
_ALL_PY3_VERSIONS = (PY35, PY36)
def ensure_python_distribution(version):
# type: (str) -> Tuple[str, str, Callable[[Iterable[str]], Text]]
if version not in _ALL_PY_VERSIONS:
raise ValueError("Please constrain version to one of {}".format(_ALL_PY_VERSIONS))
pyenv_root = os.path.abspath(
os.path.join(
os.environ.get("_PEX_TEST_PYENV_ROOT", "{}_dev".format(ENV.PEX_ROOT)),
"pyenv",
)
)
interpreter_location = os.path.join(pyenv_root, "versions", version)
pyenv = os.path.join(pyenv_root, "bin", "pyenv")
pyenv_env = os.environ.copy()
pyenv_env["PYENV_ROOT"] = pyenv_root
pip = os.path.join(interpreter_location, "bin", "pip")
with atomic_directory(target_dir=os.path.join(pyenv_root), exclusive=True) as target_dir:
if not target_dir.is_finalized:
bootstrap_python_installer(target_dir.work_dir)
with atomic_directory(
target_dir=interpreter_location, exclusive=True
) as interpreter_target_dir:
if not interpreter_target_dir.is_finalized:
subprocess.check_call(
[
"git",
"--git-dir={}".format(os.path.join(pyenv_root, ".git")),
"--work-tree={}".format(pyenv_root),
"pull",
"--ff-only",
"https://github.com/pyenv/pyenv.git",
]
)
env = pyenv_env.copy()
if sys.platform.lower().startswith("linux"):
env["CONFIGURE_OPTS"] = "--enable-shared"
# The pyenv builder detects `--enable-shared` and sets up `RPATH` via
# `LDFLAGS=-Wl,-rpath=... $LDFLAGS` to ensure the built python binary links the
# correct libpython shared lib. Some versions of compiler set the `RUNPATH` instead
# though which is searched _after_ the `LD_LIBRARY_PATH` environment variable. To
# ensure an inopportune `LD_LIBRARY_PATH` doesn't fool the pyenv python binary into
# linking the wrong libpython, force `RPATH`, which is searched 1st by the linker,
                # with `--disable-new-dtags`.
env["LDFLAGS"] = "-Wl,--disable-new-dtags"
subprocess.check_call([pyenv, "install", "--keep", version], env=env)
subprocess.check_call([pip, "install", "-U", "pip"])
python = os.path.join(interpreter_location, "bin", "python" + version[0:3])
def run_pyenv(args):
# type: (Iterable[str]) -> Text
return to_unicode(subprocess.check_output([pyenv] + list(args), env=pyenv_env))
return python, pip, run_pyenv
def ensure_python_venv(version, latest_pip=True, system_site_packages=False):
python, pip, _ = ensure_python_distribution(version)
venv = safe_mkdtemp()
if version in _ALL_PY3_VERSIONS:
args = [python, "-m", "venv", venv]
if system_site_packages:
args.append("--system-site-packages")
subprocess.check_call(args=args)
else:
subprocess.check_call(args=[pip, "install", "virtualenv==16.7.10"])
args = [python, "-m", "virtualenv", venv, "-q"]
if system_site_packages:
args.append("--system-site-packages")
subprocess.check_call(args=args)
python, pip = tuple(os.path.join(venv, "bin", exe) for exe in ("python", "pip"))
if latest_pip:
subprocess.check_call(args=[pip, "install", "-U", "pip"])
return python, pip
def ensure_python_interpreter(version):
# type: (str) -> str
python, _, _ = ensure_python_distribution(version)
return python
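# Editor's note: an illustrative sketch only, added for clarity; the helper name is
# invented, and building PY36 via pyenv on a cold cache can be slow, so real tests reach
# this through fixtures rather than calling it directly.
def _example_ensure_python_interpreter():
    # type: () -> None
    python36 = ensure_python_interpreter(PY36)
    subprocess.check_call(
        [python36, "-c", "import sys; assert sys.version_info[:2] == (3, 6)"]
    )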
@contextmanager
def environment_as(**kwargs):
# type: (**str) -> Iterator[None]
existing = {key: os.environ.get(key) for key in kwargs}
def adjust_environment(mapping):
for key, value in mapping.items():
if value is not None:
os.environ[key] = value
else:
del os.environ[key]
adjust_environment(kwargs)
try:
yield
finally:
adjust_environment(existing)
@contextmanager
def pushd(directory):
# type: (str) -> Iterator[None]
cwd = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(cwd)
|
jsirois/pex
|
pex/testing.py
|
Python
|
apache-2.0
| 18,590
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging utilities."""
# Lint as: python3
import collections
import copy
import csv
import logging
import os
import pprint
import sys
import time
from brax.io import file as io_file
import numpy as np
_tabulators = {}
_timers = {}
_record_time = False
def save_config(output_path, config, verbose=False):
io_file.MakeDirs(os.path.dirname(output_path))
config_str = pprint.pformat(config, indent=2)
with io_file.File(output_path, 'w') as f:
f.write(config_str)
if verbose:
print(f'Saved {output_path}')
def load_config(path, verbose=False):
with io_file.File(path, 'r') as f:
config_str = f.read()
config = eval(config_str)
if verbose:
print(f'Loaded {path}')
return config
class Graph(object):
"""Visualize data in dynamic graphs."""
def __init__(
self,
max_length=100,
):
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
self.max_length = max_length
self.app = QtGui.QApplication([])
self.win = pg.GraphicsWindow()
self.ps = {}
self.curves = {}
self.dats = {}
def add_plot(self, key):
if key not in self.ps:
self.ps[key] = self.win.addPlot(colspan=2)
self.ps[key].setLabel(axis='top', text=key)
self.win.nextRow()
self.curves[key] = self.ps[key].plot()
self.dats[key] = collections.deque()
def update(self, **values):
for key, value in values.items():
self.add_plot(key)
if len(self.dats[key]) > self.max_length:
self.dats[key].popleft()
self.dats[key].append(value)
self.curves[key].setData(self.dats[key])
self.app.processEvents()
class Tabulator(object):
"""Tabulate data and incrementally format into a csv."""
def __init__(self, output_path=None, append=True, cast_type=None):
self._output_path = output_path
self._cast_type = cast_type
self._curr_values = collections.OrderedDict()
self._history_values = collections.OrderedDict()
self._curr_counts = {}
if append and output_path and io_file.Exists(self._output_path):
self.finalize_from_file()
else:
self._finalized = False
def finalize_from_file(self):
data = parse_csv(self._output_path)
self._history_values = data
for key, value in data.items():
self._curr_values[key] = value[-1]
self._finalized = True
def get_statistics(self, indices=None, stat='mean', tag='', key_filter=None):
"""Get statistics (average, max, min) values in the table."""
ret = {}
for key, values in self._history_values.items():
if key_filter and not key_filter(key):
continue
target_values = np.array(values)
if indices is None:
pass
elif isinstance(indices, (int, tuple, list, np.ndarray)):
if isinstance(indices, int):
indices = [indices]
target_values = target_values[indices]
elif isinstance(indices, str) and ':' in indices:
first_index, second_index = [
int(s) if s else None for s in indices.split(':', 1)
]
target_values = target_values[first_index:second_index]
else:
raise NotImplementedError(indices, type(indices))
if tag:
key += tag
if stat == 'mean':
ret[key] = np.mean(target_values, axis=0)
elif stat == 'max':
ret[key] = np.max(target_values, axis=0)
elif stat == 'min':
ret[key] = np.min(target_values, axis=0)
else:
raise NotImplementedError(stat)
return ret
def get_last(self):
return self.get_statistics(indices=-1)
def get_curr(self):
return self._curr_values
def add(self, accumulate=False, **entries):
"""Add an entry of data."""
for key, value in sorted(entries.items()):
if key not in self._history_values:
assert not self._finalized, ('Cannot add a new key {} once tabulator is'
' finalized.').format(key)
self._history_values[key] = []
value = copy.deepcopy(value)
if accumulate:
value += self._curr_values.get(key, 0.0)
self._curr_counts[key] = self._curr_counts.get(key, 0) + 1
value = self.cast(value)
self._curr_values[key] = value
def cast(self, value):
if self._cast_type:
try:
value = self._cast_type(value)
except TypeError as e:
raise TypeError('{}: Failed to cast {} as {}'.format(
e, value, self._cast_type))
return value
def finalize(self):
output_dir = os.path.dirname(self._output_path)
if output_dir:
io_file.MakeDirs(output_dir)
with io_file.File(self._output_path, 'w') as f:
writer = csv.writer(f)
writer.writerow(self._history_values.keys())
self._finalized = True
def dump(self, output_path=None, average=True):
"""Dump to a csv file."""
output_path = output_path or self._output_path
if not self._curr_values:
return # empty
if output_path:
if not self._finalized:
self.finalize() # finalize
with io_file.File(output_path, 'a') as f:
writer = csv.writer(f)
writer.writerow(self._curr_values.values())
for key, value in self._history_values.items():
v = copy.deepcopy(self._curr_values[key])
if average:
v /= self._curr_counts.get(key, 1.0)
      v = self.cast(v)
value.append(v)
self._curr_counts = {}
self._curr_values = {k: 0.0 for k in self._curr_values}
def parse_csv(filename: str, verbose: bool = False):
"""Parse a csv file."""
with io_file.File(filename, 'r') as f:
csv_data = np.genfromtxt(f, delimiter=',', names=True, deletechars='')
data = collections.OrderedDict()
try:
for i, key in enumerate(csv_data.dtype.names):
data[key] = [d[i] for d in csv_data]
except TypeError:
# 0-D array errors out
for key in csv_data.dtype.names:
data[key] = np.array([csv_data[key]])
if verbose:
print(f'Loaded len={len(list(data.values())[0])}, '
f'keys={sorted(list(data.keys()))} from {filename}')
return data
def parse_csv_parallel(filenames, n_threads=1):
import multiprocessing
with multiprocessing.pool.ThreadPool(n_threads) as pool:
jobs = {
filename: pool.apply_async(parse_csv, [filename], error_callback=print)
for filename in filenames
}
data = {key: value.get() for key, value in jobs.items()}
return data
def timeit():
global _record_time
_record_time = True
def tic(name):
global _timers
_timers[name] = time.time()
def toc(name, indent=0):
global _timers, _record_time
assert name in _timers
dt = time.time() - _timers[name]
del _timers[name]
if _record_time:
print('{}[{}] runtime: {}s'.format(''.join(['\t'] * indent), name, dt))
return dt
def get_level(name):
"""Get level."""
level = 'info' # default level
os_level = os.getenv('LEVEL')
if os_level is not None:
if ',' in os_level:
os_levels = os_level.split(',')
if name in os_levels[1:]:
level = os_levels[0]
else:
level = os_level
return level
class LoggerWrapper(object):
"""LoggerWrapper."""
def __init__(self, logger, name):
self.logger = logger
self.name = name
def format(self, content='', name=None, **kwargs):
"""Format content to str."""
if name is None:
name = self.name
else:
name = self.name + ':' + name
s = '[{}]'.format(name)
if content:
s += ' ' + pprint.pformat(content)
if kwargs:
s += ' ' + pprint.pformat(kwargs)
return s
def add_name(self, name):
self.name = ':'.join((self.name, name))
def pop_name(self):
self.name = ':'.join(self.name.split(':')[:-1])
def debug(self, content='', name=None, **kwargs):
level = get_level(self.name)
if level in ('debug',):
self.logger.debug(self.format(content=content, name=name, **kwargs))
def info(self, content='', name=None, **kwargs):
self.logger.info(self.format(content=content, name=name, **kwargs))
def get_logger(level=None, name=__name__):
"""Get logger.
If `level` is not specified, it consults os.getenv('LEVEL').
e.g. LEVEL=debug: print all debug messages.
LEVEL=debug,name1,name2: print all debug messages,
only for loggers with `name1` or `name2`,
and use default level (`info`) for others.
Args:
level: a string, e.g. 'info', 'debug', 'error'.
name: a string, identifier for logger.
Returns:
A logging.logger object.
"""
name = name.split('.')[-1] # base name
if level is None:
level = get_level(name)
logger = logging.getLogger(name)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(
logging.Formatter('[{}] %(asctime)s %(message)s'.format(name)))
out_hdlr.setLevel(getattr(logging, level.upper()))
logger.addHandler(out_hdlr)
logger.setLevel(getattr(logging, level.upper()))
return LoggerWrapper(logger, name=name)
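# Editor's note: a hypothetical usage sketch for get_logger/LoggerWrapper, added for
# clarity; the logger name and messages are invented. With LEVEL=debug (or
# LEVEL=debug,demo) set in the environment, the debug line would also be printed.
def _example_get_logger_usage():
  logger = get_logger(name='demo')
  logger.info('starting', step=0)
  logger.add_name('train')
  logger.debug('only printed when LEVEL enables debug for this logger')
  logger.pop_name()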
def get_tabulator(name=__name__, **kwargs):
"""Get a tabulator."""
global _tabulators
if name not in _tabulators:
_tabulators[name] = Tabulator(**kwargs)
return _tabulators[name]
if __name__ == '__main__':
tab = get_tabulator(append=False)
tab.dump() # do nothing
tab.add(a=3, b=2, c=4)
tab.add(b=4, d=6)
tab.dump()
tab.add(a=1, d=4)
tab.dump()
tab2 = get_tabulator(append=True)
tab2.add(a=4, b=1, c=2, d=3)
tab2.dump()
|
google/brax
|
brax/experimental/braxlines/common/logger_utils.py
|
Python
|
apache-2.0
| 9,975
|
import re
from setuptools import setup
init_py = open('wikipediabase/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_py))
metadata['doc'] = re.findall('"""(.+)"""', init_py)[0]
setup(
name='wikipediabase',
version=metadata['version'],
description=metadata['doc'],
author=metadata['author'],
author_email=metadata['email'],
url=metadata['url'],
packages=[
'wikipediabase',
'wikipediabase.resolvers',
],
include_package_data=True,
install_requires=[
'beautifulsoup4',
'docopt',
'edn_format',
'flake8 < 3.0.0',
'fuzzywuzzy',
'hiredis',
'lxml',
'overlay-parse',
'redis',
'requests',
'sqlitedict',
'unittest2 < 1.0.0',
],
dependency_links=[
'git+https://github.com/fakedrake/overlay_parse#egg=overlay-parse',
],
tests_require=[
'nose>=1.0',
'sqlitedict'
],
entry_points={
'console_scripts': [
'wikipediabase = wikipediabase.cli:main',
],
},
test_suite='nose.collector',
license=open('LICENSE').read(),
)
|
fakedrake/WikipediaBase
|
setup.py
|
Python
|
apache-2.0
| 1,174
|
from setuptools import setup
setup(
name="code_block_timer",
version="0.0.1",
packages=[
"code_block_timer",
],
author_email='jeskew@edx.org'
)
|
doctoryes/code_block_timer
|
setup.py
|
Python
|
apache-2.0
| 173
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import uuid
from oslo.config import cfg
from dnrm.openstack.common import context
from dnrm.openstack.common.gettextutils import _ # noqa
from dnrm.openstack.common import importutils
from dnrm.openstack.common import jsonutils
from dnrm.openstack.common import log as logging
from dnrm.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default=None,
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
    :param fn: the function object being decorated
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id or socket.gethostname(),
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
def publisher_id(service, host=None):
if not host:
try:
host = CONF.host
except AttributeError:
host = CONF.default_publisher_id or socket.gethostname()
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values()
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
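# Illustrative usage sketch, not part of the original module. It assumes at
# least one notification_driver is configured (for example a log-based driver
# from openstack.common, if present in this tree); everything else below is
# taken from the code above.
#
#     payload = {'instance_id': 12}
#     notify(context=None,
#            publisher_id=publisher_id('compute', 'host1'),
#            event_type='compute.create_instance',
#            priority=INFO,
#            payload=payload)
#
# The call builds the message dict documented in notify() (message_id,
# publisher_id, event_type, priority, payload, timestamp) and hands it to
# every driver returned by _get_drivers().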
|
Brocade-OpenSource/OpenStack-DNRM
|
dnrm/openstack/common/notifier/api.py
|
Python
|
apache-2.0
| 5,503
|
"""
Tests for salt.loader.lazy
"""
import sys
import pytest
import salt.loader
import salt.loader.context
import salt.loader.lazy
import salt.utils.files
@pytest.fixture
def loader_dir(tmp_path):
"""
    Create a simple directory with a couple of modules to load and run tests
    against.
"""
mod_contents = """
def __virtual__():
return True
def set_context(key, value):
__context__[key] = value
def get_context(key):
return __context__[key]
"""
with pytest.helpers.temp_file(
"mod_a.py", directory=tmp_path, contents=mod_contents
), pytest.helpers.temp_file("mod_b.py", directory=tmp_path, contents=mod_contents):
yield str(tmp_path)
def test_loaders_have_uniq_context(loader_dir):
"""
Loaded functions run in the LazyLoader's context.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_2 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_1._load_all()
loader_2._load_all()
assert loader_1.pack["__context__"] == {}
assert loader_2.pack["__context__"] == {}
loader_1["mod_a.set_context"]("foo", "bar")
assert loader_1.pack["__context__"] == {"foo": "bar"}
assert loader_1["mod_b.get_context"]("foo") == "bar"
with pytest.raises(KeyError):
loader_2["mod_a.get_context"]("foo")
assert loader_2.pack["__context__"] == {}
def test_loaded_methods_are_loaded_func(loader_dir):
"""
    Functions loaded from a LazyLoader's item lookups are LoadedFunc objects.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
fun = loader_1["mod_a.get_context"]
assert isinstance(fun, salt.loader.lazy.LoadedFunc)
def test_loaded_modules_are_loaded_mods(loader_dir):
"""
Modules looked up as attributes of LazyLoaders are LoadedMod objects.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod, salt.loader.lazy.LoadedMod)
def test_loaders_create_named_loader_contexts(loader_dir):
"""
    LazyLoaders create NamedLoaderContexts on the modules they load.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod.mod, str)
func = mod.set_context
assert isinstance(func, salt.loader.lazy.LoadedFunc)
module_name = func.func.__module__
module = sys.modules[module_name]
assert isinstance(module.__context__, salt.loader.context.NamedLoaderContext)
wrapped_module_name = func.__module__
wrapped_module = sys.modules[wrapped_module_name]
assert isinstance(
wrapped_module.__context__, salt.loader.context.NamedLoaderContext
)
assert module is wrapped_module
def test_loaders_convert_context_to_values(loader_dir):
"""
LazyLoaders convert NamedLoaderContexts to values when instantiated.
"""
loader_context = salt.loader.context.LoaderContext()
grains_default = {
"os": "linux",
}
grains = salt.loader.context.NamedLoaderContext(
"grains", loader_context, grains_default
)
opts = {
"optimization_order": [0, 1, 2],
"grains": grains,
}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
assert loader_1.opts["grains"] == grains_default
# The loader's opts is a copy
assert opts["grains"] == grains
def test_missing_loader_from_salt_internal_loaders():
with pytest.raises(RuntimeError):
salt.loader._module_dirs(
{"extension_modules": "/tmp/foo"}, "missingmodules", "module"
)
|
saltstack/salt
|
tests/pytests/unit/loader/test_lazy.py
|
Python
|
apache-2.0
| 3,761
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, functional
from pyro.contrib.examples.util import MNIST
class CVAEMNIST(Dataset):
def __init__(self, root, train=True, transform=None, download=False):
self.original = MNIST(root, train=train, download=download)
self.transform = transform
def __len__(self):
return len(self.original)
def __getitem__(self, item):
image, digit = self.original[item]
sample = {"original": image, "digit": digit}
if self.transform:
sample = self.transform(sample)
return sample
class ToTensor:
def __call__(self, sample):
sample["original"] = functional.to_tensor(sample["original"])
sample["digit"] = torch.as_tensor(
np.asarray(sample["digit"]), dtype=torch.int64
)
return sample
class MaskImages:
"""This torchvision image transformation prepares the MNIST digits to be
used in the tutorial. Depending on the number of quadrants to be used as
inputs (1, 2, or 3), the transformation masks the remaining (3, 2, 1)
quadrant(s) setting their pixels with -1. Additionally, the transformation
adds the target output in the sample dict as the complementary of the input
"""
def __init__(self, num_quadrant_inputs, mask_with=-1):
if num_quadrant_inputs <= 0 or num_quadrant_inputs >= 4:
raise ValueError("Number of quadrants as inputs must be 1, 2 or 3")
self.num = num_quadrant_inputs
self.mask_with = mask_with
def __call__(self, sample):
tensor = sample["original"].squeeze()
out = tensor.detach().clone()
h, w = tensor.shape
# removes the bottom left quadrant from the target output
out[h // 2 :, : w // 2] = self.mask_with
# if num of quadrants to be used as input is 2,
# also removes the top left quadrant from the target output
if self.num == 2:
out[:, : w // 2] = self.mask_with
# if num of quadrants to be used as input is 3,
# also removes the top right quadrant from the target output
if self.num == 3:
out[: h // 2, :] = self.mask_with
# now, sets the input as complementary
inp = tensor.clone()
inp[out != -1] = self.mask_with
sample["input"] = inp
sample["output"] = out
return sample
def get_data(num_quadrant_inputs, batch_size):
transforms = Compose(
[ToTensor(), MaskImages(num_quadrant_inputs=num_quadrant_inputs)]
)
datasets, dataloaders, dataset_sizes = {}, {}, {}
for mode in ["train", "val"]:
datasets[mode] = CVAEMNIST(
"../data", download=True, transform=transforms, train=mode == "train"
)
dataloaders[mode] = DataLoader(
datasets[mode],
batch_size=batch_size,
shuffle=mode == "train",
num_workers=0,
)
dataset_sizes[mode] = len(datasets[mode])
return datasets, dataloaders, dataset_sizes
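# Illustrative sketch, not part of the original example: how the pieces above
# fit together. The quadrant count and batch size are arbitrary choices.
#
#     datasets, dataloaders, sizes = get_data(num_quadrant_inputs=1,
#                                             batch_size=32)
#     batch = next(iter(dataloaders['train']))
#     # With num_quadrant_inputs=1, batch['input'] keeps only the bottom-left
#     # quadrant of each digit and batch['output'] holds the complementary
#     # three quadrants; pixels masked by MaskImages are set to -1.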
|
uber/pyro
|
examples/cvae/mnist.py
|
Python
|
apache-2.0
| 3,204
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import concurrent.futures
from testtools import matchers
import oslo_messaging
from oslo_messaging.tests.functional import utils
class CallTestCase(utils.SkipIfNoTransportURL):
def test_specific_server(self):
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client(1)
client.append(text='open')
self.assertEqual('openstack', client.append(text='stack'))
client.add(increment=2)
self.assertEqual(12, client.add(increment=10))
self.assertEqual(9, client.subtract(increment=3))
self.assertEqual('openstack', group.servers[1].endpoint.sval)
self.assertEqual(9, group.servers[1].endpoint.ival)
for i in [0, 2]:
self.assertEqual('', group.servers[i].endpoint.sval)
self.assertEqual(0, group.servers[i].endpoint.ival)
def test_server_in_group(self):
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client()
data = [c for c in 'abcdefghijklmn']
for i in data:
client.append(text=i)
for s in group.servers:
self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
actual = [[c for c in s.endpoint.sval] for s in group.servers]
self.assertThat(actual, utils.IsValidDistributionOf(data))
def test_different_exchanges(self):
# If the different exchanges are not honoured, then the
# teardown may hang unless we broadcast all control messages
# to each server
group1 = self.useFixture(
utils.RpcServerGroupFixture(self.url,
use_fanout_ctrl=True))
group2 = self.useFixture(
utils.RpcServerGroupFixture(self.url, exchange="a",
use_fanout_ctrl=True))
group3 = self.useFixture(
utils.RpcServerGroupFixture(self.url, exchange="b",
use_fanout_ctrl=True))
client1 = group1.client(1)
data1 = [c for c in 'abcdefghijklmn']
for i in data1:
client1.append(text=i)
client2 = group2.client()
data2 = [c for c in 'opqrstuvwxyz']
for i in data2:
client2.append(text=i)
actual1 = [[c for c in s.endpoint.sval] for s in group1.servers]
self.assertThat(actual1, utils.IsValidDistributionOf(data1))
actual1 = [c for c in group1.servers[1].endpoint.sval]
self.assertThat([actual1], utils.IsValidDistributionOf(data1))
for s in group1.servers:
expected = len(data1) if group1.servers.index(s) == 1 else 0
self.assertEqual(expected, len(s.endpoint.sval))
self.assertEqual(0, s.endpoint.ival)
actual2 = [[c for c in s.endpoint.sval] for s in group2.servers]
for s in group2.servers:
self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
self.assertEqual(0, s.endpoint.ival)
self.assertThat(actual2, utils.IsValidDistributionOf(data2))
for s in group3.servers:
self.assertEqual(0, len(s.endpoint.sval))
self.assertEqual(0, s.endpoint.ival)
def test_timeout(self):
transport = self.useFixture(utils.TransportFixture(self.url))
target = oslo_messaging.Target(topic="no_such_topic")
c = utils.ClientStub(transport.transport, target, timeout=1)
self.assertThat(c.ping,
matchers.raises(oslo_messaging.MessagingTimeout))
def test_exception(self):
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client(1)
client.add(increment=2)
f = lambda: client.subtract(increment=3)
self.assertThat(f, matchers.raises(ValueError))
def test_timeout_with_concurrently_queues(self):
transport = self.useFixture(utils.TransportFixture(self.url))
target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()),
server="server_" + str(uuid.uuid4()))
server = self.useFixture(
utils.RpcServerFixture(self.url, target, executor="threading"))
client = utils.ClientStub(transport.transport, target,
cast=False, timeout=5)
def short_periodical_tasks():
for i in range(10):
client.add(increment=1)
time.sleep(1)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
future = executor.submit(client.long_running_task, seconds=10)
executor.submit(short_periodical_tasks)
self.assertRaises(oslo_messaging.MessagingTimeout, future.result)
self.assertEqual(10, server.endpoint.ival)
class CastTestCase(utils.SkipIfNoTransportURL):
# Note: casts return immediately, so these tests utilise a special
# internal sync() cast to ensure prior casts are complete before
# making the necessary assertions.
def test_specific_server(self):
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client(1, cast=True)
client.append(text='open')
client.append(text='stack')
client.add(increment=2)
client.add(increment=10)
client.sync()
group.sync(1)
self.assertEqual('openstack', group.servers[1].endpoint.sval)
self.assertEqual(12, group.servers[1].endpoint.ival)
for i in [0, 2]:
self.assertEqual('', group.servers[i].endpoint.sval)
self.assertEqual(0, group.servers[i].endpoint.ival)
def test_server_in_group(self):
if self.url.startswith("amqp:"):
self.skipTest("QPID-6307")
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client(cast=True)
for i in range(20):
client.add(increment=1)
for i in range(len(group.servers)):
# expect each server to get a sync
client.sync()
group.sync(server="all")
total = 0
for s in group.servers:
ival = s.endpoint.ival
self.assertThat(ival, matchers.GreaterThan(0))
self.assertThat(ival, matchers.LessThan(20))
total += ival
self.assertEqual(20, total)
def test_fanout(self):
group = self.useFixture(utils.RpcServerGroupFixture(self.url))
client = group.client('all', cast=True)
client.append(text='open')
client.append(text='stack')
client.add(increment=2)
client.add(increment=10)
client.sync()
group.sync(server='all')
for s in group.servers:
self.assertEqual('openstack', s.endpoint.sval)
self.assertEqual(12, s.endpoint.ival)
class NotifyTestCase(utils.SkipIfNoTransportURL):
    # NOTE(sileht): Each test must use its own topics so the tests can be
    # run in parallel
def test_simple(self):
listener = self.useFixture(
utils.NotificationFixture(self.url, ['test_simple']))
notifier = listener.notifier('abc')
notifier.info({}, 'test', 'Hello World!')
event = listener.events.get(timeout=1)
self.assertEqual('info', event[0])
self.assertEqual('test', event[1])
self.assertEqual('Hello World!', event[2])
self.assertEqual('abc', event[3])
def test_multiple_topics(self):
listener = self.useFixture(
utils.NotificationFixture(self.url, ['a', 'b']))
a = listener.notifier('pub-a', topic='a')
b = listener.notifier('pub-b', topic='b')
sent = {
'pub-a': [a, 'test-a', 'payload-a'],
'pub-b': [b, 'test-b', 'payload-b']
}
for e in sent.values():
e[0].info({}, e[1], e[2])
received = {}
while len(received) < len(sent):
e = listener.events.get(timeout=1)
received[e[3]] = e
for key in received:
actual = received[key]
expected = sent[key]
self.assertEqual('info', actual[0])
self.assertEqual(expected[1], actual[1])
self.assertEqual(expected[2], actual[2])
def test_multiple_servers(self):
if self.url.startswith("amqp:"):
self.skipTest("QPID-6307")
listener_a = self.useFixture(
utils.NotificationFixture(self.url, ['test-topic']))
listener_b = self.useFixture(
utils.NotificationFixture(self.url, ['test-topic']))
n = listener_a.notifier('pub')
events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh']
for event_type, payload in events_out:
n.info({}, event_type, payload)
events_in = [[(e[1], e[2]) for e in listener_a.get_events()],
[(e[1], e[2]) for e in listener_b.get_events()]]
self.assertThat(events_in, utils.IsValidDistributionOf(events_out))
for stream in events_in:
self.assertThat(len(stream), matchers.GreaterThan(0))
def test_independent_topics(self):
listener_a = self.useFixture(
utils.NotificationFixture(self.url, ['1']))
listener_b = self.useFixture(
utils.NotificationFixture(self.url, ['2']))
a = listener_a.notifier('pub-1', topic='1')
b = listener_b.notifier('pub-2', topic='2')
a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh']
for event_type, payload in a_out:
a.info({}, event_type, payload)
b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop']
for event_type, payload in b_out:
b.info({}, event_type, payload)
for expected in a_out:
actual = listener_a.events.get(timeout=0.5)
self.assertEqual('info', actual[0])
self.assertEqual(expected[0], actual[1])
self.assertEqual(expected[1], actual[2])
self.assertEqual('pub-1', actual[3])
for expected in b_out:
actual = listener_b.events.get(timeout=0.5)
self.assertEqual('info', actual[0])
self.assertEqual(expected[0], actual[1])
self.assertEqual(expected[1], actual[2])
self.assertEqual('pub-2', actual[3])
def test_all_categories(self):
listener = self.useFixture(utils.NotificationFixture(
self.url, ['test_all_categories']))
n = listener.notifier('abc')
cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical']
events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats]
for e in events:
e[0]({}, e[2], e[3])
# order between events with different categories is not guaranteed
received = {}
for expected in events:
e = listener.events.get(timeout=0.5)
received[e[0]] = e
for expected in events:
actual = received[expected[1]]
self.assertEqual(expected[1], actual[0])
self.assertEqual(expected[2], actual[1])
self.assertEqual(expected[3], actual[2])
|
hkumarmk/oslo.messaging
|
oslo_messaging/tests/functional/test_functional.py
|
Python
|
apache-2.0
| 11,782
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from .common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources.appelb import AppELB, AppELBTargetGroup
class AppELBTest(BaseTest):
def test_appelb_simple(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-simple',
'resource': 'app-elb'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_simple_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-simple-filter',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_default_vpc_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_default_vpc')
p = self.load_policy({
'name': 'appelb-default-vpc',
'resource': 'app-elb',
'filters': [{'type': 'default-vpc'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_tags_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-tags-filter',
'resource': 'app-elb',
'filters': [{"tag:KEY1": "VALUE1"}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'appelb-tags-filter',
'resource': 'app-elb',
'filters': [{"tag:KEY1": "VALUE2"}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_is_https_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_is_https')
p = self.load_policy({
'name': 'appelb-is-https-filter',
'resource': 'app-elb',
'filters': [
{'type': 'listener',
'key': "Protocol",
'value': "HTTPS"}
]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_modify_listener(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_modify_listener')
client = session_factory().client('elbv2')
p = self.load_policy({
'name': 'appelb-modify-listener-policy',
'resource': 'app-elb',
'filters': [{
'type': 'listener',
'key': 'Port',
'value': 8080
}],
'actions': [{
'type': 'modify-listener',
'port': 80
}]
},
session_factory=session_factory
)
resources = p.run()
arn = resources[0]['LoadBalancerArn']
listeners = client.describe_listeners(LoadBalancerArn=arn)['Listeners']
        self.assertEqual(listeners[0]['Port'], 80)
def test_appelb_target_group_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_instance_count_non_zero')
p = self.load_policy({
'name': 'appelb-target-group-filter',
'resource': 'app-elb',
'filters': [
{'type': 'target-group',
'key': "length([?Protocol=='HTTP'])", 'value': 1,
'op': 'eq'}
]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_instance_count_filter_zero(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_instance_count_zero')
p = self.load_policy({
'name': 'appelb-instance-count-filter-zero',
'resource': 'app-elb',
'filters': [
{'type': 'target-group',
'key': "max([].length(TargetHealthDescriptions))",
'value': 0,
'op': 'eq'}
]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_instance_count_filter_non_zero(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_instance_count_non_zero')
p = self.load_policy({
'name': 'appelb-instance-count-filter-non-zero',
'resource': 'app-elb',
'filters': [
{'type': 'target-group',
'key': "max([].length(TargetHealthDescriptions))",
'value': 0,
'op': 'gt'}
]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_add_tag(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_add_tag')
p = self.load_policy({
'name': 'appelb-add-tag',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'tag', 'key': 'KEY42', 'value': 'VALUE99'}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_remove_tag(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_remove_tag')
p = self.load_policy({
'name': 'appelb-remove-tag',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'remove-tag', 'tags': ['KEY42']}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_mark_for_delete(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_mark_for_delete')
p = self.load_policy({
'name': 'appelb-mark-for-delete',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'mark-for-op', 'op': 'delete',
'tag': 'custodian_next', 'days': 1}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_delete(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_delete')
p = self.load_policy({
'name': 'appelb-delete',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-2'}],
'actions': [
{'type': 'delete'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_delete_force(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_delete_force')
client = session_factory().client('elbv2')
p = self.load_policy({
'name': 'appelb-modify-listener-policy',
'resource': 'app-elb',
'filters': [{
'type': 'listener',
'key': 'Port',
'value': 80
}],
'actions': [{'type': 'delete'}]
},
session_factory=session_factory
)
resources = p.run()
arn = resources[0]['LoadBalancerArn']
attributes = client.describe_load_balancer_attributes(LoadBalancerArn=arn)['Attributes']
for attribute in attributes:
            for key, value in six.iteritems(attribute):
if 'deletion_protection.enabled' in key:
self.assertTrue(value)
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'appelb-modify-listener-policy',
'resource': 'app-elb',
'filters': [{
'type': 'listener',
'key': 'Port',
'value': 80
}],
'actions': [{'type': 'delete', 'force': True}]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
class AppELBHealthcheckProtocolMismatchTest(BaseTest):
def test_appelb_healthcheck_protocol_mismatch_filter_good(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_healthcheck_protocol_mismatch_good')
p = self.load_policy({
'name': 'appelb-healthcheck-protocol-mismatch-good',
'resource': 'app-elb',
'filters': ['healthcheck-protocol-mismatch']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_healthcheck_protocol_mismatch_filter_bad(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_healthcheck_protocol_mismatch_bad')
p = self.load_policy({
'name': 'appelb-healthcheck-protocol-mismatch-bad',
'resource': 'app-elb',
'filters': ['healthcheck-protocol-mismatch']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
class AppELBTargetGroupTest(BaseTest):
def test_appelb_target_group_simple(self):
self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_target_group_simple')
p = self.load_policy({
'name': 'appelb-target-group-simple',
'resource': 'app-elb-target-group'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_target_group_simple_filter(self):
self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_target_group_simple')
p = self.load_policy({
'name': 'appelb-target-group-simple-filter',
'resource': 'app-elb-target-group',
'filters': [
{'type': 'value',
'key': 'Port',
'value': 443}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_target_group_default_vpc(self):
self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_appelb_target_group_default_vpc')
p = self.load_policy({
'name': 'appelb-target-group-default-vpc',
'resource': 'app-elb-target-group',
'filters': [{'type': 'default-vpc'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
|
siddartha1992/cloud-custodian
|
tests/test_appelb.py
|
Python
|
apache-2.0
| 13,328
|
# -*- coding: utf-8 -*-
#
# MLDB-558-python-unicode.py
# mldb.ai inc, 2015
# Mich, 2016-02-08
# this file is part of mldb. copyright 2015 mldb.ai inc. all rights reserved.
#
from urllib.parse import quote
from mldb import mldb, MldbUnitTest
class Utf8IdsTest(MldbUnitTest): # noqa
def test_mldb_create_dataset(self):
_id = 'épluche'
mldb.create_dataset({
'id' : _id,
'type' : 'sparse.mutable'
}).commit()
# fetch escaped ascii
url = quote(('/v1/datasets/' + _id).encode('utf8'))
mldb.log(url)
res = mldb.get(url)
obj = res.json()
self.assertEqual(obj['id'], _id)
# fetch unescaped utf-8 str
url = '/v1/datasets/épluche'
mldb.log(url)
res = mldb.get(url)
obj = res.json()
self.assertEqual(obj['id'], _id)
# fetch unescaped unicode utf-8
url = '/v1/datasets/épluche'
mldb.log(url)
res = mldb.get(url)
obj = res.json()
self.assertEqual(obj['id'], _id)
def test_mixed_utf8_escape(self):
# the parser assumes utf-8 is already escaped
_id = 'éé'
mldb.create_dataset({
'id' : _id,
'type' : 'sparse.mutable'
}).commit()
# fetch escaped ascii
url = '/v1/datasets/é' + quote('é')
mldb.log(url)
res = mldb.get(url)
mldb.log(res)
def test_mldb_post_dataset(self):
_id = 'époque'
res = mldb.post('/v1/datasets', {
'id' : _id,
'type' : 'sparse.mutable'
})
mldb.log(mldb.get('/v1/datasets'))
url = quote(('/v1/datasets/' + _id).encode('utf8'))
mldb.log(url)
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
def test_mldb_put_dataset(self):
_id = 'épopée'
url = quote('/v1/datasets/' + _id)
mldb.log(url)
mldb.put(url, {
'type' : 'sparse.mutable'
})
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
def test_name_with_slash(self):
_id = "name/with/slash"
url = '/v1/datasets/' + quote(_id, safe='')
mldb.log(url)
mldb.put(url, {
'type' : 'sparse.mutable'
})
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
def test_name_with_space(self):
_id = "name with space"
url = '/v1/datasets/' + quote(_id)
mldb.log(url)
mldb.put(url, {
'type' : 'sparse.mutable'
})
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
def execute_sequence(self, _id):
url = '/v1/datasets/' + quote(_id, safe='')
mldb.log(url)
res = mldb.put(url, {
'type' : 'sparse.mutable'
})
res = mldb.get(res.headers['Location']).json()
self.assertEqual(res['id'], _id)
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
mldb.delete(url)
with self.assertMldbRaises(status_code=404):
mldb.get(url)
res = mldb.post('/v1/datasets', {
'id' : _id,
'type' : 'sparse.mutable'
})
res = mldb.get(res.headers['Location']).json()
self.assertEqual(res['id'], _id)
res = mldb.get(url).json()
self.assertEqual(res['id'], _id)
mldb.delete(url)
with self.assertMldbRaises(status_code=404):
mldb.get(url)
def test_cedille(self):
self.execute_sequence('françois')
def test_cedille_and_slash(self):
self.execute_sequence('françois/michel')
def test_cedille_and_whitespace(self):
self.execute_sequence('françois michel')
def test_cedille_whitespace_slash_question_mark(self):
self.execute_sequence('"françois says hello/goodbye, eh?"')
def test_plus_sign(self):
self.execute_sequence('"a+b"')
mldb.post('/v1/datasets', {
'id' : 'a+b',
'type' : 'sparse.mutable'
})
mldb.get('/v1/datasets/a+b').json()
def test_extra_5(self):
mldb.put('/v1/datasets/ds', {
'type' : 'sparse.mutable'
})
mldb.post('/v1/datasets/ds/commit')
mldb.put('/v1/procedures/' + quote('françois'), {
'type' : 'transform',
'params' : {
'inputData' : 'SELECT * FROM ds',
'outputDataset' : {
'id' : 'outDs',
'type' : 'sparse.mutable'
}
}
})
url = '/v1/procedures/' + quote('françois') + '/runs/' \
+ quote('michêl')
mldb.put(url)
mldb.get(url)
def test_select_over_utf8_dataset_name(self):
_id = "hellô"
mldb.create_dataset({
"id": _id,
"type": "embedding"
})
result = mldb.get("/v1/query", q="select * from \"hellô\"")
mldb.log(result.text)
result = mldb.get("/v1/datasets")
mldb.log(result.text)
result = mldb.put("/v1/datasets/hôwdy", {"type": 'embedding'})
mldb.log(result.text)
result = mldb.get("/v1/datasets")
mldb.log(result.text)
result = mldb.post("/v1/datasets", {
"id": "hî",
"type": 'embedding'
})
mldb.log(result.text)
result = mldb.get("/v1/datasets")
mldb.log(result.text)
def test_slash_dataset(self):
ds = mldb.create_dataset({
'id' : 's/lash',
'type' : 'sparse.mutable'
})
ds.commit()
mldb.log(mldb.get('/v1/query', q='SELECT * FROM "s/lash"'))
if __name__ == '__main__':
mldb.run_tests()
|
mldbai/mldb
|
testing/MLDB-558-python-unicode.py
|
Python
|
apache-2.0
| 5,796
|
"""Pytentd test suite"""
from flask import current_app, g
from py.test import main
from tentd.tests.http import *
from tentd.tests.mock import *
def profile_url_for(entity, _external=False):
"""Get an entity profile url without using url_for"""
base_url = 'http://{server}' if _external else ''
profile_url = {
'multiple': base_url + '/{user}',
'single': base_url,
'subdomain': 'http://{user}.{server}',
}[current_app.user_mode] + '/profile'
server = current_app.config.get('SERVER_NAME')
return profile_url.format(server=server, user=entity.name)
def response_has_link_header(response):
"""Test that a response includes an entity link header"""
link = '<{}>; rel="https://tent.io/rels/profile"'.format(
profile_url_for(g.entity, _external=True))
return response.headers['Link'] == link
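# Worked example added for illustration (not in the original test suite):
# with current_app.user_mode == 'subdomain', SERVER_NAME == 'tentd.example'
# and an entity named 'alice' (both values made up here),
# profile_url_for(entity, _external=True) returns
# 'http://alice.tentd.example/profile'; in 'single' mode the same call
# returns 'http://tentd.example/profile'.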
if __name__ == '__main__':
main()
|
pytent/pytentd
|
tentd/tests/__init__.py
|
Python
|
apache-2.0
| 906
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import unittest
from nbformat.v4 import new_notebook
import snapshottest
exporter = importlib.import_module("exporter")
class TestExporterMethods(snapshottest.TestCase):
def setUp(self):
self.maxDiff = None
self.exporter = exporter.Exporter(100, exporter.TemplateType.BASIC)
def test_create_cell_from_args_with_no_args(self):
args = {}
cell = exporter.create_cell_from_args(args)
self.assertMatchSnapshot(cell.source)
def test_create_cell_from_args_with_one_arg(self):
args = {"source": "gs://ml-pipeline/data.csv"}
cell = exporter.create_cell_from_args(args)
self.assertMatchSnapshot(cell.source)
    # This test generates HTML to avoid issues with Python 3.5, where dict
    # objects do not retain insertion order. Because of this, we only check
    # that the provided arguments exist and equal the provided values.
def test_create_cell_from_args_with_multiple_args(self):
nb = new_notebook()
args = {
"source": "gs://ml-pipeline/data.csv",
"target_lambda": "lambda x: (x['target'] > x['fare'] * 0.2)"
}
code = [
"print(variables.get('source'))",
"print(variables.get('target_lambda'))"
]
nb.cells.append(exporter.create_cell_from_args(args))
nb.cells.append(exporter.create_cell_from_custom_code(code))
html = self.exporter.generate_html_from_notebook(nb)
self.assertMatchSnapshot(html)
def test_create_cell_from_file(self):
cell = exporter.create_cell_from_file("types/test.py")
self.assertMatchSnapshot(cell.source)
def test_create_cell_from_custom_code(self):
code = [
"x = 2",
"print(x)"
]
cell = exporter.create_cell_from_custom_code(code)
self.assertMatchSnapshot(cell.source)
# Tests to ensure output is generated for custom visualizations.
def test_generate_custom_visualization_html_from_notebook(self):
nb = new_notebook()
args = {"x": 2}
code = ["print(variables.get('x'))"]
nb.cells.append(exporter.create_cell_from_args(args))
nb.cells.append(exporter.create_cell_from_custom_code(code))
html = self.exporter.generate_html_from_notebook(nb)
self.assertMatchSnapshot(html)
if __name__ == "__main__":
unittest.main()
|
kubeflow/kfp-tekton-backend
|
backend/src/apiserver/visualization/test_exporter.py
|
Python
|
apache-2.0
| 2,980
|
"""
"""
from crispy_forms.layout import Layout, Row, Column
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from fluentcms_contactform import appsettings
from fluentcms_contactform.forms.base import AbstractContactForm, ContactFormHelper, SubmitButton
from fluentcms_contactform.models import ContactFormData
class CompactContactForm(AbstractContactForm):
"""
A form with a very compact layout;
all the name/email/phone fields are displayed in a single top row.
It uses Bootstrap 3 layout by default to generate the columns.
For improved appearance, disable the "subject" line too using::
FLUENTCMS_CONTACTFORM_DEFAULT_FIELDS = ('name', 'email', 'phone_number', 'message')
"""
top_row_fields = appsettings.FLUENTCMS_CONTACTFORM_COMPACT_FIELDS
top_row_columns = appsettings.FLUENTCMS_CONTACTFORM_COMPACT_GRID_SIZE
top_column_class = appsettings.FLUENTCMS_CONTACTFORM_COMPACT_COLUMN_CSS_CLASS
class Meta:
model = ContactFormData
fields = appsettings.FLUENTCMS_CONTACTFORM_DEFAULT_FIELDS
@cached_property
def helper(self):
        # As an extra service, auto-adjust the layout based on the project settings.
        # This allows defining the top row while still getting either 2 or 3 columns.
top_fields = [name for name in self.fields.keys() if name in self.top_row_fields]
other_fields = [name for name in self.fields.keys() if name not in self.top_row_fields]
col_size = int(self.top_row_columns / len(top_fields))
col_class = self.top_column_class.format(size=col_size)
helper = ContactFormHelper()
helper.form_class = 'form-vertical contactform-compact'
helper.label_class = 'sr-only'
helper.field_class = ''
helper.layout = Layout(
Row(*[Column(name, css_class=col_class) for name in top_fields]),
*other_fields
)
helper.add_input(SubmitButton())
return helper
def __init__(self, *args, **kwargs):
super(CompactContactForm, self).__init__(*args, **kwargs)
if 'phone_number' in self.fields:
self.fields['phone_number'].label = _("Phone (optional)")
for field in self.fields.values():
field.widget.attrs['placeholder'] = u"{0}".format(field.label)
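# Worked example added for illustration (not part of the original module):
# assuming a 12-column grid size and the three top-row fields name, email and
# phone_number, the helper above computes col_size = int(12 / 3) == 4 and
# wraps each top-row field in a Column whose CSS class comes from
# top_column_class.format(size=4) (e.g. 'col-md-4' with a Bootstrap-style
# column class setting); the remaining fields are rendered below that row.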
|
edoburu/fluentcms-contactform
|
fluentcms_contactform/forms/compact.py
|
Python
|
apache-2.0
| 2,356
|
from __future__ import print_function
import hashlib
from calendar import timegm
from datetime import datetime
from builtins import int
import fake_gen
def now():
return timegm(datetime.utcnow().utctimetuple())
class PayloadFactory(fake_gen.DictFactory):
iat = fake_gen.Constant(now())
exp = fake_gen.RandomInteger(int(now() + 15), int(now() * 2))
iss = fake_gen.Constant(u'aap.ebi.ac.uk')
sub = fake_gen.Sum([fake_gen.Constant(u'usr-'),
fake_gen.HashHexDigestFactory(hashlib.md5)])
email = fake_gen.FakeDataFactory(u'email')
name = fake_gen.FakeDataFactory(u'name')
nickname = fake_gen.HashHexDigestFactory(hashlib.sha256)
domains = fake_gen.Constant([])
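# Illustration added for clarity (not in the original file): each item from
# PayloadFactory().generate(n) is a dict shaped roughly like
#     {'iat': <now>, 'exp': <now+15 .. 2*now>, 'iss': u'aap.ebi.ac.uk',
#      'sub': u'usr-<md5 hexdigest>', 'email': <fake email>,
#      'name': <fake name>, 'nickname': <sha256 hexdigest>, 'domains': []}
# The factories below override individual fields to build the valid and
# invalid cases.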
payloadValidity = [
(u'There is absolutely no cause for alarm',
PayloadFactory(),
True),
(u'Expired',
PayloadFactory(
iat=fake_gen.RandomInteger(0, now() - 3600),
exp=fake_gen.RandomInteger(now() - 3600, now() - 1)
), False),
(u'No expiration',
PayloadFactory(
exp=fake_gen.Constant(None)
), False),
# Standard says iat should be a number, shouldn't care when it's issued
# yay for sanity checks, I guess
(u'Back to the future',
PayloadFactory(
iat=fake_gen.RandomInteger(now() + 3600, now() * 2),
exp=fake_gen.RandomInteger(now() * 2 + 1, now() * 3)
), True),
(u'No issue time',
PayloadFactory(
iat=fake_gen.Constant(None)
), False),
(u'Untrusted issuer',
PayloadFactory(
iss=fake_gen.FakeDataFactory(u'address')
), True),
(u'Untrusted issuer',
PayloadFactory(
iss=fake_gen.Constant(None)
), True),
(u'No subject',
PayloadFactory(
sub=fake_gen.Constant(None)
), False),
(u'No email',
PayloadFactory(
email=fake_gen.Constant(None)
), False),
(u'No name',
PayloadFactory(
name=fake_gen.Constant(None)
), False),
(u'No nickname',
PayloadFactory(
nickname=fake_gen.Constant(None)
), False),
]
validPayloads = [(name, generator) for (name, generator, valid)
in payloadValidity if valid]
invalidPayloads = [(name, generator) for (name, generator, valid)
in payloadValidity if not valid]
if __name__ == u'__main__':
for payload in PayloadFactory().generate(10):
print(payload)
|
EMBL-EBI-TSI/aap-client-python
|
tests/payload_gen.py
|
Python
|
apache-2.0
| 2,438
|
"""Tests for evaluating Celtic initial consonant mutations."""
import unittest
import normalize_breton_lib
class TestStringMethods(unittest.TestCase):
"""Tests for evaluating initial consonant mutations."""
def test_normalize_breton(self):
'Test the output of normalize_breton.'
with open("testdata/bre_normalized_sentences.tsv", "r") as test_file:
test_cases = test_file.readlines()[1:]
for sentence in test_cases:
with self.subTest(sentence=sentence):
test_case = sentence.strip().split("\t")[1]
expected = sentence.strip().split("\t")[2]
test_fst = normalize_breton_lib.normalize_breton(test_case)
self.assertEqual(test_fst, expected)
def test_normalize_breton_soft_mutation(self):
'Test the Breton soft mutation.'
for test in [(("Div plac'h", "div blac'h"),
("DA TRA", "da dra"),
("da Kemper", "da gemper"),
("da gwin", "da win"),
("pe mamm", "pe vamm"))]:
for test_case, expected in test:
with self.subTest(test_case=test_case):
normalized_text = (
normalize_breton_lib.apply_single_mutation(
test_case, "soft"))
self.assertEqual(normalized_text, expected)
def test_normalize_breton_soft_mutation_no_mutation(self):
'Test the Breton soft mutation on words that should not mutate'
for test in [(("bara kozh", "bara kozh"),
("Bara ha kig", "bara ha kig"))]:
for test_case, expected in test:
with self.subTest(test_case=test_case):
normalized_text = (
normalize_breton_lib.apply_single_mutation(
test_case, "soft"))
self.assertEqual(normalized_text, expected)
def test_normalize_breton_hard_mutation(self):
'Test the Breton hard mutation.'
for test in [(("da'z bag", "da'z pag"),
('ho geriadur', 'ho keriadur'),
("ho Gwenn-ha-Du", "ho kwenn-ha-du"))]:
for test_case, expected in test:
with self.subTest(test_case=test_case):
normalized_text = (
normalize_breton_lib.apply_single_mutation(
test_case, "hard"))
self.assertEqual(normalized_text, expected)
def test_normalize_breton_spirant_mutation(self):
'Test the Breton spirant mutation'
for test in [(('tri pesk', 'tri fesk'),
('Hon tad', 'hon zad'),
('nav ki', "nav c'hi"))]:
for test_case, expected in test:
with self.subTest(test_case=test_case):
normalized_text = (
normalize_breton_lib.apply_single_mutation(
test_case, "spirant"))
self.assertEqual(normalized_text, expected)
if __name__ == '__main__':
unittest.main()
|
googleinterns/text-norm-for-low-resource-languages
|
starter_project/normalize_breton_test.py
|
Python
|
apache-2.0
| 3,173
|
"""
.. module:: lm_lstm_crf
:synopsis: lm_lstm_crf
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import numpy as np
import model.crf as crf
import model.utils as utils
import model.highway as highway
class LM_LSTM_CRF(nn.Module):
"""LM_LSTM_CRF model
args:
tagset_size: size of label set
char_size: size of char dictionary
char_dim: size of char embedding
char_hidden_dim: size of char-level lstm hidden dim
char_rnn_layers: number of char-level lstm layers
embedding_dim: size of word embedding
word_hidden_dim: size of word-level blstm hidden dim
word_rnn_layers: number of word-level lstm layers
vocab_size: size of word dictionary
dropout_ratio: dropout ratio
large_CRF: use CRF_L or not, refer model.crf.CRF_L and model.crf.CRF_S for more details
if_highway: use highway layers or not
in_doc_words: number of words that occurred in the corpus (used for language model prediction)
highway_layers: number of highway layers
"""
def __init__(self, tagset_size, char_size, char_dim, char_hidden_dim, char_rnn_layers, embedding_dim, word_hidden_dim, word_rnn_layers, vocab_size, dropout_ratio, large_CRF=True, if_highway = False, in_doc_words = 2, highway_layers = 1):
super(LM_LSTM_CRF, self).__init__()
self.char_dim = char_dim
self.char_hidden_dim = char_hidden_dim
self.char_size = char_size
self.word_dim = embedding_dim
self.word_hidden_dim = word_hidden_dim
self.word_size = vocab_size
self.if_highway = if_highway
self.char_embeds = nn.Embedding(char_size, char_dim)
self.forw_char_lstm = nn.LSTM(char_dim, char_hidden_dim, num_layers=char_rnn_layers, bidirectional=False, dropout=dropout_ratio)
self.back_char_lstm = nn.LSTM(char_dim, char_hidden_dim, num_layers=char_rnn_layers, bidirectional=False, dropout=dropout_ratio)
self.char_rnn_layers = char_rnn_layers
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.word_lstm = nn.LSTM(embedding_dim + char_hidden_dim * 2, word_hidden_dim // 2, num_layers=word_rnn_layers, bidirectional=True, dropout=dropout_ratio)
self.word_rnn_layers = word_rnn_layers
self.dropout = nn.Dropout(p=dropout_ratio)
self.tagset_size = tagset_size
if large_CRF:
self.crf = crf.CRF_L(word_hidden_dim, tagset_size)
else:
self.crf = crf.CRF_S(word_hidden_dim, tagset_size)
if if_highway:
self.forw2char = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
self.back2char = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
self.forw2word = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
self.back2word = highway.hw(char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
self.fb2char = highway.hw(2 * char_hidden_dim, num_layers=highway_layers, dropout_ratio=dropout_ratio)
self.char_pre_train_out = nn.Linear(char_hidden_dim, char_size)
self.word_pre_train_out = nn.Linear(char_hidden_dim, in_doc_words)
self.batch_size = 1
self.word_seq_length = 1
def set_batch_size(self, bsize):
"""
set batch size
"""
self.batch_size = bsize
def set_batch_seq_size(self, sentence):
"""
set batch size and sequence length
"""
tmp = sentence.size()
self.word_seq_length = tmp[0]
self.batch_size = tmp[1]
def rand_init_embedding(self):
"""
random initialize char-level embedding
"""
utils.init_embedding(self.char_embeds.weight)
def load_pretrained_word_embedding(self, pre_word_embeddings):
"""
load pre-trained word embedding
args:
pre_word_embeddings (self.word_size, self.word_dim) : pre-trained embedding
"""
assert (pre_word_embeddings.size()[1] == self.word_dim)
self.word_embeds.weight = nn.Parameter(pre_word_embeddings)
def rand_init(self, init_char_embedding=True, init_word_embedding=False):
"""
random initialization
args:
init_char_embedding: random initialize char embedding or not
init_word_embedding: random initialize word embedding or not
"""
if init_char_embedding:
utils.init_embedding(self.char_embeds.weight)
if init_word_embedding:
utils.init_embedding(self.word_embeds.weight)
if self.if_highway:
self.forw2char.rand_init()
self.back2char.rand_init()
self.forw2word.rand_init()
self.back2word.rand_init()
self.fb2char.rand_init()
utils.init_lstm(self.forw_char_lstm)
utils.init_lstm(self.back_char_lstm)
utils.init_lstm(self.word_lstm)
utils.init_linear(self.char_pre_train_out)
utils.init_linear(self.word_pre_train_out)
self.crf.rand_init()
def word_pre_train_forward(self, sentence, position, hidden=None):
"""
output of forward language model
args:
sentence (char_seq_len, batch_size): char-level representation of sentence
position (word_seq_len, batch_size): position of blank space in char-level representation of sentence
hidden: initial hidden state
return:
language model output (word_seq_len, in_doc_word), hidden
"""
embeds = self.char_embeds(sentence)
d_embeds = self.dropout(embeds)
lstm_out, hidden = self.forw_char_lstm(d_embeds)
tmpsize = position.size()
position = position.unsqueeze(2).expand(tmpsize[0], tmpsize[1], self.char_hidden_dim)
select_lstm_out = torch.gather(lstm_out, 0, position)
d_lstm_out = self.dropout(select_lstm_out).view(-1, self.char_hidden_dim)
if self.if_highway:
char_out = self.forw2word(d_lstm_out)
d_char_out = self.dropout(char_out)
else:
d_char_out = d_lstm_out
pre_score = self.word_pre_train_out(d_char_out)
return pre_score, hidden
def word_pre_train_backward(self, sentence, position, hidden=None):
"""
output of backward language model
args:
sentence (char_seq_len, batch_size): char-level representation of sentence (inverse order)
position (word_seq_len, batch_size): position of blank space in inversed char-level representation of sentence
hidden: initial hidden state
return:
language model output (word_seq_len, in_doc_word), hidden
"""
embeds = self.char_embeds(sentence)
d_embeds = self.dropout(embeds)
lstm_out, hidden = self.back_char_lstm(d_embeds)
tmpsize = position.size()
position = position.unsqueeze(2).expand(tmpsize[0], tmpsize[1], self.char_hidden_dim)
select_lstm_out = torch.gather(lstm_out, 0, position)
d_lstm_out = self.dropout(select_lstm_out).view(-1, self.char_hidden_dim)
if self.if_highway:
char_out = self.back2word(d_lstm_out)
d_char_out = self.dropout(char_out)
else:
d_char_out = d_lstm_out
pre_score = self.word_pre_train_out(d_char_out)
return pre_score, hidden
def forward(self, forw_sentence, forw_position, back_sentence, back_position, word_seq, hidden=None):
'''
args:
forw_sentence (char_seq_len, batch_size) : char-level representation of sentence
forw_position (word_seq_len, batch_size) : position of blank space in char-level representation of sentence
back_sentence (char_seq_len, batch_size) : char-level representation of sentence (inverse order)
back_position (word_seq_len, batch_size) : position of blank space in inversed char-level representation of sentence
word_seq (word_seq_len, batch_size) : word-level representation of sentence
hidden: initial hidden state
return:
crf output (word_seq_len, batch_size, tag_size, tag_size), hidden
'''
self.set_batch_seq_size(forw_position)
#embedding layer
forw_emb = self.char_embeds(forw_sentence)
back_emb = self.char_embeds(back_sentence)
#dropout
d_f_emb = self.dropout(forw_emb)
d_b_emb = self.dropout(back_emb)
#forward the whole sequence
forw_lstm_out, _ = self.forw_char_lstm(d_f_emb)#seq_len_char * batch * char_hidden_dim
back_lstm_out, _ = self.back_char_lstm(d_b_emb)#seq_len_char * batch * char_hidden_dim
#select predict point
forw_position = forw_position.unsqueeze(2).expand(self.word_seq_length, self.batch_size, self.char_hidden_dim)
select_forw_lstm_out = torch.gather(forw_lstm_out, 0, forw_position)
back_position = back_position.unsqueeze(2).expand(self.word_seq_length, self.batch_size, self.char_hidden_dim)
select_back_lstm_out = torch.gather(back_lstm_out, 0, back_position)
fb_lstm_out = self.dropout(torch.cat((select_forw_lstm_out, select_back_lstm_out), dim=2))
if self.if_highway:
char_out = self.fb2char(fb_lstm_out)
d_char_out = self.dropout(char_out)
else:
d_char_out = fb_lstm_out
#word
word_emb = self.word_embeds(word_seq)
d_word_emb = self.dropout(word_emb)
#combine
word_input = torch.cat((d_word_emb, d_char_out), dim = 2)
#word level lstm
lstm_out, _ = self.word_lstm(word_input)
d_lstm_out = self.dropout(lstm_out)
#convert to crf
crf_out = self.crf(d_lstm_out)
crf_out = crf_out.view(self.word_seq_length, self.batch_size, self.tagset_size, self.tagset_size)
return crf_out
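if __name__ == '__main__':
    # Illustrative sketch only, not part of the original repository: build a
    # tiny model to show what the constructor arguments control. All sizes
    # below are made-up toy values; real runs use the training scripts that
    # ship with the repo.
    toy_model = LM_LSTM_CRF(tagset_size=5, char_size=30, char_dim=8,
                            char_hidden_dim=16, char_rnn_layers=1,
                            embedding_dim=32, word_hidden_dim=64,
                            word_rnn_layers=1, vocab_size=100,
                            dropout_ratio=0.5, large_CRF=True,
                            if_highway=True, in_doc_words=100)
    toy_model.rand_init(init_char_embedding=True, init_word_embedding=True)
    toy_model.set_batch_size(4)
    # forward() then expects char-level index tensors, the positions of word
    # boundaries within them, and word-level index tensors with the shapes
    # documented in the forward() docstring above.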
|
LiyuanLucasLiu/LM-LSTM-CRF
|
model/lm_lstm_crf.py
|
Python
|
apache-2.0
| 10,371
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import time
import unittest
import pylast
from .test_pylast import TestPyLastWithLastFm
class TestPyLastNetwork(TestPyLastWithLastFm):
def test_scrobble(self):
# Arrange
artist = "test artist"
title = "test title"
timestamp = self.unix_timestamp()
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist=artist, title=title, timestamp=timestamp)
# Assert
# limit=2 to ignore now-playing:
last_scrobble = lastfm_user.get_recent_tracks(limit=2)[0]
self.assertEqual(str(last_scrobble.track.artist).lower(), artist)
self.assertEqual(str(last_scrobble.track.title).lower(), title)
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_update_now_playing(self):
# Arrange
artist = "Test Artist"
title = "test title"
album = "Test Album"
track_number = 1
lastfm_user = self.network.get_user(self.username)
# Act
self.network.update_now_playing(
artist=artist, title=title, album=album, track_number=track_number
)
# Assert
current_track = lastfm_user.get_now_playing()
self.assertIsNotNone(current_track)
self.assertEqual(str(current_track.title).lower(), "test title")
self.assertEqual(str(current_track.artist).lower(), "test artist")
def test_enable_rate_limiting(self):
# Arrange
self.assertFalse(self.network.is_rate_limited())
# Act
self.network.enable_rate_limit()
then = time.time()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
# Make a second network call, limiting should be applied
self.network.get_top_artists()
now = time.time()
# Assert
self.assertTrue(self.network.is_rate_limited())
self.assertGreaterEqual(now - then, 0.2)
def test_disable_rate_limiting(self):
# Arrange
self.network.enable_rate_limit()
self.assertTrue(self.network.is_rate_limited())
# Act
self.network.disable_rate_limit()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
# Make a second network call, limiting should be applied
self.network.get_top_artists()
# Assert
self.assertFalse(self.network.is_rate_limited())
def test_lastfm_network_name(self):
# Act
name = str(self.network)
# Assert
self.assertEqual(name, "Last.fm Network")
def test_geo_get_top_artists(self):
# Arrange
# Act
artists = self.network.get_geo_top_artists(country="United Kingdom", limit=1)
# Assert
self.assertEqual(len(artists), 1)
self.assertIsInstance(artists[0], pylast.TopItem)
self.assertIsInstance(artists[0].item, pylast.Artist)
def test_geo_get_top_tracks(self):
# Arrange
# Act
tracks = self.network.get_geo_top_tracks(
country="United Kingdom", location="Manchester", limit=1
)
# Assert
self.assertEqual(len(tracks), 1)
self.assertIsInstance(tracks[0], pylast.TopItem)
self.assertIsInstance(tracks[0].item, pylast.Track)
def test_network_get_top_artists_with_limit(self):
# Arrange
# Act
artists = self.network.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
def test_network_get_top_tags_with_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tags, pylast.Tag)
def test_network_get_top_tags_with_no_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags()
# Assert
self.helper_at_least_one_thing_in_top_list(tags, pylast.Tag)
def test_network_get_top_tracks_with_limit(self):
# Arrange
# Act
tracks = self.network.get_top_tracks(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tracks, pylast.Track)
def test_country_top_tracks(self):
# Arrange
country = self.network.get_country("Croatia")
# Act
things = country.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_country_network_top_tracks(self):
# Arrange
# Act
things = self.network.get_geo_top_tracks("Croatia", limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_tag_top_tracks(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
things = tag.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_album_data(self):
# Arrange
thing = self.network.get_album("Test Artist", "Test Album")
# Act
stringed = str(thing)
rep = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url()
# Assert
self.assertEqual(stringed, "Test Artist - Test Album")
self.assertIn("pylast.Album('Test Artist', 'Test Album',", rep)
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual("https://www.last.fm/music/test%2bartist/test%2balbum", url)
def test_track_data(self):
# Arrange
thing = self.network.get_track("Test Artist", "test title")
# Act
stringed = str(thing)
rep = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url(pylast.DOMAIN_FRENCH)
# Assert
self.assertEqual(stringed, "Test Artist - test title")
self.assertIn("pylast.Track('Test Artist', 'test title',", rep)
self.assertEqual(title, "test title")
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual(
"https://www.last.fm/fr/music/test%2bartist/_/test%2btitle", url
)
def test_country_top_artists(self):
# Arrange
country = self.network.get_country("Ukraine")
# Act
artists = country.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
def test_caching(self):
# Arrange
user = self.network.get_user("RJ")
# Act
self.network.enable_caching()
tags1 = user.get_top_tags(limit=1, cacheable=True)
tags2 = user.get_top_tags(limit=1, cacheable=True)
# Assert
self.assertTrue(self.network.is_caching_enabled())
self.assertEqual(tags1, tags2)
self.network.disable_caching()
self.assertFalse(self.network.is_caching_enabled())
def test_album_mbid(self):
# Arrange
mbid = "a6a265bf-9f81-4055-8224-f7ac0aa6b937"
# Act
album = self.network.get_album_by_mbid(mbid)
album_mbid = album.get_mbid()
# Assert
self.assertIsInstance(album, pylast.Album)
self.assertEqual(album.title.lower(), "test")
self.assertEqual(album_mbid, mbid)
def test_artist_mbid(self):
# Arrange
mbid = "7e84f845-ac16-41fe-9ff8-df12eb32af55"
# Act
artist = self.network.get_artist_by_mbid(mbid)
# Assert
self.assertIsInstance(artist, pylast.Artist)
self.assertEqual(artist.name, "MusicBrainz Test Artist")
def test_track_mbid(self):
# Arrange
mbid = "ebc037b1-cc9c-44f2-a21f-83c219f0e1e0"
# Act
track = self.network.get_track_by_mbid(mbid)
track_mbid = track.get_mbid()
# Assert
self.assertIsInstance(track, pylast.Track)
self.assertEqual(track.title, "first")
self.assertEqual(track_mbid, mbid)
def test_init_with_token(self):
# Arrange/Act
msg = None
try:
pylast.LastFMNetwork(
api_key=self.__class__.secrets["api_key"],
api_secret=self.__class__.secrets["api_secret"],
token="invalid",
)
except pylast.WSError as exc:
msg = str(exc)
# Assert
self.assertEqual(msg, "Unauthorized Token - This token has not been issued")
def test_proxy(self):
# Arrange
host = "https://example.com"
port = 1234
# Act / Assert
self.network.enable_proxy(host, port)
self.assertTrue(self.network.is_proxy_enabled())
self.assertEqual(self.network._get_proxy(), ["https://example.com", 1234])
self.network.disable_proxy()
self.assertFalse(self.network.is_proxy_enabled())
def test_album_search(self):
# Arrange
album = "Nevermind"
# Act
search = self.network.search_for_album(album)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Album)
def test_album_search_images(self):
# Arrange
album = "Nevermind"
search = self.network.search_for_album(album)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 4)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_artist_search(self):
# Arrange
artist = "Nirvana"
# Act
search = self.network.search_for_artist(artist)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Artist)
def test_artist_search_images(self):
# Arrange
artist = "Nirvana"
search = self.network.search_for_artist(artist)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 5)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_track_search(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
# Act
search = self.network.search_for_track(artist, track)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Track)
def test_track_search_images(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
search = self.network.search_for_track(artist, track)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 4)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_search_get_total_result_count(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
search = self.network.search_for_track(artist, track)
# Act
total = search.get_total_result_count()
# Assert
self.assertGreater(int(total), 10000)
if __name__ == "__main__":
unittest.main(failfast=True)
|
hugovk/pylast
|
tests/test_network.py
|
Python
|
apache-2.0
| 12,784
|
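The tests above exercise the public pylast entry points (network construction, search pagination, localized URLs, MBID lookups). A minimal usage sketch of the same calls outside the test harness, assuming placeholder Last.fm credentials; API_KEY and API_SECRET are stand-ins, not real values:

import pylast

API_KEY = "your-api-key"        # placeholder
API_SECRET = "your-api-secret"  # placeholder

network = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET)

# Same pattern as test_album_search / test_search_get_total_result_count above.
search = network.search_for_album("Nevermind")
first_page = search.get_next_page()           # list of pylast.Album
print(search.get_total_result_count())

# Same pattern as test_track_data above.
track = network.get_track("Nirvana", "Smells Like Teen Spirit")
print(track.get_url(pylast.DOMAIN_FRENCH))    # localized last.fm URL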
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Interface definition and contract design.
An abstract class whose methods are all abstract acts as an interface.
'''
from abc import ABCMeta, abstractmethod  # standard pattern for defining an interface
class Alert:
    __metaclass__ = ABCMeta
    @abstractmethod  # interface method: every subclass must implement it, otherwise instantiation fails
    def Send(self):
        pass
class Weixin(Alert):  # concrete class implementing the interface
    def __init__(self):
        print '__init__'
    def Send(self):  # define the method declared in the interface to honour the contract
        print 'send.weixin'
f=Weixin()
f.Send()
|
zhangyage/Python-oldboy
|
day04/oop/class_study10.py
|
Python
|
apache-2.0
| 711
|
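The file above is written for Python 2. The same interface pattern in Python 3 uses abc.ABC directly; a minimal sketch (not part of the original lesson) showing that a subclass missing the abstract method cannot be instantiated:

from abc import ABC, abstractmethod

class Alert(ABC):
    @abstractmethod
    def send(self):
        ...

class Weixin(Alert):
    def send(self):
        print("send.weixin")

class Broken(Alert):   # does not implement send()
    pass

Weixin().send()        # prints "send.weixin"
try:
    Broken()
except TypeError as exc:
    print(exc)         # can't instantiate abstract class Broken with abstract method send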
# coding=utf-8
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.jvm.targets.jvm_target import JvmTarget
class JavaProtobufLibrary(JvmTarget):
"""A Java library generated from Protocol Buffer IDL files."""
|
twitter/pants
|
src/python/pants/backend/codegen/protobuf/java/java_protobuf_library.py
|
Python
|
apache-2.0
| 395
|
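For context, a target class like JavaProtobufLibrary is normally referenced from a BUILD file rather than imported directly. A hypothetical BUILD-file stanza, assuming the target is registered under the conventional alias java_protobuf_library; the name, source file and dependency are placeholders:

# BUILD (sketch)
java_protobuf_library(
    name="proto",
    sources=["service.proto"],
    dependencies=[],
)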
"""
Integrate with namecheap DNS services.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/namecheapdns/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_DOMAIN
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.aiohttp_client import async_get_clientsession
REQUIREMENTS = ['defusedxml==0.5.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'namecheapdns'
INTERVAL = timedelta(minutes=5)
UPDATE_URL = 'https://dynamicdns.park-your-domain.com/update'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default='@'): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Initialize the namecheap DNS component."""
host = config[DOMAIN][CONF_HOST]
domain = config[DOMAIN][CONF_DOMAIN]
password = config[DOMAIN][CONF_PASSWORD]
session = async_get_clientsession(hass)
result = await _update_namecheapdns(session, host, domain, password)
if not result:
return False
async def update_domain_interval(now):
"""Update the namecheap DNS entry."""
await _update_namecheapdns(session, host, domain, password)
async_track_time_interval(hass, update_domain_interval, INTERVAL)
return result
async def _update_namecheapdns(session, host, domain, password):
"""Update namecheap DNS entry."""
import defusedxml.ElementTree as ET
params = {
'host': host,
'domain': domain,
'password': password,
}
resp = await session.get(UPDATE_URL, params=params)
xml_string = await resp.text()
root = ET.fromstring(xml_string)
err_count = root.find('ErrCount').text
if int(err_count) != 0:
_LOGGER.warning("Updating namecheap domain failed: %s", domain)
return False
return True
|
PetePriority/home-assistant
|
homeassistant/components/namecheapdns/__init__.py
|
Python
|
apache-2.0
| 2,123
|
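Outside Home Assistant, the dynamic-DNS update above boils down to one GET request and an ErrCount check. A minimal synchronous sketch of the same call using requests and defusedxml; host, domain and password are placeholders:

import defusedxml.ElementTree as ET
import requests

UPDATE_URL = "https://dynamicdns.park-your-domain.com/update"

def update_namecheap_dns(host, domain, password):
    """Return True if namecheap reports zero errors for the update."""
    resp = requests.get(
        UPDATE_URL,
        params={"host": host, "domain": domain, "password": password},
        timeout=10,
    )
    root = ET.fromstring(resp.text)
    err_count = int(root.find("ErrCount").text)
    return err_count == 0

# Example call with placeholder values:
# update_namecheap_dns("@", "example.com", "dynamic-dns-password")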
# Lint as: python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Store points in time to be used for metrics."""
import typing
from typing import Dict, Optional
from glazier.lib import gtime
if typing.TYPE_CHECKING:
import datetime
class Timers(object):
"""Store named time elements."""
def __init__(self):
self._time_store = {}
def Get(self, name: str) -> Optional['datetime.datetime']:
"""Get the stored value of a single timer.
Args:
name: The name of the timer being requested.
Returns:
A specific named datetime value if stored, or None
"""
if name in self._time_store:
return self._time_store[name]
return None
def GetAll(self) -> Dict[str, 'datetime.datetime']:
"""Get the dictionary of all stored timers.
Returns:
A dictionary of all stored timer names and values.
"""
return self._time_store
def Set(self, name: str, at_time: Optional['datetime.datetime'] = None):
"""Set a timer at a specific time.
Defaults to the current time in UTC.
Args:
name: Name of the timer being set.
at_time: A predetermined time value to store.
"""
if at_time:
self._time_store[name] = at_time
else:
self._time_store[name] = gtime.now()
|
google/glazier
|
glazier/lib/timers.py
|
Python
|
apache-2.0
| 1,820
|
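A short usage sketch of the Timers class defined above; the timer names are arbitrary labels chosen by the caller:

import datetime

timers = Timers()

# Record "now" (UTC via gtime.now()) under a label.
timers.Set('build_start')

# Or store an explicit, caller-supplied point in time.
timers.Set('image_applied',
           at_time=datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc))

print(timers.Get('build_start'))   # datetime, or None if never set
print(sorted(timers.GetAll()))     # ['build_start', 'image_applied']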
#!/usr/bin/env python
"""
Module defining the LINE-MOD detector to find objects in a scene
"""
from object_recognition_core.pipelines.detection import DetectorBase
import ecto_cells.ecto_linemod as ecto_linemod
########################################################################################################################
class LinemodDetector(ecto_linemod.Detector, DetectorBase):
def __init__(self, *args, **kwargs):
ecto_linemod.Detector.__init__(self, *args, **kwargs)
DetectorBase.__init__(self)
|
WalkingMachine/sara_commun
|
wm_ork/linemod/python/object_recognition_linemod/detector.py
|
Python
|
apache-2.0
| 535
|
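LinemodDetector simply chains the two base-class initializers by hand because the ecto cell and the DetectorBase hierarchy do not share a cooperative super().__init__. A stripped-down sketch of that multi-base initialization pattern; the class names here are illustrative, not part of ecto or object_recognition_core:

class PipelineCell(object):
    def __init__(self, *args, **kwargs):
        self.config = kwargs

class DetectorRegistry(object):
    def __init__(self):
        self.registered = True

class MyDetector(PipelineCell, DetectorRegistry):
    def __init__(self, *args, **kwargs):
        # Initialize each base explicitly, mirroring LinemodDetector above.
        PipelineCell.__init__(self, *args, **kwargs)
        DetectorRegistry.__init__(self)

d = MyDetector(threshold=0.9)
print(d.config, d.registered)   # {'threshold': 0.9} True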
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
'''
Auxiliary second-order Green's function perturbation theory for
arbitrary moment consistency
'''
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf.agf2 import ragf2
from pyscf.agf2 import aux_space as aux
def build_se_part(agf2, eri, gf_occ, gf_vir, os_factor=1.0, ss_factor=1.0):
''' Builds either the auxiliaries of the occupied self-energy,
or virtual if :attr:`gf_occ` and :attr:`gf_vir` are swapped.
Args:
eri : _ChemistsERIs
Electronic repulsion integrals
gf_occ : GreensFunction
Occupied Green's function
gf_vir : GreensFunction
Virtual Green's function
Kwargs:
os_factor : float
Opposite-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
ss_factor : float
Same-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
Returns:
:class:`SelfEnergy`
'''
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(agf2.stdout, agf2.verbose)
assert type(gf_occ) is aux.GreensFunction
assert type(gf_vir) is aux.GreensFunction
nmo = agf2.nmo
nocc = gf_occ.naux
nvir = gf_vir.naux
naux = nocc * nocc * nvir
tol = agf2.weight_tol
if not (agf2.frozen is None or agf2.frozen == 0):
mask = ragf2.get_frozen_mask(agf2)
nmo -= np.sum(~mask)
e = np.zeros((naux))
v = np.zeros((nmo, naux))
fpos = np.sqrt(0.5 * os_factor)
fneg = np.sqrt(0.5 * os_factor + ss_factor)
fdia = np.sqrt(os_factor)
eja = lib.direct_sum('j,a->ja', gf_occ.energy, -gf_vir.energy)
coeffs = (gf_occ.coupling, gf_occ.coupling, gf_vir.coupling)
qeri = _make_qmo_eris_incore(agf2, eri, coeffs)
p1 = 0
for i in range(nocc):
xija = qeri[:,i,:i].reshape(nmo, -1)
xjia = qeri[:,:i,i].reshape(nmo, -1)
xiia = qeri[:,i,i].reshape(nmo, -1)
eija = gf_occ.energy[i] + eja[:i+1]
p0, p1 = p1, p1 + i*nvir
e[p0:p1] = eija[:i].ravel()
v[:,p0:p1] = fneg * (xija - xjia)
p0, p1 = p1, p1 + i*nvir
e[p0:p1] = eija[:i].ravel()
v[:,p0:p1] = fpos * (xija + xjia)
p0, p1 = p1, p1 + nvir
e[p0:p1] = eija[i].ravel()
v[:,p0:p1] = fdia * xiia
se = aux.SelfEnergy(e, v, chempot=gf_occ.chempot)
se.remove_uncoupled(tol=tol)
if not (agf2.frozen is None or agf2.frozen == 0):
coupling = np.zeros((agf2.nmo, se.naux))
coupling[mask] = se.coupling
se = aux.SelfEnergy(se.energy, coupling, chempot=se.chempot)
log.timer('se part', *cput0)
return se
class RAGF2(ragf2.RAGF2):
''' Restricted AGF2 with canonical HF reference for arbitrary
moment consistency
Attributes:
nmom : tuple of int
Compression level of the Green's function and
self-energy, respectively
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
Convergence threshold for AGF2 energy. Default value is 1e-7
conv_tol_rdm1 : float
Convergence threshold for first-order reduced density matrix.
Default value is 1e-8.
conv_tol_nelec : float
Convergence threshold for the number of electrons. Default
value is 1e-6.
max_cycle : int
Maximum number of AGF2 iterations. Default value is 50.
max_cycle_outer : int
Maximum number of outer Fock loop iterations. Default
value is 20.
max_cycle_inner : int
Maximum number of inner Fock loop iterations. Default
value is 50.
weight_tol : float
Threshold in spectral weight of auxiliaries to be considered
zero. Default 1e-11.
fock_diis_space : int
DIIS space size for Fock loop iterations. Default value is 6.
        fock_diis_min_space : int
            Minimum space of DIIS. Default value is 1.
os_factor : float
Opposite-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
ss_factor : float
Same-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
damping : float
Damping factor for the self-energy. Default value is 0.0
Saved results
e_corr : float
AGF2 correlation energy
e_tot : float
Total energy (HF + correlation)
e_1b : float
One-body part of :attr:`e_tot`
e_2b : float
Two-body part of :attr:`e_tot`
e_init : float
Initial correlation energy (truncated MP2)
converged : bool
Whether convergence was successful
se : SelfEnergy
Auxiliaries of the self-energy
gf : GreensFunction
Auxiliaries of the Green's function
'''
def __init__(self, mf, nmom=(None,0), frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):
ragf2.RAGF2.__init__(self, mf, frozen=frozen, mo_energy=mo_energy,
mo_coeff=mo_coeff, mo_occ=mo_occ)
self.nmom = nmom
self._keys.update(['nmom'])
build_se_part = build_se_part
def build_se(self, eri=None, gf=None, os_factor=None, ss_factor=None, se_prev=None):
''' Builds the auxiliaries of the self-energy.
Args:
eri : _ChemistsERIs
Electronic repulsion integrals
gf : GreensFunction
Auxiliaries of the Green's function
Kwargs:
os_factor : float
Opposite-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
ss_factor : float
Same-spin factor for spin-component-scaled (SCS)
calculations. Default 1.0
se_prev : SelfEnergy
Previous self-energy for damping. Default value is None
Returns:
:class:`SelfEnergy`
'''
if eri is None: eri = self.ao2mo()
if gf is None: gf = self.gf
if gf is None: gf = self.init_gf()
fock = None
if self.nmom[0] is not None:
fock = self.get_fock(eri=eri, gf=gf)
if os_factor is None: os_factor = self.os_factor
if ss_factor is None: ss_factor = self.ss_factor
facs = dict(os_factor=os_factor, ss_factor=ss_factor)
gf_occ = gf.get_occupied()
gf_vir = gf.get_virtual()
se_occ = self.build_se_part(eri, gf_occ, gf_vir, **facs)
se_occ = se_occ.compress(n=(None, self.nmom[1]))
se_vir = self.build_se_part(eri, gf_vir, gf_occ, **facs)
se_vir = se_vir.compress(n=(None, self.nmom[1]))
se = aux.combine(se_vir, se_occ)
se = se.compress(phys=fock, n=(self.nmom[0], None))
if se_prev is not None and self.damping != 0.0:
se.coupling *= np.sqrt(1.0-self.damping)
se_prev.coupling *= np.sqrt(self.damping)
se = aux.combine(se, se_prev)
se = se.compress(n=self.nmom)
return se
def dump_flags(self, verbose=None):
ragf2.RAGF2.dump_flags(self, verbose=verbose)
logger.info(self, 'nmom = %s', repr(self.nmom))
return self
def run_diis(self, se, diis=None):
return se
class _ChemistsERIs(ragf2._ChemistsERIs):
pass
_make_qmo_eris_incore = ragf2._make_qmo_eris_incore
if __name__ == '__main__':
from pyscf import gto, scf, mp
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', verbose=3)
rhf = scf.RHF(mol)
rhf.conv_tol = 1e-11
rhf.run()
agf2 = RAGF2(rhf, nmom=(None,0))
agf2.run()
agf2 = ragf2.RAGF2(rhf)
agf2.run()
|
sunqm/pyscf
|
pyscf/agf2/ragf2_slow.py
|
Python
|
apache-2.0
| 8,709
|
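Beyond the __main__ block above, a short sketch of driving this class with a non-default self-energy moment order. This assumes the module is importable as pyscf.agf2.ragf2_slow (per the path above) and that nmom=(None, 1) is a valid compression choice; the molecule and basis are arbitrary:

from pyscf import gto, scf
from pyscf.agf2.ragf2_slow import RAGF2

mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', verbose=0)
mf = scf.RHF(mol)
mf.conv_tol = 1e-11
mf.run()

# nmom[0]=None leaves the Green's function uncompressed (the default in
# __init__ above); nmom[1]=1 keeps one extra self-energy moment.
gf2 = RAGF2(mf, nmom=(None, 1))
gf2.run()
print(gf2.converged, gf2.e_corr, gf2.e_tot)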
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from touchdown.core import serializers
from ..common import GenericAction, SimpleApply, SimpleDescribe, SimpleDestroy
class GetChangeTokenAction(GenericAction):
"""
Before every call to a WAF change API first call `get_change_token` and
inject its response into our API call. *Every* API to create or update a
WAF resource must have a change token or it will be rejected.
Wrap all 'action' API calls in a lock so they don't happen concurrently.
This is because the WAF service does not support concurrent changes
whatsoever, but touchdown will run in parallel by default.
"""
change_token_lock = threading.Lock()
def get_arguments(self):
params = super(GetChangeTokenAction, self).get_arguments()
change_token = self.plan.client.get_change_token()["ChangeToken"]
params["ChangeToken"] = change_token
return params
def run(self):
with self.change_token_lock:
return super(GetChangeTokenAction, self).run()
class WafDescribe(SimpleDescribe):
GenericAction = GetChangeTokenAction
def get_describe_filters(self):
"""
        The 'Limit' field is mandatory, so set it to a sensible default for all
        WAF APIs.
"""
return {"Limit": 20}
def describe_object_matches(self, d):
"""
Perform client side filtering of WAF resources found with the list API.
There is no server-side filtering at all, and all client-side filtering
is by comparing self.resource.name against remote['Name'].
"""
return self.resource.name == d["Name"]
def annotate_object(self, obj):
# Need to do a request to get the detailed information for the
# object - we don't get this for free when doing a list.
annotate_action = getattr(self.client, self.annotate_action)
# This will unfurl to be something like::
# rule = client.get_rule(RuleId=obj['RuleId'])
# obj.update(rule['Rule'])
obj.update(
annotate_action(**{self.key: obj[self.key]})[self.describe_envelope[:-1]]
)
return obj
def get_local_container_field(self):
return self.resource.meta.fields[self.local_container].argument.list_of
def get_local_container_items(self):
return getattr(self.resource, self.local_container, [])
def describe_local(self, local):
desc = ["Inserting {}:".format(local.resource_name)]
for field in local.meta.iter_fields_in_order():
if field.name.startswith("_"):
continue
if not getattr(field.argument, "field", None):
continue
if not field.present(local):
continue
desc.append(
" {}: {}".format(field.name, getattr(local, field.name, "(unset)"))
)
return desc
def describe_remote(self, remote):
"""
Given a remote object that has no correlation to a local object pretty
print the remote object (using the touchdown field names)
"""
        # TODO: consider doing a call here to get a better description for
        # the deleted resource - turn its GUID into its name
field = self.get_local_container_field()
desc = ["Removing {}:".format(field.resource_class.resource_name)]
for field in field.resource_class.meta.iter_fields_in_order():
if (
not getattr(field.argument, "field", None)
or field.argument.field not in remote
):
continue
desc.append(" {}: {}".format(field.name, remote[field.argument.field]))
return desc
class WafApply(SimpleApply):
GenericAction = GetChangeTokenAction
def update_object(self):
changes = []
description = ["Update children of {}".format(self.resource.name)]
for local in getattr(self.resource, self.local_container):
for remote in self.object.get(self.container, []):
if local.matches(self.runner, remote):
break
else:
changes.append(
serializers.Dict(
**{
"Action": "INSERT",
self.container_member: local.serializer_with_kwargs(),
}
)
)
description.extend(self.describe_local(local))
for remote in self.object.get(self.container, []):
for local in getattr(self.resource, self.local_container):
if local.matches(self.runner, remote):
break
else:
changes.append(
serializers.Dict(
**{"Action": "DELETE", self.container_member: remote}
)
)
description.extend(self.describe_remote(remote))
if changes:
kwargs = {
self.key: serializers.Identifier(),
"Updates": serializers.Context(
serializers.Const(changes),
serializers.List(serializers.SubSerializer()),
),
}
yield self.generic_action(
description,
getattr(self.client, self.container_update_action),
**kwargs
)
class WafDestroy(SimpleDestroy):
"""
Subclasses of this destroy action must set:
`container` - for example IPSetDescriptors
`container_member` - for example IPSetDescriptor
"""
GenericAction = GetChangeTokenAction
def destroy_object(self):
changes = []
description = [
"Delete all children from {}".format(self.resource.resource_name)
]
for remote in self.object.get(self.container, []):
changes.append(
serializers.Dict(**{"Action": "DELETE", self.container_member: remote})
)
description.extend(self.describe_remote(remote))
if changes:
kwargs = {
self.key: serializers.Identifier(),
"Updates": serializers.Context(
serializers.Const(changes),
serializers.List(serializers.SubSerializer()),
),
}
yield self.generic_action(
description,
getattr(self.client, self.container_update_action),
**kwargs
)
for obj in super(WafDestroy, self).destroy_object():
yield obj
|
yaybu/touchdown
|
touchdown/aws/waf/waf.py
|
Python
|
apache-2.0
| 7,256
|
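The change-token dance that GetChangeTokenAction automates looks roughly like this when done by hand against the classic WAF API. A sketch using boto3's waf client; the IPSetId and CIDR values are placeholders:

import boto3

waf = boto3.client("waf")

# Every mutating classic-WAF call needs a fresh change token.
token = waf.get_change_token()["ChangeToken"]

waf.update_ip_set(
    IPSetId="example-ip-set-id",          # placeholder
    ChangeToken=token,
    Updates=[{
        "Action": "INSERT",
        "IPSetDescriptor": {"Type": "IPV4", "Value": "203.0.113.0/24"},
    }],
)

# Optionally poll until the change has propagated.
status = waf.get_change_token_status(ChangeToken=token)["ChangeTokenStatus"]
print(status)   # PROVISIONED / PENDING / INSYNC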
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Light Virtual Network Function."""
import types
import time
from empower.main import RUNTIME
import empower.logger
LOG = empower.logger.get_logger()
# add lvnf message sent, no status received
PROCESS_SPAWNING = "spawning"
# add lvnf message sent, status received (process is running)
PROCESS_RUNNING = "running"
# del lvnf message sent, no status received
PROCESS_STOPPING = "stopping"
# del lvnf message sent, status received (process has stopped)
PROCESS_STOPPED = "stopped"
# del lvnf message sent, no status received yet
PROCESS_MIGRATING_STOP = "migrating_stop"
# add lvnf message sent, no status received yet
PROCESS_MIGRATING_START = "migrating_start"
class LVNF(object):
"""A Light Virtual Network Function.
An object representing a Light Virtual Network Function. An LVNF is
    an instance of an Image. Each Image consists of a click script
implementing the specific VNF. The following boilerplate code is
automatically generated by the EmPOWER Agent (one port):
in_0 :: FromHost(vnf-br0-0-0);
out_0 :: ToHost(vnf-br0-0-0);
vnf-<bridge>-<k>-<n> is the name of the virtual interface to be created by
this VNF. <bridge> is the name of the OVS bridge where the VNF is attached,
<k> is a counter incremented by the agent for each deployed VNF, <n> is
    the virtual port id. Notice that the VNF developer need not care about
    the specific values of <k> and <n>; however, he/she must use 'in_0'
    and 'out_0' as, respectively, the input and output of his/her VNF. For
example a valid VNF script for this case is the following:
in_0 -> null::Null() -> out_0
    After an LVNF is created it is not automatically spawned in a CPP.
Developers must manually assign the cpp attribute in order to install the
LVNF in a specific CPP. If the LVNF was previously installed in a CPP,
then it is first undeployed from the original CPP and then deployed on the
    new CPP. Setting the cpp attribute is asynchronous, so the fact that it was
    set does not mean the LVNF has been deployed. The developer must either check
periodically the status of the LVNF or he/she must register a callback
for the LVNF status event.
Note, as opposed to LVAPs which can live outside a tenant, an LVNF is
bound to one and only one tenant. As a result the runtime does not have a
    list of LVNFs, which is instead kept by each tenant object.
Attributes:
cpp: Pointer to the CPP hosting this LVNF (CPP)
lvnf_id: The lvnf id (UUID)
tenant_id: The Tenant id (UUID)
image: The Image used by this LVNF (Image)
ports: The virtual ports supported by this LVNF (Map)
        message: The error message returned by Click (String)
returncode: The Click process return code, only if stopped (Integer)
process: The status of the process (running, migrating, migrated,
stopped, done)
"""
def __init__(self, lvnf_id, tenant_id, image, cpp):
self.lvnf_id = lvnf_id
self.tenant_id = tenant_id
self.image = image
self.ports = {}
self.returncode = None
self.context = None
self.__state = None
self.__cpp = cpp
self.__target_cpp = None
self.__migration_timer = None
self.__creation_timer = None
self.__chains = []
def start(self):
"""Spawn LVNF."""
tenant = RUNTIME.tenants[self.tenant_id]
if self.lvnf_id in tenant.lvnfs:
raise KeyError("Already defined %s", self.lvnf_id)
tenant.lvnfs[self.lvnf_id] = self
self.state = PROCESS_SPAWNING
def stop(self):
"""Remove LVNF."""
self.state = PROCESS_STOPPING
@property
def state(self):
"""Return the state."""
return self.__state
@state.setter
def state(self, state):
"""Set the CPP."""
LOG.info("LVNF %s transition %s->%s", self.lvnf_id, self.state, state)
if self.state:
method = "_%s_%s" % (self.state, state)
else:
method = "_none_%s" % state
if hasattr(self, method):
callback = getattr(self, method)
callback()
return
raise IOError("Invalid transistion %s -> %s" % (self.state, state))
def _running_spawning(self):
# set new state
self.__state = PROCESS_MIGRATING_STOP
# remove lvnf
self.cpp.connection.send_del_lvnf(self.lvnf_id)
def _running_stopping(self):
# set new state
self.__state = PROCESS_STOPPING
# send LVNF del message
self.cpp.connection.send_del_lvnf(self.lvnf_id)
def _stopping_stopped(self):
# set new state
self.__state = PROCESS_STOPPED
def _spawning_running(self):
delta = int((time.time() - self.__creation_timer) * 1000)
LOG.info("LVNF %s started in %sms", self.lvnf_id, delta)
self.__state = PROCESS_RUNNING
def _spawning_stopped(self):
self.__state = PROCESS_STOPPED
def _running_migrating_stop(self):
# set time
self.__migration_timer = time.time()
# set new state
self.__state = PROCESS_MIGRATING_STOP
# remove lvnf
self.cpp.connection.send_del_lvnf(self.lvnf_id)
# look for LVAPs that points to this LVNF
self.__chains = []
for lvap in RUNTIME.lvaps.values():
for out_port in lvap.ports:
for rule in list(lvap.ports[out_port].next):
LOG.info("rule lvnf %s",rule)
v_port = lvap.ports[out_port].next[rule]
in_port = v_port.virtual_port_id
if v_port in self.ports.values():
save = (lvap, rule, out_port, in_port)
self.__chains.append(save)
del lvap.ports[0].next[rule]
def _migrating_stop_migrating_start(self):
# set new cpp
self.cpp = self.__target_cpp
# set new state
self.__state = PROCESS_MIGRATING_START
# add lvnf
self.cpp.connection.send_add_lvnf(self.image, self.lvnf_id,
self.tenant_id, self.context)
def _migrating_start_running(self):
self.__state = PROCESS_RUNNING
delta = int((time.time() - self.__migration_timer) * 1000)
LOG.info("LVNF %s migration took %sms", self.lvnf_id, delta)
LOG.info("Restoring chains")
for chain in self.__chains:
vnf = chain[0]
rule = chain[1]
out_port = chain[2]
in_port = chain[3]
LOG.info("LVAP %s port [%u] next [%s] -> %u", vnf.addr,
out_port, rule, in_port)
vnf.ports[out_port].next[rule] = self.ports[in_port]
self.__chains = []
def _none_spawning(self):
# set timer
self.__creation_timer = time.time()
# set new state
self.__state = PROCESS_SPAWNING
# send LVNF add message
self.cpp.connection.send_add_lvnf(self.image,
self.lvnf_id,
self.tenant_id)
def _none_running(self):
self.__state = PROCESS_RUNNING
@property
def cpp(self):
"""Return the CPP."""
return self.__cpp
@cpp.setter
def cpp(self, cpp):
"""Set the CPP."""
if self.state == PROCESS_RUNNING:
# save target cpp
self.__target_cpp = cpp
# move to new state
self.state = PROCESS_MIGRATING_STOP
elif self.state == PROCESS_MIGRATING_STOP:
# set cpp
self.__cpp = cpp
self.__target_cpp = None
else:
IOError("Setting CPP on invalid state: %s" % self.state)
def to_dict(self):
"""Return a JSON-serializable dictionary representing the Poll."""
return {'lvnf_id': self.lvnf_id,
'image': self.image,
'tenant_id': self.tenant_id,
'cpp': self.cpp,
'state': self.state,
'returncode': self.returncode,
'ports': self.ports}
def __eq__(self, other):
if isinstance(other, LVNF):
return self.lvnf_id == other.lvnf_id
return False
def __str__(self):
return "LVNF %s (nb_ports=%u)\n%s" % \
(self.lvnf_id, self.image.nb_ports, self.image.vnf)
|
LokiNetworks/empower-runtime
|
empower/core/lvnf.py
|
Python
|
apache-2.0
| 9,109
|
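The LVNF state setter above dispatches to handler methods named after the transition, such as _none_spawning or _spawning_running. A stripped-down sketch of that dispatch pattern in isolation; the class name and states here are illustrative:

class StateMachine(object):
    def __init__(self):
        self._state = None

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, new_state):
        # Look up a handler named "_<old>_<new>", exactly as LVNF.state does.
        method = "_%s_%s" % (self._state or "none", new_state)
        handler = getattr(self, method, None)
        if handler is None:
            raise IOError("Invalid transition %s -> %s" % (self._state, new_state))
        handler()

    def _none_spawning(self):
        self._state = "spawning"

    def _spawning_running(self):
        self._state = "running"

machine = StateMachine()
machine.state = "spawning"
machine.state = "running"
print(machine.state)   # running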
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import binascii
import json
import math
import os
import tempfile
from castellan import key_manager
from eventlet import tpool
from os_brick import encryptors
from os_brick.initiator import linuxrbd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import units
import six
from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import utils as volume_utils
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
RBD_OPTS = [
cfg.StrOpt('rbd_cluster_name',
default='ceph',
help='The name of ceph cluster'),
cfg.StrOpt('rbd_pool',
default='rbd',
help='The RADOS pool where rbd volumes are stored'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes '
'- only set when using cephx authentication'),
cfg.StrOpt('rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file'),
cfg.StrOpt('rbd_keyring_conf',
default='',
help='Path to the ceph keyring file'),
cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
default=False,
help='Flatten volumes created from snapshots to remove '
'dependency from volume to snapshot'),
cfg.StrOpt('rbd_secret_uuid',
help='The libvirt uuid of the secret for the rbd_user '
'volumes'),
cfg.IntOpt('rbd_max_clone_depth',
default=5,
help='Maximum number of nested volume clones that are '
'taken before a flatten occurs. Set to 0 to disable '
'cloning.'),
cfg.IntOpt('rbd_store_chunk_size', default=4,
help='Volumes will be chunked into objects of this size '
'(in megabytes).'),
cfg.IntOpt('rados_connect_timeout', default=-1,
help='Timeout value (in seconds) used when connecting to '
'ceph cluster. If value < 0, no timeout is set and '
'default librados value is used.'),
cfg.IntOpt('rados_connection_retries', default=3,
help='Number of retries if connection to ceph cluster '
'failed.'),
cfg.IntOpt('rados_connection_interval', default=5,
help='Interval value (in seconds) between connection '
'retries to ceph cluster.'),
cfg.IntOpt('replication_connect_timeout', default=5,
help='Timeout value (in seconds) used when connecting to '
'ceph cluster to do a demotion/promotion of volumes. '
'If value < 0, no timeout is set and default librados '
'value is used.'),
cfg.BoolOpt('report_dynamic_total_capacity', default=True,
help='Set to True for driver to report total capacity as a '
'dynamic value -used + current free- and to False to '
'report a static value -quota max bytes if defined and '
'global size of cluster if not-.'),
cfg.BoolOpt('rbd_exclusive_cinder_pool', default=False,
help="Set to True if the pool is used exclusively by Cinder. "
"On exclusive use driver won't query images' provisioned "
"size as they will match the value calculated by the "
"Cinder core code for allocated_capacity_gb. This "
"reduces the load on the Ceph cluster as well as on the "
"volume service."),
]
CONF = cfg.CONF
CONF.register_opts(RBD_OPTS, group=configuration.SHARED_CONF_GROUP)
EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
    This may also reuse an external connection (client and ioctx args), but
    note that the caller is then responsible for opening/closing the connection.
    In that case the `pool`, `remote` and `timeout` args are ignored.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False, remote=None, timeout=None,
client=None, ioctx=None):
self._close_conn = not (client and ioctx)
rados_client, rados_ioctx = driver._connect_to_rados(
pool, remote, timeout) if self._close_conn else (client, ioctx)
if snapshot is not None:
snapshot = utils.convert_str(snapshot)
try:
self.volume = driver.rbd.Image(rados_ioctx,
utils.convert_str(name),
snapshot=snapshot,
read_only=read_only)
self.volume = tpool.Proxy(self.volume)
except driver.rbd.Error:
LOG.exception("error opening rbd image %s", name)
if self._close_conn:
driver._disconnect_from_rados(rados_client, rados_ioctx)
raise
self.driver = driver
self.client = rados_client
self.ioctx = rados_ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
if self._close_conn:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
@property
def features(self):
features = self.cluster.conf_get('rbd_default_features')
if ((features is None) or (int(features) == 0)):
features = self.driver.rbd.RBD_FEATURE_LAYERING
return int(features)
@interface.volumedriver
class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
driver.ManageableVD, driver.ManageableSnapshotsVD,
driver.BaseVD):
"""Implements RADOS block device (RBD) volume commands."""
VERSION = '1.2.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Cinder_Jenkins"
SYSCONFDIR = '/etc/ceph/'
def __init__(self, active_backend_id=None, *args, **kwargs):
super(RBDDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(RBD_OPTS)
self._stats = {}
# allow overrides for testing
self.rados = kwargs.get('rados', rados)
self.rbd = kwargs.get('rbd', rbd)
# All string args used with librbd must be None or utf-8 otherwise
# librbd will break.
for attr in ['rbd_cluster_name', 'rbd_user',
'rbd_ceph_conf', 'rbd_pool']:
val = getattr(self.configuration, attr)
if val is not None:
setattr(self.configuration, attr, utils.convert_str(val))
self._backend_name = (self.configuration.volume_backend_name or
self.__class__.__name__)
self._active_backend_id = active_backend_id
self._active_config = {}
self._is_replication_enabled = False
self._replication_targets = []
self._target_names = []
def _get_target_config(self, target_id):
"""Get a replication target from known replication targets."""
for target in self._replication_targets:
if target['name'] == target_id:
return target
if not target_id or target_id == 'default':
return {
'name': self.configuration.rbd_cluster_name,
'conf': self.configuration.rbd_ceph_conf,
'user': self.configuration.rbd_user
}
raise exception.InvalidReplicationTarget(
reason=_('RBD: Unknown failover target host %s.') % target_id)
def do_setup(self, context):
"""Performs initialization steps that could raise exceptions."""
self._do_setup_replication()
self._active_config = self._get_target_config(self._active_backend_id)
def _do_setup_replication(self):
replication_devices = self.configuration.safe_get(
'replication_device')
if replication_devices:
self._parse_replication_configs(replication_devices)
self._is_replication_enabled = True
self._target_names.append('default')
def _parse_replication_configs(self, replication_devices):
for replication_device in replication_devices:
if 'backend_id' not in replication_device:
msg = _('Missing backend_id in replication_device '
'configuration.')
raise exception.InvalidConfigurationValue(msg)
name = replication_device['backend_id']
conf = replication_device.get('conf',
self.SYSCONFDIR + name + '.conf')
user = replication_device.get(
'user', self.configuration.rbd_user or 'cinder')
# Pool has to be the same in all clusters
replication_target = {'name': name,
'conf': utils.convert_str(conf),
'user': utils.convert_str(user)}
LOG.info('Adding replication target: %s.', name)
self._replication_targets.append(replication_target)
self._target_names.append(name)
def _get_config_tuple(self, remote=None):
if not remote:
remote = self._active_config
return (remote.get('name'), remote.get('conf'), remote.get('user'))
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if rados is None:
msg = _('rados and rbd python libraries not found')
raise exception.VolumeBackendAPIException(data=msg)
for attr in ['rbd_cluster_name', 'rbd_pool']:
val = getattr(self.configuration, attr)
if not val:
raise exception.InvalidConfigurationValue(option=attr,
value=val)
# NOTE: Checking connection to ceph
# RADOSClient __init__ method invokes _connect_to_rados
# so no need to check for self.rados.Error here.
with RADOSClient(self):
pass
def RBDProxy(self):
return tpool.Proxy(self.rbd.RBD())
def _ceph_args(self):
args = []
name, conf, user = self._get_config_tuple()
if user:
args.extend(['--id', user])
if name:
args.extend(['--cluster', name])
if conf:
args.extend(['--conf', conf])
return args
def _connect_to_rados(self, pool=None, remote=None, timeout=None):
@utils.retry(exception.VolumeBackendAPIException,
self.configuration.rados_connection_interval,
self.configuration.rados_connection_retries)
def _do_conn(pool, remote, timeout):
name, conf, user = self._get_config_tuple(remote)
if pool is not None:
pool = utils.convert_str(pool)
else:
pool = self.configuration.rbd_pool
if timeout is None:
timeout = self.configuration.rados_connect_timeout
LOG.debug("connecting to %(user)s@%(name)s (conf=%(conf)s, "
"timeout=%(timeout)s).",
{'user': user, 'name': name, 'conf': conf,
'timeout': timeout})
client = self.rados.Rados(rados_id=user,
clustername=name,
conffile=conf)
try:
if timeout >= 0:
timeout = six.text_type(timeout)
client.conf_set('rados_osd_op_timeout', timeout)
client.conf_set('rados_mon_op_timeout', timeout)
client.conf_set('client_mount_timeout', timeout)
client.connect()
ioctx = client.open_ioctx(pool)
return client, ioctx
except self.rados.Error:
msg = _("Error connecting to ceph cluster.")
LOG.exception(msg)
client.shutdown()
raise exception.VolumeBackendAPIException(data=msg)
return _do_conn(pool, remote, timeout)
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def _get_backup_snaps(self, rbd_image):
"""Get list of any backup snapshots that exist on this volume.
There should only ever be one but accept all since they need to be
deleted before the volume can be.
"""
# NOTE(dosaboy): we do the import here otherwise we get import conflict
# issues between the rbd driver and the ceph backup driver. These
# issues only seem to occur when NOT using them together and are
# triggered when the ceph backup driver imports the rbd volume driver.
from cinder.backup.drivers import ceph
return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json']
args.extend(self._ceph_args())
out, _ = self._execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = json.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def _get_usage_info(self):
"""Calculate provisioned volume space in GiB.
        Stats report should send the provisioned size of volumes (snapshots must
        not be included) and not the physical size of those volumes.
We must include all volumes, not only Cinder created volumes, because
Cinder created volumes are reported by the Cinder core code as
allocated_capacity_gb.
"""
total_provisioned = 0
with RADOSClient(self) as client:
for t in self.RBDProxy().list(client.ioctx):
with RBDVolumeProxy(self, t, read_only=True,
client=client.cluster,
ioctx=client.ioctx) as v:
try:
size = v.size()
except self.rbd.ImageNotFound:
LOG.debug("Image %s is not found.", t)
else:
total_provisioned += size
total_provisioned = math.ceil(float(total_provisioned) / units.Gi)
return total_provisioned
def _get_pool_stats(self):
"""Gets pool free and total capacity in GiB.
Calculate free and total capacity of the pool based on the pool's
        defined quota and pool stats.
Returns a tuple with (free, total) where they are either unknown or a
real number with a 2 digit precision.
"""
pool_name = self.configuration.rbd_pool
with RADOSClient(self) as client:
ret, df_outbuf, __ = client.cluster.mon_command(
'{"prefix":"df", "format":"json"}', '')
if ret:
LOG.warning('Unable to get rados pool stats.')
return 'unknown', 'unknown'
ret, quota_outbuf, __ = client.cluster.mon_command(
'{"prefix":"osd pool get-quota", "pool": "%s",'
' "format":"json"}' % pool_name, '')
if ret:
LOG.warning('Unable to get rados pool quotas.')
return 'unknown', 'unknown'
df_data = json.loads(df_outbuf)
pool_stats = [pool for pool in df_data['pools']
if pool['name'] == pool_name][0]['stats']
bytes_quota = json.loads(quota_outbuf)['quota_max_bytes']
# With quota the total is the quota limit and free is quota - used
if bytes_quota:
total_capacity = bytes_quota
free_capacity = max(min(total_capacity - pool_stats['bytes_used'],
pool_stats['max_avail']),
0)
# Without quota free is pools max available and total is global size
else:
total_capacity = df_data['stats']['total_bytes']
free_capacity = pool_stats['max_avail']
# If we want dynamic total capacity (default behavior)
if self.configuration.safe_get('report_dynamic_total_capacity'):
total_capacity = free_capacity + pool_stats['bytes_used']
free_capacity = round((float(free_capacity) / units.Gi), 2)
total_capacity = round((float(total_capacity) / units.Gi), 2)
return free_capacity, total_capacity
def _update_volume_stats(self):
location_info = '%s:%s:%s:%s:%s' % (
self.configuration.rbd_cluster_name,
self.configuration.rbd_ceph_conf,
self._get_fsid(),
self.configuration.rbd_user,
self.configuration.rbd_pool)
stats = {
'vendor_name': 'Open Source',
'driver_version': self.VERSION,
'storage_protocol': 'ceph',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': (
self.configuration.safe_get('reserved_percentage')),
# NOTE(eharney): Do not enable multiattach for this driver.
# For multiattach to work correctly, the exclusive-lock
# feature required by ceph journaling must be disabled.
# This has implications for replication and other Cinder
# operations.
# Multiattach support for this driver will be investigated
# as multi-attach support in Cinder matures.
'multiattach': False,
'thin_provisioning_support': True,
'max_over_subscription_ratio': (
self.configuration.safe_get('max_over_subscription_ratio')),
'location_info': location_info,
}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or 'RBD'
stats['replication_enabled'] = self._is_replication_enabled
if self._is_replication_enabled:
stats['replication_targets'] = self._target_names
try:
free_capacity, total_capacity = self._get_pool_stats()
stats['free_capacity_gb'] = free_capacity
stats['total_capacity_gb'] = total_capacity
# For exclusive pools let scheduler set provisioned_capacity_gb to
# allocated_capacity_gb, and for non exclusive query the value.
if not self.configuration.safe_get('rbd_exclusive_cinder_pool'):
total_gbi = self._get_usage_info()
stats['provisioned_capacity_gb'] = total_gbi
except self.rados.Error:
# just log and return unknown capacities and let scheduler set
# provisioned_capacity_gb = allocated_capacity_gb
LOG.exception('error refreshing volume stats')
self._stats = stats
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_clone_depth(self, client, volume_name, depth=0):
"""Returns the number of ancestral clones of the given volume."""
parent_volume = self.rbd.Image(client.ioctx, volume_name)
try:
_pool, parent, _snap = self._get_clone_info(parent_volume,
volume_name)
finally:
parent_volume.close()
if not parent:
return depth
# If clone depth was reached, flatten should have occurred so if it has
# been exceeded then something has gone wrong.
if depth > self.configuration.rbd_max_clone_depth:
raise Exception(_("clone depth exceeds limit of %s") %
(self.configuration.rbd_max_clone_depth))
return self._get_clone_depth(client, parent, depth + 1)
def _extend_if_required(self, volume, src_vref):
"""Extends a volume if required
        If the size of src_vref is smaller than the size of the requested
        new volume, call _resize().
"""
if volume.size != src_vref.size:
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
"%(dst_size)d",
{'dst_vol': volume.name, 'src_size': src_vref.size,
'dst_size': volume.size})
self._resize(volume)
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Since we are cloning from a volume and not a snapshot, we must first
create a snapshot of the source volume.
The user has the option to limit how long a volume's clone chain can be
by setting rbd_max_clone_depth. If a clone is made of another clone
and that clone has rbd_max_clone_depth clones behind it, the dest
volume will be flattened.
"""
src_name = utils.convert_str(src_vref.name)
dest_name = utils.convert_str(volume.name)
clone_snap = "%s.clone_snap" % dest_name
# Do full copy if requested
if self.configuration.rbd_max_clone_depth <= 0:
with RBDVolumeProxy(self, src_name, read_only=True) as vol:
vol.copy(vol.ioctx, dest_name)
self._extend_if_required(volume, src_vref)
return
# Otherwise do COW clone.
with RADOSClient(self) as client:
src_volume = self.rbd.Image(client.ioctx, src_name)
LOG.debug("creating snapshot='%s'", clone_snap)
try:
# Create new snapshot of source volume
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
# Now clone source volume snapshot
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'",
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
client.ioctx, dest_name,
features=client.features)
except Exception as e:
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
msg = (_("Failed to clone '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s', error: %(error)s") %
{'src_vol': src_name,
'src_snap': clone_snap,
'dest': dest_name,
'error': e})
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
finally:
src_volume.close()
depth = self._get_clone_depth(client, src_name)
# If dest volume is a clone and rbd_max_clone_depth reached,
# flatten the dest after cloning. Zero rbd_max_clone_depth means
# infinite is allowed.
if depth >= self.configuration.rbd_max_clone_depth:
LOG.info("maximum clone depth (%d) has been reached - "
"flattening dest volume",
self.configuration.rbd_max_clone_depth)
dest_volume = self.rbd.Image(client.ioctx, dest_name)
try:
# Flatten destination volume
LOG.debug("flattening dest volume %s", dest_name)
dest_volume.flatten()
except Exception as e:
msg = (_("Failed to flatten volume %(volume)s with "
"error: %(error)s.") %
{'volume': dest_name,
'error': e})
LOG.exception(msg)
src_volume.close()
raise exception.VolumeBackendAPIException(data=msg)
finally:
dest_volume.close()
try:
# remove temporary snap
LOG.debug("remove temporary snap %s", clone_snap)
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
except Exception as e:
msg = (_("Failed to remove temporary snap "
"%(snap_name)s, error: %(error)s") %
{'snap_name': clone_snap,
'error': e})
LOG.exception(msg)
src_volume.close()
raise exception.VolumeBackendAPIException(data=msg)
try:
volume_update = self._enable_replication_if_needed(volume)
except Exception:
self.RBDProxy().remove(client.ioctx, dest_name)
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
err_msg = (_('Failed to enable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
finally:
src_volume.close()
self._extend_if_required(volume, src_vref)
LOG.debug("clone created successfully")
return volume_update
def _enable_replication(self, volume):
"""Enable replication for a volume.
Returns required volume update.
"""
vol_name = utils.convert_str(volume.name)
with RBDVolumeProxy(self, vol_name) as image:
had_exclusive_lock = (image.features() &
self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK)
had_journaling = image.features() & self.rbd.RBD_FEATURE_JOURNALING
if not had_exclusive_lock:
image.update_features(self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK,
True)
if not had_journaling:
image.update_features(self.rbd.RBD_FEATURE_JOURNALING, True)
image.mirror_image_enable()
driver_data = self._dumps({
'had_journaling': bool(had_journaling),
'had_exclusive_lock': bool(had_exclusive_lock)
})
return {'replication_status': fields.ReplicationStatus.ENABLED,
'replication_driver_data': driver_data}
def _is_replicated_type(self, volume_type):
# We do a safe attribute get because volume_type could be None
specs = getattr(volume_type, 'extra_specs', {})
return specs.get(EXTRA_SPECS_REPL_ENABLED) == "<is> True"
def _enable_replication_if_needed(self, volume):
if self._is_replicated_type(volume.volume_type):
return self._enable_replication(volume)
if self._is_replication_enabled:
return {'replication_status': fields.ReplicationStatus.DISABLED}
return None
def _check_encryption_provider(self, volume, context):
"""Check that this is a LUKS encryption provider.
:returns: encryption dict
"""
encryption = self.db.volume_encryption_metadata_get(context, volume.id)
provider = encryption['provider']
if provider in encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP:
provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider]
if provider != encryptors.LUKS:
message = _("Provider %s not supported.") % provider
raise exception.VolumeDriverException(message=message)
if 'cipher' not in encryption or 'key_size' not in encryption:
msg = _('encryption spec must contain "cipher" and'
'"key_size"')
raise exception.VolumeDriverException(message=msg)
return encryption
def _create_encrypted_volume(self, volume, context):
"""Create an encrypted volume.
This works by creating an encrypted image locally,
and then uploading it to the volume.
"""
encryption = self._check_encryption_provider(volume, context)
# Fetch the key associated with the volume and decode the passphrase
keymgr = key_manager.API(CONF)
key = keymgr.get(context, encryption['encryption_key_id'])
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
# create a file
tmp_dir = self._image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_image:
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key:
with open(tmp_key.name, 'w') as f:
f.write(passphrase)
cipher_spec = image_utils.decode_cipher(encryption['cipher'],
encryption['key_size'])
create_cmd = (
'qemu-img', 'create', '-f', 'luks',
'-o', 'cipher-alg=%(cipher_alg)s,'
'cipher-mode=%(cipher_mode)s,'
'ivgen-alg=%(ivgen_alg)s' % cipher_spec,
'--object', 'secret,id=luks_sec,'
'format=raw,file=%(passfile)s' % {'passfile':
tmp_key.name},
'-o', 'key-secret=luks_sec',
tmp_image.name,
'%sM' % (volume.size * 1024))
self._execute(*create_cmd)
# Copy image into RBD
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
cmd = ['rbd', 'import',
'--pool', self.configuration.rbd_pool,
'--order', order,
tmp_image.name, volume.name]
cmd.extend(self._ceph_args())
self._execute(*cmd)
def create_volume(self, volume):
"""Creates a logical volume."""
if volume.encryption_key_id:
return self._create_encrypted_volume(volume, volume.obj_context)
size = int(volume.size) * units.Gi
LOG.debug("creating volume '%s'", volume.name)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
vol_name = utils.convert_str(volume.name)
with RADOSClient(self) as client:
self.RBDProxy().create(client.ioctx,
vol_name,
size,
order,
old_format=False,
features=client.features)
try:
volume_update = self._enable_replication_if_needed(volume)
except Exception:
self.RBDProxy().remove(client.ioctx, vol_name)
err_msg = (_('Failed to enable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
return volume_update
def _flatten(self, pool, volume_name):
LOG.debug('flattening %(pool)s/%(img)s',
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume.name))
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
vol_name = utils.convert_str(volume.name)
with RADOSClient(self, src_pool) as src_client:
with RADOSClient(self) as dest_client:
self.RBDProxy().clone(src_client.ioctx,
utils.convert_str(src_image),
utils.convert_str(src_snap),
dest_client.ioctx,
vol_name,
features=src_client.features,
order=order)
try:
volume_update = self._enable_replication_if_needed(volume)
except Exception:
self.RBDProxy().remove(dest_client.ioctx, vol_name)
err_msg = (_('Failed to enable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
return volume_update or {}
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
size = int(volume.size) * units.Gi
with RBDVolumeProxy(self, volume.name) as vol:
vol.resize(size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volume_update = self._clone(volume, self.configuration.rbd_pool,
snapshot.volume_name, snapshot.name)
if self.configuration.rbd_flatten_volume_from_snapshot:
self._flatten(self.configuration.rbd_pool, volume.name)
if int(volume.size):
self._resize(volume)
return volume_update
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug("volume has no backup snaps")
def _get_clone_info(self, volume, volume_name, snap=None):
"""If volume is a clone, return its parent info.
Returns a tuple of (pool, parent, snap). A snapshot may optionally be
        provided for the case where a cloned volume has been flattened but its
snapshot still depends on the parent.
"""
try:
if snap:
volume.set_snap(snap)
pool, parent, parent_snap = tuple(volume.parent_info())
if snap:
volume.set_snap(None)
# Strip the tag off the end of the volume name since it will not be
# in the snap name.
if volume_name.endswith('.deleted'):
volume_name = volume_name[:-len('.deleted')]
# Now check the snap name matches.
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug("Volume %s is not a clone.", volume_name)
volume.set_snap(None)
return (None, None, None)
def _get_children_info(self, volume, snap):
"""List children for the given snapshot of a volume(image).
Returns a list of (pool, image).
"""
children_list = []
if snap:
volume.set_snap(snap)
children_list = volume.list_children()
volume.set_snap(None)
return children_list
def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
"""Walk back up the clone chain and delete references.
Deletes references i.e. deleted parent volumes and snapshots.
"""
parent_rbd = self.rbd.Image(client.ioctx, parent_name)
parent_has_snaps = False
try:
# Check for grandparent
_pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
parent_name,
parent_snap)
LOG.debug("deleting parent snapshot %s", parent_snap)
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
parent_has_snaps = bool(list(parent_rbd.list_snaps()))
finally:
parent_rbd.close()
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
LOG.debug("deleting parent %s", parent_name)
self.RBDProxy().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
if g_parent:
self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = utils.convert_str(volume.name)
with RADOSClient(self) as client:
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
LOG.info("volume %s no longer exists in backend",
volume_name)
return
clone_snap = None
parent = None
# Ensure any backup snapshots are deleted
self._delete_backup_snaps(rbd_image)
            # If the volume has any non-clone snapshots, this delete is
            # expected to raise VolumeIsBusy, so do so straight away.
try:
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug("volume has clone snapshot(s)")
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
break
raise exception.VolumeIsBusy(volume_name=volume_name)
# Determine if this volume is itself a clone
_pool, parent, parent_snap = self._get_clone_info(rbd_image,
volume_name,
clone_snap)
finally:
rbd_image.close()
@utils.retry(self.rbd.ImageBusy,
self.configuration.rados_connection_interval,
self.configuration.rados_connection_retries)
def _try_remove_volume(client, volume_name):
self.RBDProxy().remove(client.ioctx, volume_name)
if clone_snap is None:
LOG.debug("deleting rbd volume %s", volume_name)
try:
_try_remove_volume(client, volume_name)
except self.rbd.ImageBusy:
msg = (_("ImageBusy error raised while deleting rbd "
"volume. This may have been caused by a "
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warning(msg)
                    # Now raise this so that the volume stays available and
                    # the delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
LOG.info("RBD volume %s not found, allowing delete "
"operation to proceed.", volume_name)
return
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug("volume is a clone so cleaning references")
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
# delete it. Instead we will keep it as a silent volume which
                # will be deleted when its snapshots and clones are deleted.
new_name = "%s.deleted" % (volume_name)
self.RBDProxy().rename(client.ioctx, volume_name, new_name)
def create_snapshot(self, snapshot):
"""Creates an rbd snapshot."""
with RBDVolumeProxy(self, snapshot.volume_name) as volume:
snap = utils.convert_str(snapshot.name)
volume.create_snap(snap)
volume.protect_snap(snap)
def delete_snapshot(self, snapshot):
"""Deletes an rbd snapshot."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = utils.convert_str(snapshot.volume_name)
snap_name = utils.convert_str(snapshot.name)
with RBDVolumeProxy(self, volume_name) as volume:
try:
volume.unprotect_snap(snap_name)
except self.rbd.InvalidArgument:
LOG.info(
"InvalidArgument: Unable to unprotect snapshot %s.",
snap_name)
except self.rbd.ImageNotFound:
LOG.info(
"ImageNotFound: Unable to unprotect snapshot %s.",
snap_name)
except self.rbd.ImageBusy:
children_list = self._get_children_info(volume, snap_name)
if children_list:
for (pool, image) in children_list:
LOG.info('Image %(pool)s/%(image)s is dependent '
'on the snapshot %(snap)s.',
{'pool': pool,
'image': image,
'snap': snap_name})
raise exception.SnapshotIsBusy(snapshot_name=snap_name)
try:
volume.remove_snap(snap_name)
except self.rbd.ImageNotFound:
LOG.info("Snapshot %s does not exist in backend.",
snap_name)
def _disable_replication(self, volume):
"""Disable replication on the given volume."""
vol_name = utils.convert_str(volume.name)
with RBDVolumeProxy(self, vol_name) as image:
image.mirror_image_disable(False)
driver_data = json.loads(volume.replication_driver_data)
            # If 'journaling' and/or 'exclusive-lock' were enabled by
            # '_enable_replication', they are disabled here. Features that
            # were already enabled beforehand are left as they were.
if not driver_data['had_journaling']:
image.update_features(self.rbd.RBD_FEATURE_JOURNALING, False)
if not driver_data['had_exclusive_lock']:
image.update_features(self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK,
False)
return {'replication_status': fields.ReplicationStatus.DISABLED,
'replication_driver_data': None}
def retype(self, context, volume, new_type, diff, host):
"""Retype from one volume type to another on the same backend."""
old_vol_replicated = self._is_replicated_type(volume.volume_type)
new_vol_replicated = self._is_replicated_type(new_type)
if old_vol_replicated and not new_vol_replicated:
try:
return True, self._disable_replication(volume)
except Exception:
err_msg = (_('Failed to disable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
elif not old_vol_replicated and new_vol_replicated:
try:
return True, self._enable_replication(volume)
except Exception:
err_msg = (_('Failed to enable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
if not new_vol_replicated and self._is_replication_enabled:
update = {'replication_status': fields.ReplicationStatus.DISABLED}
else:
update = None
return True, update
def _dumps(self, obj):
return json.dumps(obj, separators=(',', ':'), sort_keys=True)
def _exec_on_volume(self, volume_name, remote, operation, *args, **kwargs):
@utils.retry(rbd.ImageBusy,
self.configuration.rados_connection_interval,
self.configuration.rados_connection_retries)
def _do_exec():
timeout = self.configuration.replication_connect_timeout
with RBDVolumeProxy(self, volume_name, self.configuration.rbd_pool,
remote=remote, timeout=timeout) as rbd_image:
return getattr(rbd_image, operation)(*args, **kwargs)
return _do_exec()
def _failover_volume(self, volume, remote, is_demoted, replication_status):
"""Process failover for a volume.
There are 2 different cases that will return different update values
for the volume:
- Volume has replication enabled and failover succeeded: Set
replication status to failed-over.
- Volume has replication enabled and failover fails: Set status to
error, replication status to failover-error, and store previous
status in previous_status field.
"""
        # Failover is allowed when the volume has replication enabled or has
        # already failed over, because we may want to do a second failover.
vol_name = utils.convert_str(volume.name)
try:
self._exec_on_volume(vol_name, remote,
'mirror_image_promote', not is_demoted)
return {'volume_id': volume.id,
'updates': {'replication_status': replication_status}}
except Exception as e:
replication_status = fields.ReplicationStatus.FAILOVER_ERROR
LOG.error('Failed to failover volume %(volume)s with '
'error: %(error)s.',
{'volume': volume.name, 'error': e})
# Failover failed
error_result = {
'volume_id': volume.id,
'updates': {
'status': 'error',
'previous_status': volume.status,
'replication_status': replication_status
}
}
return error_result
def _demote_volumes(self, volumes, until_failure=True):
"""Try to demote volumes on the current primary cluster."""
result = []
try_demoting = True
for volume in volumes:
demoted = False
if try_demoting:
vol_name = utils.convert_str(volume.name)
try:
self._exec_on_volume(vol_name, self._active_config,
'mirror_image_demote')
demoted = True
except Exception as e:
LOG.debug('Failed to demote %(volume)s with error: '
'%(error)s.',
{'volume': volume.name, 'error': e})
try_demoting = not until_failure
result.append(demoted)
return result
def _get_failover_target_config(self, secondary_id=None):
if not secondary_id:
# In auto mode exclude failback and active
candidates = set(self._target_names).difference(
('default', self._active_backend_id))
if not candidates:
raise exception.InvalidReplicationTarget(
reason=_('RBD: No available failover target host.'))
secondary_id = candidates.pop()
return secondary_id, self._get_target_config(secondary_id)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target."""
LOG.info('RBD driver failover started.')
if not self._is_replication_enabled:
raise exception.UnableToFailOver(
reason=_('RBD: Replication is not enabled.'))
if secondary_id == 'default':
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.FAILED_OVER
secondary_id, remote = self._get_failover_target_config(secondary_id)
# Try to demote the volumes first
demotion_results = self._demote_volumes(volumes)
# Do the failover taking into consideration if they have been demoted
updates = [self._failover_volume(volume, remote, is_demoted,
replication_status)
for volume, is_demoted in zip(volumes, demotion_results)]
self._active_backend_id = secondary_id
self._active_config = remote
LOG.info('RBD driver failover completed.')
return secondary_id, updates, []
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def _get_keyring_contents(self):
        # NOTE(danpawlik) If the keyring is not provided in the Cinder
        # configuration, the os-brick library will take the keyring from the
        # default path.
keyring_file = self.configuration.rbd_keyring_conf
keyring_data = None
try:
if os.path.isfile(keyring_file):
with open(keyring_file, 'r') as k_file:
keyring_data = k_file.read()
except IOError:
LOG.debug('Cannot read RBD keyring file: %s.', keyring_file)
return keyring_data
def initialize_connection(self, volume, connector):
hosts, ports = self._get_mon_addrs()
data = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.configuration.rbd_pool,
volume.name),
'hosts': hosts,
'ports': ports,
'cluster_name': self.configuration.rbd_cluster_name,
'auth_enabled': (self.configuration.rbd_user is not None),
'auth_username': self.configuration.rbd_user,
'secret_type': 'ceph',
'secret_uuid': self.configuration.rbd_secret_uuid,
'volume_id': volume.id,
"discard": True,
'keyring': self._get_keyring_contents(),
}
}
LOG.debug('connection data: %s', data)
return data
def terminate_connection(self, volume, connector, **kwargs):
pass
def _parse_location(self, location):
prefix = 'rbd://'
if not location.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
pieces = [urllib.parse.unquote(loc)
for loc in location[len(prefix):].split('/')]
if any(map(lambda p: p == '', pieces)):
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
return pieces
def _get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def _is_cloneable(self, image_location, image_meta):
try:
fsid, pool, image, snapshot = self._parse_location(image_location)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s.', e)
return False
if self._get_fsid() != fsid:
LOG.debug('%s is in a different ceph cluster.', image_location)
return False
if image_meta['disk_format'] != 'raw':
LOG.debug("rbd image clone requires image format to be "
"'raw' but image %(image)s is '%(format)s'",
{"image": image_location,
"format": image_meta['disk_format']})
return False
# check that we can read the image
try:
with RBDVolumeProxy(self, image,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s.',
dict(loc=image_location, err=e))
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
if image_location:
# Note: image_location[0] is glance image direct_url.
# image_location[1] contains the list of all locations (including
# direct_url) or None if show_multiple_locations is False in
# glance configuration.
if image_location[1]:
url_locations = [location['url'] for
location in image_location[1]]
else:
url_locations = [image_location[0]]
# iterate all locations to look for a cloneable one.
for url_location in url_locations:
if url_location and self._is_cloneable(
url_location, image_meta):
_prefix, pool, image, snapshot = \
self._parse_location(url_location)
volume_update = self._clone(volume, pool, image, snapshot)
volume_update['provider_location'] = None
self._resize(volume)
return volume_update, True
return ({}, False)
def _image_conversion_dir(self):
tmpdir = (CONF.image_conversion_dir or
tempfile.gettempdir())
# ensure temporary directory exists
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
return tmpdir
def copy_image_to_encrypted_volume(self, context, volume, image_service,
image_id):
self._copy_image_to_volume(context, volume, image_service, image_id,
encrypted=True)
def copy_image_to_volume(self, context, volume, image_service, image_id):
self._copy_image_to_volume(context, volume, image_service, image_id)
def _encrypt_image(self, context, volume, tmp_dir, src_image_path):
encryption = self._check_encryption_provider(volume, context)
# Fetch the key associated with the volume and decode the passphrase
keymgr = key_manager.API(CONF)
key = keymgr.get(context, encryption['encryption_key_id'])
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
# Decode the dm-crypt style cipher spec into something qemu-img can use
cipher_spec = image_utils.decode_cipher(encryption['cipher'],
encryption['key_size'])
tmp_dir = self._image_conversion_dir()
with tempfile.NamedTemporaryFile(prefix='luks_',
dir=tmp_dir) as pass_file:
with open(pass_file.name, 'w') as f:
f.write(passphrase)
# Convert the raw image to luks
dest_image_path = src_image_path + '.luks'
image_utils.convert_image(src_image_path, dest_image_path,
'luks', src_format='raw',
cipher_spec=cipher_spec,
passphrase_file=pass_file.name)
# Replace the original image with the now encrypted image
os.rename(dest_image_path, src_image_path)
def _copy_image_to_volume(self, context, volume, image_service, image_id,
encrypted=False):
tmp_dir = self._image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
image_utils.fetch_to_raw(context, image_service, image_id,
tmp.name,
self.configuration.volume_dd_blocksize,
size=volume.size)
if encrypted:
self._encrypt_image(context, volume, tmp_dir, tmp.name)
self.delete_volume(volume)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['rbd', 'import',
'--pool', self.configuration.rbd_pool,
'--order', order,
tmp.name, volume.name,
'--new-format']
args.extend(self._ceph_args())
self._try_execute(*args)
self._resize(volume)
# We may need to re-enable replication because we have deleted the
# original image and created a new one using the command line import.
try:
self._enable_replication_if_needed(volume)
except Exception:
err_msg = (_('Failed to enable image replication'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume.id)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
tmp_dir = self._image_conversion_dir()
tmp_file = os.path.join(tmp_dir,
volume.name + '-' + image_meta['id'])
with fileutils.remove_path_on_error(tmp_file):
args = ['rbd', 'export',
'--pool', self.configuration.rbd_pool,
volume.name, tmp_file]
args.extend(self._ceph_args())
self._try_execute(*args)
image_utils.upload_volume(context, image_service,
image_meta, tmp_file)
os.unlink(tmp_file)
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
old_size = volume.size
try:
size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
'%(volname)s') % {'volname': volume.name}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
def manage_existing(self, volume, existing_ref):
"""Manages an existing image.
Renames the image name to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
:param volume:
volume ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd image>}
"""
# Raise an exception if we didn't find a suitable rbd image.
with RADOSClient(self) as client:
rbd_name = existing_ref['source-name']
self.RBDProxy().rename(client.ioctx,
utils.convert_str(rbd_name),
utils.convert_str(volume.name))
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing image for manage_existing.
:param volume:
volume ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd image>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
rbd_name = utils.convert_str(existing_ref['source-name'])
with RADOSClient(self) as client:
# Raise an exception if we didn't find a suitable rbd image.
try:
rbd_image = self.rbd.Image(client.ioctx, rbd_name)
except self.rbd.ImageNotFound:
kwargs = {'existing_ref': rbd_name,
'reason': 'Specified rbd image does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
image_size = rbd_image.size()
rbd_image.close()
# RBD image size is returned in bytes. Attempt to parse
# size as a float and round up to the next integer.
try:
convert_size = int(math.ceil(float(image_size) / units.Gi))
return convert_size
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size "
"%(size)s was not a floating-point"
" number.")
% {'name': rbd_name,
'size': image_size})
raise exception.VolumeBackendAPIException(
data=exception_message)
def _get_image_status(self, image_name):
args = ['rbd', 'status',
'--pool', self.configuration.rbd_pool,
'--format=json',
image_name]
args.extend(self._ceph_args())
out, _ = self._execute(*args)
return json.loads(out)
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
manageable_volumes = []
cinder_ids = [resource['id'] for resource in cinder_volumes]
with RADOSClient(self) as client:
for image_name in self.RBDProxy().list(client.ioctx):
image_id = volume_utils.extract_id_from_volume_name(image_name)
with RBDVolumeProxy(self, image_name, read_only=True) as image:
try:
image_info = {
'reference': {'source-name': image_name},
'size': int(math.ceil(
float(image.size()) / units.Gi)),
'cinder_id': None,
'extra_info': None
}
if image_id in cinder_ids:
image_info['cinder_id'] = image_id
image_info['safe_to_manage'] = False
image_info['reason_not_safe'] = 'already managed'
elif len(self._get_image_status(
image_name)['watchers']) > 0:
                            # If the image has one or more watchers it is
                            # considered to be in use by a client.
image_info['safe_to_manage'] = False
image_info['reason_not_safe'] = 'volume in use'
else:
image_info['safe_to_manage'] = True
image_info['reason_not_safe'] = None
manageable_volumes.append(image_info)
except self.rbd.ImageNotFound:
LOG.debug("Image %s is not found.", image_name)
return volume_utils.paginate_entries_list(
manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)
def unmanage(self, volume):
pass
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from RBD for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
existing_name = CONF.volume_name_template % new_volume.id
wanted_name = CONF.volume_name_template % volume.id
with RADOSClient(self) as client:
try:
self.RBDProxy().rename(client.ioctx,
utils.convert_str(existing_name),
utils.convert_str(wanted_name))
except self.rbd.ImageNotFound:
LOG.error('Unable to rename the logical volume '
'for volume %s.', volume.id)
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume._name_id or new_volume.id
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def migrate_volume(self, context, volume, host):
refuse_to_migrate = (False, None)
if volume.status not in ('available', 'retyping', 'maintenance'):
LOG.debug('Only available volumes can be migrated using backend '
'assisted migration. Falling back to generic migration.')
return refuse_to_migrate
if (host['capabilities']['storage_protocol'] != 'ceph'):
LOG.debug('Source and destination drivers need to be RBD '
'to use backend assisted migration. Falling back to '
'generic migration.')
return refuse_to_migrate
loc_info = host['capabilities'].get('location_info')
LOG.debug('Attempting RBD assisted volume migration. volume: %(id)s, '
'host: %(host)s, status=%(status)s.',
{'id': volume.id, 'host': host, 'status': volume.status})
if not loc_info:
LOG.debug('Could not find location_info in capabilities reported '
'by the destination driver. Falling back to generic '
'migration.')
return refuse_to_migrate
try:
(rbd_cluster_name, rbd_ceph_conf, rbd_fsid, rbd_user, rbd_pool) = (
utils.convert_str(loc_info).split(':'))
except ValueError:
LOG.error('Location info needed for backend enabled volume '
'migration not in correct format: %s. Falling back to '
'generic volume migration.', loc_info)
return refuse_to_migrate
with linuxrbd.RBDClient(rbd_user, rbd_pool, conffile=rbd_ceph_conf,
rbd_cluster_name=rbd_cluster_name) as target:
if ((rbd_fsid != self._get_fsid() or
rbd_fsid != target.client.get_fsid())):
LOG.info('Migration between clusters is not supported. '
'Falling back to generic migration.')
return refuse_to_migrate
with RBDVolumeProxy(self, volume.name, read_only=True) as source:
try:
source.copy(target.ioctx, volume.name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Error copying rbd image %(vol)s to target '
'pool %(pool)s.',
{'vol': volume.name, 'pool': rbd_pool})
self.RBDProxy().remove(target.ioctx, volume.name)
try:
# If the source fails to delete for some reason, we want to leave
            # the target volume in place in case deleting it might cause a
            # loss of data.
self.delete_volume(volume)
except Exception:
            reason = (_('Failed to delete migration source volume %s.') %
                      volume.id)
raise exception.VolumeMigrationFailed(reason=reason)
LOG.info('Successful RBD assisted volume migration.')
return (True, None)
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of an existing image for manage_existing.
:param snapshot:
snapshot ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of snapshot>}
"""
# Check that the reference is valid
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
volume_name = utils.convert_str(snapshot.volume_name)
snapshot_name = utils.convert_str(existing_ref['source-name'])
with RADOSClient(self) as client:
# Raise an exception if we didn't find a suitable rbd image.
try:
rbd_snapshot = self.rbd.Image(client.ioctx, volume_name,
snapshot=snapshot_name)
except self.rbd.ImageNotFound:
kwargs = {'existing_ref': snapshot_name,
'reason': 'Specified snapshot does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
snapshot_size = rbd_snapshot.size()
rbd_snapshot.close()
# RBD image size is returned in bytes. Attempt to parse
# size as a float and round up to the next integer.
try:
convert_size = int(math.ceil(float(snapshot_size) / units.Gi))
return convert_size
except ValueError:
exception_message = (_("Failed to manage existing snapshot "
"%(name)s, because reported size "
"%(size)s was not a floating-point"
" number.")
% {'name': snapshot_name,
'size': snapshot_size})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Manages an existing snapshot.
Renames the snapshot name to match the expected name for the snapshot.
Error checking done by manage_existing_get_size is not repeated.
:param snapshot:
snapshot ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd snapshot>}
"""
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
volume_name = utils.convert_str(snapshot.volume_name)
with RBDVolumeProxy(self, volume_name) as volume:
snapshot_name = existing_ref['source-name']
volume.rename_snap(utils.convert_str(snapshot_name),
utils.convert_str(snapshot.name))
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management."""
pass
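
# Illustrative, self-contained sketch (not part of the upstream driver): it
# restates two small pieces of the logic above so they can be exercised
# without a Ceph cluster -- the 'rbd://fsid/pool/image/snapshot' location
# format that _parse_location() expects, and the bytes-to-GiB rounding used by
# manage_existing_get_size() and get_manageable_volumes(). The sample values
# in the comments are made up.
def _sketch_parse_rbd_location(location):
    import urllib.parse as _urlparse

    prefix = 'rbd://'
    if not location.startswith(prefix):
        raise ValueError('Not stored in rbd')
    pieces = [_urlparse.unquote(piece)
              for piece in location[len(prefix):].split('/')]
    if len(pieces) != 4 or any(piece == '' for piece in pieces):
        raise ValueError('Malformed rbd location')
    # e.g. 'rbd://<fsid>/images/<image-id>/snap'
    #      -> [<fsid>, 'images', <image-id>, 'snap']
    return pieces


def _sketch_bytes_to_gib(image_size_bytes):
    # Mirrors the rounding used above: report whole GiB, always rounding up,
    # e.g. 5 GiB + 1 byte -> 6.
    import math as _math
    return int(_math.ceil(float(image_size_bytes) / (1024 ** 3)))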
| j-griffith/cinder | cinder/volume/drivers/rbd.py | Python | apache-2.0 | 74,922 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
class DelayToken(Actor):
"""
Sends input on after a given delay has passed. Preserves time between tokens.
Input :
token : anything
Outputs:
token : anything
"""
@manage(['delay', 'timers'])
def init(self, delay):
self.delay = delay
self.timers = []
def new_timer(self):
timer = calvinsys.open(self, "sys.timer.once", period=self.delay)
return timer
@condition(['token'])
def token_available(self, token):
self.timers.append({'token': token, 'timer': self.new_timer()})
@stateguard(lambda self: len(self.timers) > 0 and calvinsys.can_read(self.timers[0]['timer']))
@condition([], ['token'])
def timeout(self):
item = self.timers.pop(0)
calvinsys.read(item['timer'])
calvinsys.close(item['timer'])
return (item['token'], )
action_priority = (timeout, token_available)
requires = ['sys.timer.once']
test_kwargs = {'delay': 20}
test_calvinsys = {'sys.timer.once': {'read': ["dummy", "dummy", "dummy", "dummy"],
'write': [20, 20, 20, 20]}}
test_set = [
{
'inports': {'token': ["a", "b", 1, 1]},
'outports': {'token': ["a", "b", 1, 1]},
}
]
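# Design note on the actor above: every incoming token opens its own one-shot
# 'sys.timer.once' timer and is queued in self.timers, so tokens are released
# in FIFO order and the original spacing between consecutive tokens is
# preserved instead of being collapsed into a single shared delay.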
| EricssonResearch/calvin-base | calvin/actorstore/systemactors/std/DelayToken.py | Python | apache-2.0 | 1,975 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Code for evaluating models as they train."""
import time
import numpy as np
import tensorflow as tf
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.model import hparams
from ldif.training import metrics
from ldif.inference import refine
from ldif.util import file_util
from ldif.util import image_util
from ldif.util import np_util
# pylint: enable=g-bad-import-order
def make_eval_step(model_config, training_example, prediction):
"""Returns a function that computes a model evaluation step."""
# TODO(kgenova) The eval code hasn't really been refactored yet, only train.
opt_mode = model_config.hparams.ir
if opt_mode == 'zero-set':
# TODO(kgenova) This is broken:
sample_locations = training_example.sample_sdf_near_surface(3000)[0]
point_count = sample_locations.get_shape().as_list()[1]
optimization_target = tf.zeros([model_config.hparams.bs, point_count, 1],
dtype=tf.float32)
else:
raise ValueError('Unrecognized value for hparam ir: %s' %
model_config.hparams.ir)
with tf.name_scope('eval_summaries'):
# Summarize the outputs:
if len(prediction.structured_implicit.tensor_list) == 3:
constants, centers, radii = prediction.structured_implicit.tensor_list
iparams = None
elif len(prediction.structured_implicit.tensor_list) == 4:
constants, centers, radii, iparams = (
prediction.structured_implicit.tensor_list)
mean_radius = tf.reduce_mean(radii)
tf.summary.scalar('%s-mean_radius' % training_example.split, mean_radius)
mean_center = tf.reduce_mean(centers)
tf.summary.scalar('%s-mean_center' % training_example.split, mean_center)
mean_constant = tf.reduce_mean(constants)
tf.summary.scalar('%s-mean_constant' % training_example.split,
mean_constant)
if iparams is not None:
tf.summary.scalar('%s-mean_abs_iparams' % training_example.split,
tf.reduce_mean(tf.abs(iparams)))
tf.summary.histogram('%s-iparams' % training_example.split, iparams)
tf.summary.histogram('%s-constants' % training_example.split, constants)
tf.summary.histogram('%s-centers' % training_example.split, centers)
tf.summary.histogram('%s-radii' % training_example.split, radii)
(structured_implicit_ph, samples_ph, optimization_target_ph, gradients,
render_summary_op, original_vis_ph,
iterated_render_tf) = refine.sample_gradients_wrt_placeholders(
model_config, training_example, prediction, sample_locations)
vis_count = 1
do_iterative_update = False
big_render_ph = tf.placeholder(
tf.uint8)
# [model_config.hparams.bs * vis_count, 256 * 1, 256 + 256 * 2, 4])
big_render_summary_op = tf.summary.image(
'%s-big-render' % training_example.split,
big_render_ph,
collections=[],
max_outputs=model_config.hparams.bs * vis_count)
rbf_render_at_half_ph = tf.placeholder(tf.float32)
rbf_render_at_half_summary_op = tf.summary.image(
'%s-rbf_render_at_half' % training_example.split,
rbf_render_at_half_ph,
collections=[],
max_outputs=model_config.hparams.bs * vis_count)
depth_gt_out_ph = tf.placeholder(tf.float32)
depth_gt_out_summary_op = tf.summary.image(
'%s-depth_gt_out' % training_example.split,
depth_gt_out_ph,
collections=[],
max_outputs=model_config.hparams.bs * vis_count)
# Also prefetches in case this is a property mutating the graph:
in_out_image_big = tf.image.resize_images(
prediction.in_out_image,
size=[256, 256],
align_corners=True)
tf.logging.info('in_out_image_big shape: %s',
str(in_out_image_big.get_shape().as_list()))
sample_locations, sample_gt = training_example.all_uniform_samples()
example_iou = metrics.point_iou(prediction.structured_implicit,
sample_locations, sample_gt, model_config)
iou_ph = tf.placeholder(tf.float32)
mean_iou_summary_op = tf.summary.scalar(
'%s-mean-iou' % training_example.split,
tf.reduce_mean(iou_ph),
collections=[])
iou_hist_summary_op = tf.summary.histogram(
'%s-iou-histogram' % training_example.split, iou_ph, collections=[])
def eval_step(session, global_step, desired_num_examples, eval_tag,
eval_checkpoint):
"""Runs a single eval step.
Runs over the full desired eval set, regardless of batch size.
Args:
session: A tf.Session instance.
global_step: The global step tensor.
desired_num_examples: The number of examples from the eval dataset to
evaluate.
eval_tag: A tag to specify the eval type. Defaults to 'eval'.
eval_checkpoint: The path of the checkpoint being evaluated.
Returns:
A list of tf.Summary objects computed during the eval.
"""
step_start_time = time.time()
del eval_tag, desired_num_examples
global_step_int = int(global_step)
# num_batches = max(1, desired_num_examples // model_config.hparams.bs)
big_render_images = []
all_centers_np = []
all_radii_np = []
all_constants_np = []
all_quadrics_np = []
all_iparams_np = []
all_mesh_names_np = []
all_depth_images_np = []
tf.logging.info('The eval checkpoint str is %s', eval_checkpoint)
eval_dir = '/'.join(eval_checkpoint.split('/')[:-1])
hparam_path = eval_dir + '/hparam_pickle.txt'
if not file_util.exists(hparam_path):
hparams.write_hparams(model_config.hparams, hparam_path)
output_dir = (eval_dir + '/eval-step-' + str(global_step_int) + '/')
def to_uint8(np_im):
return (np.clip(255.0 * np_im, 0, 255.0)).astype(np.uint8)
ran_count = 0
max_run_count = 500
ious = np.zeros(max_run_count, dtype=np.float32)
# Run until the end of the dataset:
for vi in range(max_run_count):
tf.logging.info('Starting eval item %i, total elapsed time is %0.2f...',
vi,
time.time() - step_start_time)
try:
vis_start_time = time.time()
if vi < vis_count:
misc_tensors_to_eval = [
model_config.summary_op, optimization_target, sample_locations,
in_out_image_big, training_example.mesh_name, example_iou,
]
np_out = session.run(misc_tensors_to_eval +
prediction.structured_implicit.tensor_list)
(summaries, optimization_target_np, samples_np, in_out_image_big_np,
mesh_names_np, example_iou_np) = np_out[:len(misc_tensors_to_eval)]
in_out_image_big_np = np.reshape(in_out_image_big_np,
[256, 256, 1])
in_out_image_big_np = image_util.get_pil_formatted_image(
in_out_image_big_np)
tf.logging.info('in_out_image_big_np shape: %s',
str(in_out_image_big_np.shape))
in_out_image_big_np = np.reshape(in_out_image_big_np,
[1, 256, 256, 4])
implicit_np_list = np_out[len(misc_tensors_to_eval):]
tf.logging.info('\tElapsed after first sess run: %0.2f',
time.time() - vis_start_time)
else:
np_out = session.run([training_example.mesh_name, example_iou] +
prediction.structured_implicit.tensor_list)
mesh_names_np = np_out[0]
example_iou_np = np_out[1]
implicit_np_list = np_out[2:]
# TODO(kgenova) It would be nice to move all this functionality into
# a numpy StructuredImplicitNp class, and hide these internals.
ious[ran_count] = example_iou_np
ran_count += 1
constants_np, centers_np, radii_np = implicit_np_list[:3]
if len(implicit_np_list) == 4:
iparams_np = implicit_np_list[3]
else:
iparams_np = None
# For now, just map to quadrics and move on:
quadrics_np = np.zeros(
[constants_np.shape[0], constants_np.shape[1], 4, 4])
quadrics_np[0, :, 3, 3] = np.reshape(constants_np[0, :], [
model_config.hparams.sc,
])
all_centers_np.append(np.copy(centers_np))
all_radii_np.append(np.copy(radii_np))
all_constants_np.append(np.copy(constants_np))
all_quadrics_np.append(np.copy(quadrics_np))
all_mesh_names_np.append(mesh_names_np)
if iparams_np is not None:
all_iparams_np.append(iparams_np)
# For most of the dataset, just do inference to get the representation.
# Everything afterwards is just for tensorboard.
if vi >= vis_count:
continue
visualize_with_marching_cubes = False
if visualize_with_marching_cubes:
# TODO(kgenova) This code is quite wrong now. If we want to enable it
# it should be rewritten to call a structured_implicit_function to
# handle evaluation (for instance the lset subtraction is bad).
marching_cubes_ims_np, output_volumes = np_util.visualize_prediction(
quadrics_np,
centers_np,
radii_np,
renormalize=model_config.hparams.pou == 't',
thresh=model_config.hparams.lset)
tf.logging.info('\tElapsed after first visualize_prediction: %0.2f',
time.time() - vis_start_time)
offset_marching_cubes_ims_np, _ = np_util.visualize_prediction(
quadrics_np,
centers_np,
radii_np,
renormalize=model_config.hparams.pou == 't',
thresh=0.1,
input_volumes=output_volumes)
tf.logging.info('\tElapsed after second visualize_prediction: %0.2f',
time.time() - vis_start_time)
tf.logging.info('About to concatenate shapes: %s, %s, %s',
str(in_out_image_big_np.shape),
str(marching_cubes_ims_np.shape),
str(offset_marching_cubes_ims_np.shape))
in_out_image_big_np = np.concatenate([
in_out_image_big_np, marching_cubes_ims_np,
offset_marching_cubes_ims_np
],
axis=2)
if do_iterative_update:
# This code will fail (it's left unasserted to give a helpful tf error
# message). The tensor it creates will now be the wrong size.
render_summary, iterated_render_np = refine.refine(
structured_implicit_ph, optimization_target_ph, samples_ph,
original_vis_ph, gradients, implicit_np_list,
optimization_target_np, samples_np, in_out_image_big_np, session,
render_summary_op, iterated_render_tf)
render_summary = [render_summary]
in_out_with_iterated = np.concatenate(
[in_out_image_big_np, iterated_render_np], axis=2)
big_render_images.append(to_uint8(in_out_with_iterated))
else:
big_render_images.append(to_uint8(in_out_image_big_np))
# TODO(kgenova) Is this really the depth image?
depth_image_np = in_out_image_big_np[:, :, :256, :]
all_depth_images_np.append(depth_image_np)
render_summary = []
except tf.errors.OutOfRangeError:
break
tf.logging.info('Elapsed after vis loop: %0.2f',
time.time() - step_start_time)
ious = ious[:ran_count]
mean_iou_summary, iou_hist_summary = session.run(
[mean_iou_summary_op, iou_hist_summary_op], feed_dict={iou_ph: ious})
all_centers_np = np.concatenate(all_centers_np)
all_radii_np = np.concatenate(all_radii_np)
all_constants_np = np.concatenate(all_constants_np)
all_quadrics_np = np.concatenate(all_quadrics_np)
all_mesh_names_np = np.concatenate(all_mesh_names_np)
all_depth_images_np = np.concatenate(all_depth_images_np)
if all_iparams_np:
all_iparams_np = np.concatenate(all_iparams_np)
file_util.mkdir(output_dir, exist_ok=True)
file_util.write_np(
'%s/%s-constants.npy' % (output_dir, training_example.split),
all_constants_np)
file_util.write_np(
'%s/%s-quadrics.npy' % (output_dir, training_example.split),
all_quadrics_np)
file_util.write_np(
'%s/%s-centers.npy' % (output_dir, training_example.split),
all_centers_np)
file_util.write_np('%s/%s-radii.npy' % (output_dir, training_example.split),
all_radii_np)
file_util.write_np(
'%s/%s-mesh_names.npy' % (output_dir, training_example.split),
all_mesh_names_np)
# We do an explicit comparison because the type of all_iparams_np might
# not be a list at this point:
# pylint: disable=g-explicit-bool-comparison
if all_iparams_np != []:
file_util.write_np(
'%s/%s-iparams.npy' % (output_dir, training_example.split),
all_iparams_np)
# Now that the full set predictions have been saved to disk, scrap
# everything after the first vis_count:
all_centers_np = all_centers_np[:vis_count, ...]
all_radii_np = all_radii_np[:vis_count, ...]
all_constants_np = all_constants_np[:vis_count, ...]
all_mesh_names_np = all_mesh_names_np[:vis_count, ...]
tf.logging.info('Elapsed after .npy save: %0.2f',
time.time() - step_start_time)
rbf_renders_at_half = np_util.plot_rbfs_at_thresh(
all_centers_np, all_radii_np, thresh=0.5)
rbf_renders_at_half_summary = session.run(
rbf_render_at_half_summary_op,
feed_dict={rbf_render_at_half_ph: rbf_renders_at_half})
tf.logging.info('Elapsed after rbf_at_half summary: %0.2f',
time.time() - step_start_time)
tf.logging.info('All depth images shape: %s',
str(all_depth_images_np.shape))
depth_gt_out_summary = session.run(
depth_gt_out_summary_op,
feed_dict={
depth_gt_out_ph:
np.concatenate([all_depth_images_np, all_depth_images_np],
axis=2)
})
tf.logging.info('Elapsed after depth_gt_out summary: %0.2f',
time.time() - step_start_time)
big_render_summary = session.run(
big_render_summary_op,
feed_dict={big_render_ph: np.concatenate(big_render_images, axis=0)})
tf.logging.info('Evaluated %d batches of size %d.', vis_count,
model_config.hparams.bs)
tf.logging.info('Elapsed at end of step: %0.2f',
time.time() - step_start_time)
return [
summaries, big_render_summary, rbf_renders_at_half_summary,
depth_gt_out_summary, mean_iou_summary, iou_hist_summary
] + render_summary
return eval_step
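
# Standalone numpy illustration of two small pieces of eval_step() above: the
# first mirrors the nested to_uint8() helper; the second sketches the usual
# "IoU over sampled points" idea behind the per-example ious array (a
# conceptual sketch only -- not necessarily how metrics.point_iou is
# implemented).
def _sketch_to_uint8(np_im):
  return (np.clip(255.0 * np_im, 0, 255.0)).astype(np.uint8)


def _sketch_point_iou(pred_inside, gt_inside):
  pred_inside = np.asarray(pred_inside, dtype=bool)
  gt_inside = np.asarray(gt_inside, dtype=bool)
  intersection = np.logical_and(pred_inside, gt_inside).sum()
  union = np.logical_or(pred_inside, gt_inside).sum()
  return float(intersection) / max(float(union), 1.0)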
| google/ldif | ldif/training/eval_step.py | Python | apache-2.0 | 15,487 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-23 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('applications', '0013_location_local_government_authority'),
]
operations = [
migrations.AddField(
model_name='application',
name='supporting_info_demonstrate_compliance_trust_policies',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='supporting_info_demonstrate_compliance_trust_policies', to='applications.Record'),
),
]
| ropable/statdev | applications/migrations/0014_application_supporting_info_demonstrate_compliance_trust_policies.py | Python | apache-2.0 | 706 |
# -*- coding: utf-8 -*-
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# 3rd party libs
from flask import Blueprint
from flask import g
# Own libs
from oneview_redfish_toolkit.api.processor_collection \
import ProcessorCollection
from oneview_redfish_toolkit.api.resource_block_collection \
import ResourceBlockCollection
from oneview_redfish_toolkit.blueprints.util.response_builder \
import ResponseBuilder
processor_collection = Blueprint("processor_collection", __name__)
@processor_collection.route(
ResourceBlockCollection.BASE_URI + "/<uuid>/Systems/1/Processors/",
methods=["GET"])
def get_processor_collection(uuid):
"""Get the Redfish Resource Block System Processor Collection.
Get method to return Resource Block System Processor Collection JSON.
Returns:
JSON: JSON with Resource Block System Processor Collection info.
"""
server_hardware = g.oneview_client.server_hardware.get_by_id(uuid).data
processor_collection = ProcessorCollection(server_hardware)
return ResponseBuilder.success(
processor_collection,
{"ETag": "W/" + server_hardware["eTag"]})
| HewlettPackard/oneview-redfish-toolkit | oneview_redfish_toolkit/blueprints/processor_collection.py | Python | apache-2.0 | 1,717 |
from typing import List
import torch
from torch import nn
class Normalize(nn.Module):
mean: torch.Tensor
std: torch.Tensor
def __init__(self, n_features: int):
super().__init__()
# initialize to NaN so the code fails hard when this layer has not been initialized
mean = torch.full([n_features], float("nan"))
std = torch.full([n_features], float("nan"))
self.register_buffer("mean", mean)
self.register_buffer("std", std)
def set_parameters(self, mean: List[float], std: List[float]) -> None:
if self.mean.size(0) != len(mean):
raise ValueError("Mean has wrong number of features!")
if self.std.size(0) != len(std):
raise ValueError("Std has wrong number of features!")
for j, channel_mean in enumerate(mean):
self.mean[j] = channel_mean
for j, channel_std in enumerate(std):
self.std[j] = channel_std
def forward(self, x: torch.Tensor) -> torch.Tensor:
return (x - self.mean[:, None, None]) / self.std[:, None, None]
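
# Minimal usage sketch for the Normalize module above (illustrative only; the
# mean/std values are arbitrary example statistics, not ones shipped with this
# repository).
if __name__ == "__main__":
    normalize = Normalize(n_features=3)
    normalize.set_parameters(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    x = torch.rand(4, 3, 8, 8)  # (batch, channels, height, width)
    y = normalize(x)

    # The same computation written out explicitly, for comparison.
    expected = (x - normalize.mean[:, None, None]) / normalize.std[:, None, None]
    assert torch.allclose(y, expected)
    print(y.shape)  # torch.Size([4, 3, 8, 8])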
| googleinterns/out-of-distribution | src/modules/normalize.py | Python | apache-2.0 | 1,083 |
## Copyright 2016 Kurt Cutajar, Edwin V. Bonilla, Pietro Michiardi, Maurizio Filippone
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import sys
sys.path.append(".")
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets.mnist import extract_images, extract_labels
from tensorflow.python.framework import dtypes
from dataset import DataSet
import utils
import likelihoods
from dgp_rff import DgpRff
# import baselines
import losses
def process_mnist(images, dtype = dtypes.float32, reshape=True):
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
return images
def get_data_info(images):
rows, cols = images.shape
std = np.zeros(cols)
mean = np.zeros(cols)
for col in range(cols):
std[col] = np.std(images[:,col])
mean[col] = np.mean(images[:,col])
return mean, std
def standardize_data(images, means, stds):
data = images.copy()
rows, cols = data.shape
for col in range(cols):
if stds[col] == 0:
data[:,col] = (data[:,col] - means[col])
else:
data[:,col] = (data[:,col] - means[col]) / stds[col]
return data
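# Illustrative self-check (defined but not called anywhere in this script): on
# a small random matrix, get_data_info/standardize_data above should agree
# with the vectorized numpy equivalent, except that zero-variance columns are
# only centered, not scaled.
def _sketch_check_standardization():
    rng = np.random.RandomState(0)
    images = rng.rand(6, 4).astype(np.float32)
    images[:, 2] = 0.5  # constant column -> std == 0, so it is only centered
    mean, std = get_data_info(images)
    standardized = standardize_data(images, mean, std)
    safe_std = np.where(std == 0, 1.0, std)
    expected = (images - mean) / safe_std
    assert np.allclose(standardized, expected, atol=1e-6)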
def import_mnist():
"""
    This imports MNIST and saves the data as an object of our DataSet class
:return:
"""
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 0
ONE_HOT = True
TRAIN_DIR = 'MNIST_data'
local_file = base.maybe_download(TRAIN_IMAGES, TRAIN_DIR,
SOURCE_URL + TRAIN_IMAGES)
train_images = extract_images(open(local_file, 'rb'))
local_file = base.maybe_download(TRAIN_LABELS, TRAIN_DIR,
SOURCE_URL + TRAIN_LABELS)
train_labels = extract_labels(open(local_file, 'rb'), one_hot=ONE_HOT)
local_file = base.maybe_download(TEST_IMAGES, TRAIN_DIR,
SOURCE_URL + TEST_IMAGES)
test_images = extract_images(open(local_file, 'rb'))
local_file = base.maybe_download(TEST_LABELS, TRAIN_DIR,
SOURCE_URL + TEST_LABELS)
test_labels = extract_labels(open(local_file, 'rb'), one_hot=ONE_HOT)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
## Process images
train_images = process_mnist(train_images)
validation_images = process_mnist(validation_images)
test_images = process_mnist(test_images)
## Standardize data
train_mean, train_std = get_data_info(train_images)
# train_images = standardize_data(train_images, train_mean, train_std)
# validation_images = standardize_data(validation_images, train_mean, train_std)
# test_images = standardize_data(test_images, train_mean, train_std)
data = DataSet(train_images, train_labels)
test = DataSet(test_images, test_labels)
val = DataSet(validation_images, validation_labels)
return data, test, val
if __name__ == '__main__':
FLAGS = utils.get_flags()
## Set random seed for tensorflow and numpy operations
tf.set_random_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
data, test, _ = import_mnist()
## Here we define a custom loss for dgp to show
error_rate = losses.ZeroOneLoss(data.Dout)
## Likelihood
like = likelihoods.Softmax()
## Optimizer
optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)
## Main dgp object
dgp = DgpRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1], FLAGS.nl, FLAGS.n_rff, FLAGS.df, FLAGS.kernel_type, FLAGS.kernel_arccosine_degree, FLAGS.is_ard, FLAGS.local_reparam, FLAGS.feed_forward, FLAGS.q_Omega_fixed, FLAGS.theta_fixed, FLAGS.learn_Omega)
## Learning
dgp.learn(data, FLAGS.learning_rate, FLAGS.mc_train, FLAGS.batch_size, FLAGS.n_iterations, optimizer,
FLAGS.display_step, test, FLAGS.mc_test, error_rate, FLAGS.duration, FLAGS.less_prints)
| mauriziofilippone/deep_gp_random_features | code/experiments/dgp_rff_mnist.py | Python | apache-2.0 | 5,142 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
import sys
import os
import signal
import threading
try:
from PyQt4 import QtGui, QtCore, uic
except ImportError:
print("you have to have PyQt4 for your version of Python (%s) installed"
% ".".join(str(x) for x in sys.version_info))
sys.exit(-1)
class client: # (QtCore.QObject):
def __init__(self, hostname):
self._context = zmq.Context()
self._req_socket = self._context.socket(zmq.REQ)
self._req_socket.connect('tcp://%s:9876' % hostname)
self._req_poller = zmq.Poller()
self._req_poller.register(self._req_socket, zmq.POLLIN)
self._notification_handler = None
self._running = True
self._sub_thread = threading.Thread(target=self._subscriber_thread_fn)
self._sub_thread.start()
def set_notification_handler(self, handler):
assert hasattr(handler, '_on_notification')
self._notification_handler = handler
def request(self, msg):
self._req_socket.send_json(msg)
while True:
if self._req_poller.poll(1000) == []:
print('server timeout!')
continue
break
reply = self._req_socket.recv_json()
self._notification_handler._on_message("reply: %s" % reply)
return reply
def shutdown(self):
print('shutdown client..')
self._running = False
self._sub_thread.join()
self._req_socket.close()
self._context.term()
print('ready!')
def _subscriber_thread_fn(self):
print("connect to broadcasts")
_sub_socket = self._context.socket(zmq.SUB)
_sub_socket.connect('tcp://127.0.0.1:9875')
_sub_socket.setsockopt(zmq.SUBSCRIBE, b"")
_sub_poller = zmq.Poller()
_sub_poller.register(_sub_socket, zmq.POLLIN)
while True:
if not self._running:
break
if _sub_poller.poll(200) == []:
continue
_msg = _sub_socket.recv_json()
print('published "%s"' % _msg)
if self._notification_handler:
self._notification_handler._on_notification(_msg)
_sub_socket.close()
print("disconnect from broadcasts")
class yousched_ui(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
uic.loadUi(os.path.join(os.path.dirname(__file__), 'rrplayer.ui'), self)
self._client = client('127.0.0.1')
self._client.set_notification_handler(self)
self.pb_play.clicked.connect(self.on_pb_play_Clicked)
self.pb_skip.clicked.connect(self.on_pb_skip_Clicked)
self.pb_upvote.clicked.connect(self.on_pb_upvote_Clicked)
self.pb_ban.clicked.connect(self.on_pb_ban_Clicked)
self.pb_stop.clicked.connect(self.on_pb_stop_Clicked)
self.pb_add.clicked.connect(self.on_pb_add_Clicked)
# should be done by client
_result = self._client.request({'type': 'hello', 'name': 'frans'})
if 'current_track' in _result:
self._update_track_info(_result['current_track'])
def _update_track_info(self, filename):
self.lbl_current_track.setText(os.path.basename(filename))
def _on_notification(self, msg):
QtCore.QMetaObject.invokeMethod(
self, "_on_server_notification",
QtCore.Qt.QueuedConnection,
QtCore.Q_ARG(dict, msg))
def _on_message(self, msg):
QtCore.QMetaObject.invokeMethod(
self, "_message_out",
QtCore.Qt.QueuedConnection,
QtCore.Q_ARG(str, msg))
@QtCore.pyqtSlot(dict)
def _on_server_notification(self, msg):
self._message_out(msg)
if 'track' in msg:
self._update_track_info(msg['track'])
@QtCore.pyqtSlot(str)
def _message_out(self, msg):
self.lst_messages.addItem(str(msg))
self.lst_messages.scrollToBottom()
print(msg)
def on_pb_play_Clicked(self):
print('play')
self._client.request({'type': 'play'})
def on_pb_skip_Clicked(self):
print('skip')
self._client.request({'type': 'skip'})
def on_pb_upvote_Clicked(self):
print('upvote')
self._client.request({'type': 'upvote'})
def on_pb_ban_Clicked(self):
print('ban')
self._client.request({'type': 'ban'})
def on_pb_stop_Clicked(self):
print('stop')
self._client.request({'type': 'stop'})
def on_pb_add_Clicked(self):
print('add %s' % self.le_add.text())
self._client.request(
{'type': 'add',
'url': str(self.le_add.text())})
def closeEvent(self, event):
self._client.shutdown()
if __name__ == '__main__':
print("Python info: %s, %s" % (
'.'.join((str(e) for e in sys.version_info)),
sys.executable))
app = QtGui.QApplication(sys.argv)
ex = yousched_ui()
ex.show()
sys.exit(app.exec_())
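
# Minimal, GUI-free sketch of the request/poll pattern used by client.request()
# above (illustrative only, reusing the zmq/threading imports at the top of
# this file): a throwaway in-process REP socket answers a single REQ, guarded
# by the same 1-second Poller timeout loop. Not invoked automatically; call
# _demo_request_reply() by hand to try it without the real server.
def _demo_request_reply():
    context = zmq.Context.instance()
    rep_socket = context.socket(zmq.REP)
    port = rep_socket.bind_to_random_port('tcp://127.0.0.1')

    def _answer_once():
        request = rep_socket.recv_json()
        rep_socket.send_json({'ok': True, 'echo': request})

    server = threading.Thread(target=_answer_once)
    server.start()

    req_socket = context.socket(zmq.REQ)
    req_socket.connect('tcp://127.0.0.1:%d' % port)
    poller = zmq.Poller()
    poller.register(req_socket, zmq.POLLIN)

    req_socket.send_json({'type': 'hello', 'name': 'demo'})
    while poller.poll(1000) == []:
        print('server timeout!')
    print('reply: %s' % req_socket.recv_json())

    server.join()
    req_socket.close()
    rep_socket.close()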
| frans-fuerst/music_scheduler | client/pyclient.py | Python | apache-2.0 | 5,018 |
import re,os
from collections import namedtuple
def check_comment(full_string,index):
    # Check if index is in a comment line or not: scan backwards from index to
    # the start of the line and look for a '#'.
    is_comment = 0
    for i in range(index, -1, -1):
        if full_string[i] == "\n":
            break
        if full_string[i] == "#":
            is_comment = 1
    return is_comment
def find_all(a_string, sub):
result = []
k = 0
while k < len(a_string):
k = a_string.find(sub, k)
if k == -1:
return result
else:
result.append(k)
k += 1 #change to k += len(sub) to not search overlapping results
return result
def check_variable_in_used(func_body,variable):
list_index = find_all(func_body,variable)
if len(list_index) == 0:
return 0 #Not use
for index in list_index:
if check_comment(func_body,index) == 1:
continue
index3 = index + 1
for index2 in range(index+1, len(func_body)):
char = func_body[index2]
if (char.isalnum() or char == '_'):
index3 = index2
else:
break
var_name = func_body[index:index3 + 1]
if var_name == variable:
return 1
return 0
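# Illustrative example (runs as a cheap sanity check when the script starts):
# check_variable_in_used() only counts whole-identifier matches, so '$foo' is
# "used" in the first body but not in the second, where only '$foobar' appears.
_example_body_uses_foo = 'my $foo = 1;\nreturn $foo;'
_example_body_uses_foobar = 'my $foobar = 1;\nreturn $foobar;'
assert check_variable_in_used(_example_body_uses_foo, '$foo') == 1
assert check_variable_in_used(_example_body_uses_foobar, '$foo') == 0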
def get_log(log_line):
m = re.search("write_log(.*?);",log_line)
if m != None:
log = m.group(1)
log = log.replace('\",\"',' ');
log = log.replace('(\"','')
log = log.replace(')\"','')
log = log.replace(')','')
log = log.replace('(','')
log = log.replace('".','')
return log
return ""
def get_return(func_body):
result=[]
lines = func_body.split('\n')
p= re.compile('^return (.*?)')
print len(lines)
ReturnValue = namedtuple('ReturnValue', 'value log')
log=""
pre_line=""
for line in lines:
line = line.replace('\t','')
m= re.search('return (.*?);',line)
#print m.group()
#p = re.compile('(a)b')
#m = p.match('ab')
if m != None:
#print m.group(1)
a =ReturnValue(value=m.group(1), log=get_log(pre_line))
print a
#result.append(m.group(1))
result.append(a)
pre_line = line
return result
def get_var(func_body):
result = []
lines = func_body.split('\n')
for line in lines:
line = line.replace('\t','')
m = re.search('my \((.*?)\) = @',line)
if m != None:
var= m.group(1)
for v in var.split(','):
result.append(v)
return result
perl_file = "utilities.pm"
perl_file_outout="out_"+perl_file
with open(perl_file, 'r') as myfile:
data=myfile.read()
full_statement = ""
with open(perl_file, 'r') as myfile:
data=myfile.readlines()
for line in data:
line2 = line.strip()
#if(not line2.startswith("#")):
full_statement = full_statement + line
count = 0
start_block = 0
start_index = 0
finish_index = 0
list_body = []
list_function_name = []
list_start_index = []
for i in range(0, len(full_statement)):
if (full_statement[i] == "{" and check_comment(full_statement,i) == 0):
if count == 0:
start_block = 1
start_index = i + 1
beginning_to_func_name = full_statement[finish_index:i]
beginning_to_func_name = beginning_to_func_name.rstrip()
func_name_token = re.split('\s+', beginning_to_func_name)
func_name = func_name_token[-1]
list_function_name.append(func_name)
count = count + 1
if (full_statement[i] == "}" and check_comment(full_statement,i) == 0):
count = count - 1
if (count == 0):
start_block = 0
finish_index = i
print start_index
print finish_index
block = full_statement[start_index:finish_index]
list_body.append(block)
string_global1 = """%management_params, @list_master_ldisk, @list_backup_ldisk, @list_datastore_ldisk,
@list_rdm_ldisk, @created_lock_files, %ip_wwns, %original_datastores_name,
%original_datastores_ldisk, %current_datastores_name, %backup_datastores_name,
$current_vm_name, %target_vm_info, %target_datastore_info, %current_vm_info,
@list_preserved_ldisk_rdms, %master_backup_ldisk, @list_preserved_files,
@list_preserved_directory, $current_config_path, $preserved_config_path,
$current_datastore_name, @list_vms_stop, %preserved_vms_info, %preserved_datastore_info,
@list_vms_remove, @list_moved_files, @list_unregistered_vms, %vm_name_config_path,
$restore_preserved_datastore, %preserved_mapping_file_ldisk, $host_ip_current_vm,
$return_code,"""
string_global2 = """$job_id,$job_name, $seq_num, $v_retry_int, $v_retry_num, $concurrency, $quiesce,
$backup_method, $backup_type, $retention, $datacenter, $backup_home, $tsu_home,$tsr_home,
$job_file, $config_file, $vcenter_ip, $vi_user, $cred_file, @storage_ips,
$is_preserved, $target_name, $current_num, $management_file, $lock_home,"""
list_variable_meaning = {'$a':'this is test variable','$b':'var second'}
############## Init variable ########
string_global = string_global1 + string_global2
string_tokens = string_global.split(",")
list_global = []
for string in string_tokens:
string = string.strip()
if string != "":
list_global.append(string)
list_global_result = {}
for index in range(len(list_body)):
all_global = []
body = list_body[index]
func_name = list_function_name[index]
for global_variable in list_global:
print global_variable
if check_variable_in_used(body,global_variable) == 1:
all_global.append(global_variable)
continue
global_variable2 = ""
if global_variable[0] != "$":
global_variable2 = global_variable[1:]
global_variable2 = "$" + global_variable2
if check_variable_in_used(body,global_variable2) == 1:
all_global.append(global_variable)
list_global_result[func_name] = all_global
try:
    os.remove(perl_file_output)
except OSError:
    pass
text_file = open(perl_file_output, "a+")
for index in range(len(list_body)):
body = list_body[index]
func_name = list_function_name[index]
list_global_val = list_global_result[func_name]
#text_file.write("######################################################\n\n")
text_file.write("#** @function public " + func_name)
text_file.write("\n# Brief Description: ")
text_file.write("\n#")
text_file.write("\n# Detail Description: ")
text_file.write("\n#\n#\n#")
text_file.write("\n# Input Parameter: ")
for global_var in list_global_val:
if (global_var[0] == "$"):
tmp_var = "scalar " + global_var[1:]
elif (global_var[0] == "@"):
tmp_var = "array " + global_var[1:]
else:
tmp_var = "hash " + global_var[1:]
text_file.write("\n# @params " + tmp_var + " (global):")
my_var = get_var(body)
for var in my_var:
print var
text_file.write("\n# @params "+var+" :")
des = list_variable_meaning.get(var)
if des != None:
text_file.write(list_variable_meaning.get(var)+".")
text_file.write("\n# Output:")
return_val=get_return(body)
for val in return_val:
text_file.write("\n# @retval value "+val.value)
if val.log != '':
text_file.write(' and write message "'+val.log+'" to log file')
text_file.write('.')
text_file.write("\n#*")
#text_file.write("\n######################################################\n")
text_file.write("\n\nsub " + func_name + " {")
text_file.write(body + "}\n\n")
text_file.close()
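# Editor's note -- illustrative sketch only (not part of the original script):
# for a hypothetical sub "get_status" that reads the global $return_code,
# declares no "my (...) = @_" variables and ends with "return 0;" (with no
# write_log on the preceding line), the generated header looks roughly like:
#
#   #** @function public get_status
#   # Brief Description:
#   #
#   # Detail Description:
#   #
#   #
#   #
#   # Input Parameter:
#   # @params scalar return_code (global):
#   # Output:
#   # @retval value 0.
#   #*
#
#   sub get_status { ... }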
|
phunghv/mesos-framework-demo
|
change_comment.py
|
Python
|
apache-2.0
| 6,877
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
IMPORTANT: This code is taken directly from Tensorflow
(https://github.com/tensorflow/tensorflow) and is copied temporarily
until it is available in a packaged Tensorflow version on pypi.
TODO(dennybritz): Delete this code when it becomes available in TF.
A library of helpers for use with SamplingDecoders.
"""
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
from seq2seq.contrib.seq2seq import decoder
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Helper interface. Helper instances are used by SamplingDecoder."""
@abc.abstractproperty
def batch_size(self):
"""Returns a scalar int32 tensor."""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
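# Editor's sketch (assumption, not part of the upstream file): CustomHelper
# only delegates to the three callables it is given, so a minimal helper can
# be assembled from closures. ``first_inputs`` is a hypothetical [batch, dim]
# tensor that is fed back at every step.
def _example_custom_helper(first_inputs):
  """Illustrative only: a CustomHelper that always feeds ``first_inputs``."""
  def initialize_fn():
    batch = array_ops.shape(first_inputs)[0]
    return array_ops.tile([False], [batch]), first_inputs
  def sample_fn(time, outputs, state):
    return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)
  def next_inputs_fn(time, outputs, state, sample_ids):
    batch = array_ops.shape(first_inputs)[0]
    return array_ops.tile([False], [batch]), first_inputs, state
  return CustomHelper(initialize_fn, sample_fn, next_inputs_fn)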
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
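# Editor's sketch (assumption, not part of the upstream file): the usual way a
# TrainingHelper is built for teacher forcing. ``decoder_inputs`` (batch major,
# [batch, time, dim]) and ``lengths`` ([batch], int32) are hypothetical.
def _example_training_helper(decoder_inputs, lengths):
  """Illustrative only: teacher forcing over batch-major decoder inputs."""
  helper = TrainingHelper(inputs=decoder_inputs, sequence_length=lengths)
  finished, first_inputs = helper.initialize()
  return helper, finished, first_inputs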
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample_noise = random_ops.random_uniform(
[self.batch_size], seed=self._scheduling_seed)
select_sample = (self._sampling_probability > select_sample_noise)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
array_ops.tile([-1], [self.batch_size]))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
where_sampling_flat = array_ops.reshape(where_sampling, [-1])
where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
inputs_not_sampling = array_ops.gather(
base_next_inputs, where_not_sampling_flat)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
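# Editor's sketch (assumption, not part of the upstream file): scheduled
# sampling during training, where roughly a quarter of the next inputs are
# embedded samples from the model instead of ground truth. ``decoder_inputs``,
# ``lengths`` and ``embedding_matrix`` are hypothetical.
def _example_scheduled_embedding_helper(decoder_inputs, lengths,
                                        embedding_matrix):
  """Illustrative only: sample from the output ids 25% of the time."""
  return ScheduledEmbeddingTrainingHelper(
      inputs=decoder_inputs,
      sequence_length=lengths,
      embedding=embedding_matrix,
      sampling_probability=0.25)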
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_input_layer=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_input_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output to create
the next input.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
if (next_input_layer is not None and not isinstance(next_input_layer,
layers_base._Layer)): # pylint: disable=protected-access
raise TypeError("next_input_layer must be a Layer, received: %s" %
type(next_input_layer))
self._next_input_layer = next_input_layer
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return math_ops.cast(
sampler.sample(sample_shape=self.batch_size, seed=self._seed),
dtypes.bool)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_input_layer is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_input_layer(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
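# Editor's sketch (assumption, not part of the upstream file): scheduled
# sampling that feeds the raw RNN outputs back as inputs. This only makes
# sense when outputs and inputs share the same shape; ``decoder_inputs`` and
# ``lengths`` are hypothetical.
def _example_scheduled_output_helper(decoder_inputs, lengths):
  """Illustrative only: feed back raw outputs 10% of the time."""
  return ScheduledOutputTrainingHelper(
      inputs=decoder_inputs,
      sequence_length=lengths,
      sampling_probability=0.1)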
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
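# Editor's sketch (assumption, not part of the upstream file): a greedy
# inference helper. ``embedding_matrix`` is a hypothetical [vocab, dim]
# variable, ``batch_size`` a Python int, and token ids 1 and 2 stand in for
# the GO and EOS symbols.
def _example_greedy_helper(embedding_matrix, batch_size, start_id=1, end_id=2):
  """Illustrative only: greedy decoding driven by an embedding matrix."""
  start_tokens = array_ops.fill([batch_size], start_id)
  return GreedyEmbeddingHelper(embedding_matrix, start_tokens, end_id)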
|
chunfengh/seq2seq
|
seq2seq/contrib/seq2seq/helper.py
|
Python
|
apache-2.0
| 20,836
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file ``keystone/tests/backend_sql.conf`` to use the connection
for your live database
2. Set up a blank, live database
3. Run the tests using::
tox keystone.tests.test_sql_upgrade
WARNING::
Your database will be wiped.
Do not do this against a database with valuable data as all data will be
lost.
"""
import copy
import json
import uuid
from migrate.versioning import api as versioning_api
import sqlalchemy
import sqlalchemy.exc
from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.openstack.common.db import exception as db_exception
from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone import tests
from keystone.tests import default_fixtures
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
# NOTE(morganfainberg): This should be updated when each DB migration collapse
# is done to mirror the expected structure of the DB in the format of
# { <DB_TABLE_NAME>: [<COLUMN>, <COLUMN>, ...], ... }
INITIAL_TABLE_STRUCTURE = {
'credential': [
'id', 'user_id', 'project_id', 'blob', 'type', 'extra',
],
'domain': [
'id', 'name', 'enabled', 'extra',
],
'endpoint': [
'id', 'legacy_endpoint_id', 'interface', 'region', 'service_id', 'url',
'extra',
],
'group': [
'id', 'domain_id', 'name', 'description', 'extra',
],
'group_domain_metadata': [
'group_id', 'domain_id', 'data',
],
'group_project_metadata': [
'group_id', 'project_id', 'data',
],
'policy': [
'id', 'type', 'blob', 'extra',
],
'project': [
'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
],
'role': [
'id', 'name', 'extra',
],
'service': [
'id', 'type', 'extra',
],
'token': [
'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id',
],
'trust': [
'id', 'trustor_user_id', 'trustee_user_id', 'project_id',
'impersonation', 'deleted_at', 'expires_at', 'extra',
],
'trust_role': [
'trust_id', 'role_id',
],
'user': [
'id', 'name', 'extra', 'password', 'enabled', 'domain_id',
'default_project_id',
],
'user_domain_metadata': [
'user_id', 'domain_id', 'data',
],
'user_group_membership': [
'user_id', 'group_id',
],
'user_project_metadata': [
'user_id', 'project_id', 'data',
],
}
class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
def initialize_sql(self):
self.metadata = sqlalchemy.MetaData()
self.metadata.bind = self.engine
def config_files(self):
config_files = super(SqlMigrateBase, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def repo_package(self):
return sql
def setUp(self):
super(SqlMigrateBase, self).setUp()
conn_str = CONF.database.connection
if (conn_str != tests.IN_MEM_DB_CONN_STRING and
conn_str.startswith('sqlite') and
conn_str[10:] == tests.DEFAULT_TEST_DB_FILE):
# Override the default with a DB that is specific to the migration
# tests only if the DB Connection string is the same as the global
# default. This is required so that no conflicts occur due to the
# global default DB already being under migrate control. This is
# only needed if the DB is not-in-memory
db_file = tests.dirs.tmp('keystone_migrate_test.db')
self.config_fixture.config(
group='database',
connection='sqlite:///%s' % db_file)
# create and share a single sqlalchemy engine for testing
self.engine = sql.get_engine()
self.Session = db_session.get_maker(self.engine, autocommit=False)
self.initialize_sql()
self.repo_path = migration_helpers.find_migrate_repo(
self.repo_package())
self.schema = versioning_api.ControlledSchema.create(
self.engine,
self.repo_path, self.initial_db_version)
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
def tearDown(self):
sqlalchemy.orm.session.Session.close_all()
meta = sqlalchemy.MetaData()
meta.bind = self.engine
meta.reflect(self.engine)
for table in list(meta.tables.keys()):
table = sqlalchemy.Table(table, meta, autoload=True)
table.drop(self.engine, checkfirst=True)
sql.cleanup()
super(SqlMigrateBase, self).tearDown()
def select_table(self, name):
table = sqlalchemy.Table(name,
self.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertTableExists(self, table_name):
try:
self.select_table(table_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table_name)
def assertTableDoesNotExist(self, table_name):
"""Asserts that a given table exists cannot be selected by name."""
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
temp_metadata = sqlalchemy.MetaData()
temp_metadata.bind = self.engine
sqlalchemy.Table(table_name, temp_metadata, autoload=True)
except sqlalchemy.exc.NoSuchTableError:
pass
else:
raise AssertionError('Table "%s" already exists' % table_name)
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
def downgrade(self, *args, **kwargs):
self._migrate(*args, downgrade=True, **kwargs)
def _migrate(self, version, repository=None, downgrade=False,
current_schema=None):
repository = repository or self.repo_path
err = ''
version = versioning_api._migrate_version(self.schema,
version,
not downgrade,
err)
if not current_schema:
current_schema = self.schema
changeset = current_schema.changeset(version)
for ver, change in changeset:
self.schema.runchange(ver, change, changeset.step)
self.assertEqual(self.schema.version, version)
def assertTableColumns(self, table_name, expected_cols):
"""Asserts that the table contains the expected set of columns."""
self.initialize_sql()
table = self.select_table(table_name)
actual_cols = [col.name for col in table.columns]
self.assertEqual(expected_cols, actual_cols, '%s table' % table_name)
@property
def initial_db_version(self):
return getattr(self, '_initial_db_version', 0)
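# Editor's note -- illustrative sketch only (not part of the upstream file):
# migration tests built on SqlMigrateBase typically walk the schema to a
# target version and then assert on the resulting structure, e.g.:
#
#   class ExampleMigrateTests(SqlMigrateBase):
#       _initial_db_version = migration_helpers.DB_INIT_VERSION
#
#       def test_initial_role_columns(self):
#           self.upgrade(migration_helpers.DB_INIT_VERSION + 1)
#           self.assertTableColumns('role', ['id', 'name', 'extra'])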
class SqlUpgradeTests(SqlMigrateBase):
_initial_db_version = migration_helpers.DB_INIT_VERSION
def test_blank_db_to_start(self):
self.assertTableDoesNotExist('user')
def test_start_version_db_init_version(self):
version = migration.db_version(sql.get_engine(), self.repo_path,
migration_helpers.DB_INIT_VERSION)
self.assertEqual(
version,
migration_helpers.DB_INIT_VERSION,
'DB is not at version %s' % migration_helpers.DB_INIT_VERSION)
def test_two_steps_forward_one_step_back(self):
"""You should be able to cleanly undo and re-apply all upgrades.
Upgrades are run in the following order::
Starting with the initial version defined at
keystone.common.migration_helpers.DB_INIT_VERSION
INIT +1 -> INIT +2 -> INIT +1 -> INIT +2 -> INIT +3 -> INIT +2 ...
^---------------------^ ^---------------------^
Downgrade to the DB_INIT_VERSION does not occur based on the
requirement that the base version be DB_INIT_VERSION + 1 before
migration can occur. Downgrade below DB_INIT_VERSION + 1 is no longer
supported.
        DB_INIT_VERSION is the number preceding the release schema version
        from two releases prior. For example, Juno releases with a
        DB_INIT_VERSION of 35, where the Havana release schema version
        (Havana being two releases before Juno) is 36.
        The migrate utility requires that the db be initialized under version
        control at the revision directly before the first version to be
        applied.
"""
for x in range(migration_helpers.DB_INIT_VERSION + 1,
self.max_version + 1):
self.upgrade(x)
downgrade_ver = x - 1
# Don't actually downgrade to the init version. This will raise
# a not-implemented error.
if downgrade_ver != migration_helpers.DB_INIT_VERSION:
self.downgrade(x - 1)
self.upgrade(x)
def test_upgrade_add_initial_tables(self):
self.upgrade(migration_helpers.DB_INIT_VERSION + 1)
self.check_initial_table_structure()
def check_initial_table_structure(self):
for table in INITIAL_TABLE_STRUCTURE:
self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
# Ensure the default domain was properly created.
default_domain = migration_helpers.get_default_domain()
meta = sqlalchemy.MetaData()
meta.bind = self.engine
domain_table = sqlalchemy.Table('domain', meta, autoload=True)
session = self.Session()
q = session.query(domain_table)
refs = q.all()
self.assertEqual(1, len(refs))
for k in default_domain.keys():
self.assertEqual(default_domain[k], getattr(refs[0], k))
def test_downgrade_to_db_init_version(self):
self.upgrade(self.max_version)
if self.engine.name == 'mysql':
self._mysql_check_all_tables_innodb()
self.downgrade(migration_helpers.DB_INIT_VERSION + 1)
self.check_initial_table_structure()
meta = sqlalchemy.MetaData()
meta.bind = self.engine
meta.reflect(self.engine)
initial_table_set = set(INITIAL_TABLE_STRUCTURE.keys())
table_set = set(meta.tables.keys())
# explicitly remove the migrate_version table, this is not controlled
# by the migration scripts and should be exempt from this check.
table_set.remove('migrate_version')
self.assertSetEqual(initial_table_set, table_set)
# Downgrade to before Havana's release schema version (036) is not
# supported. A NotImplementedError should be raised when attempting to
# downgrade.
self.assertRaises(NotImplementedError, self.downgrade,
migration_helpers.DB_INIT_VERSION)
def insert_dict(self, session, table_name, d, table=None):
"""Naively inserts key-value pairs into a table, given a dictionary."""
if table is None:
this_table = sqlalchemy.Table(table_name, self.metadata,
autoload=True)
else:
this_table = table
insert = this_table.insert()
insert.execute(d)
session.commit()
def test_region_migration(self):
self.assertTableDoesNotExist('region')
self.upgrade(37)
self.assertTableExists('region')
self.downgrade(36)
self.assertTableDoesNotExist('region')
def test_assignment_table_migration(self):
def create_base_data(session):
domain_table = sqlalchemy.Table('domain', self.metadata,
autoload=True)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
group_table = sqlalchemy.Table('group', self.metadata,
autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
base_data = {}
# Create a Domain
base_data['domain'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(base_data['domain']))
# Create another Domain
base_data['domain2'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(base_data['domain2']))
# Create a Project
base_data['project'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(
project_table.insert().values(base_data['project']))
# Create another Project
base_data['project2'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(
project_table.insert().values(base_data['project2']))
# Create a User
base_data['user'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': "{}"}
session.execute(user_table.insert().values(base_data['user']))
# Create a Group
base_data['group'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(group_table.insert().values(base_data['group']))
# Create roles
base_data['roles'] = []
for _ in range(9):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
base_data['roles'].append(role)
return base_data
def populate_grants(session, base_data):
user_project_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
user_domain_table = sqlalchemy.Table(
'user_domain_metadata', self.metadata, autoload=True)
group_project_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
group_domain_table = sqlalchemy.Table(
'group_domain_metadata', self.metadata, autoload=True)
# Grant a role to user on project
grant = {'user_id': base_data['user']['id'],
'project_id': base_data['project']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][0]['id']}]})}
session.execute(user_project_table.insert().values(grant))
# Grant two roles to user on project2
grant = {'user_id': base_data['user']['id'],
'project_id': base_data['project2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][1]['id']},
{'id': base_data['roles'][2]['id']}]})}
session.execute(user_project_table.insert().values(grant))
# Grant role to group on project
grant = {'group_id': base_data['group']['id'],
'project_id': base_data['project']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][3]['id']}]})}
session.execute(group_project_table.insert().values(grant))
# Grant two roles to group on project2
grant = {'group_id': base_data['group']['id'],
'project_id': base_data['project2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][4]['id']},
{'id': base_data['roles'][5]['id']}]})}
session.execute(group_project_table.insert().values(grant))
# Grant two roles to group on domain, one inherited, one not
grant = {'group_id': base_data['group']['id'],
'domain_id': base_data['domain']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][6]['id']},
{'id': base_data['roles'][7]['id'],
'inherited_to': 'projects'}]})}
session.execute(group_domain_table.insert().values(grant))
# Grant inherited role to user on domain
grant = {'user_id': base_data['user']['id'],
'domain_id': base_data['domain']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][8]['id'],
'inherited_to': 'projects'}]})}
session.execute(user_domain_table.insert().values(grant))
# Grant two non-inherited roles to user on domain2, using roles
# that are also assigned to other actors/targets
grant = {'user_id': base_data['user']['id'],
'domain_id': base_data['domain2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][6]['id']},
{'id': base_data['roles'][7]['id']}]})}
session.execute(user_domain_table.insert().values(grant))
session.commit()
def check_grants(session, base_data):
user_project_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
user_domain_table = sqlalchemy.Table(
'user_domain_metadata', self.metadata, autoload=True)
group_project_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
group_domain_table = sqlalchemy.Table(
'group_domain_metadata', self.metadata, autoload=True)
s = sqlalchemy.select([user_project_table.c.data]).where(
(user_project_table.c.user_id == base_data['user']['id']) &
(user_project_table.c.project_id ==
base_data['project']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': base_data['roles'][0]['id']}, data['roles'])
s = sqlalchemy.select([user_project_table.c.data]).where(
(user_project_table.c.user_id == base_data['user']['id']) &
(user_project_table.c.project_id ==
base_data['project2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 2)
self.assertIn({'id': base_data['roles'][1]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][2]['id']}, data['roles'])
s = sqlalchemy.select([group_project_table.c.data]).where(
(group_project_table.c.group_id == base_data['group']['id']) &
(group_project_table.c.project_id ==
base_data['project']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': base_data['roles'][3]['id']}, data['roles'])
s = sqlalchemy.select([group_project_table.c.data]).where(
(group_project_table.c.group_id == base_data['group']['id']) &
(group_project_table.c.project_id ==
base_data['project2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 2)
self.assertIn({'id': base_data['roles'][4]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][5]['id']}, data['roles'])
s = sqlalchemy.select([group_domain_table.c.data]).where(
(group_domain_table.c.group_id == base_data['group']['id']) &
(group_domain_table.c.domain_id == base_data['domain']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 2)
self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][7]['id'],
'inherited_to': 'projects'}, data['roles'])
s = sqlalchemy.select([user_domain_table.c.data]).where(
(user_domain_table.c.user_id == base_data['user']['id']) &
(user_domain_table.c.domain_id == base_data['domain']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': base_data['roles'][8]['id'],
'inherited_to': 'projects'}, data['roles'])
s = sqlalchemy.select([user_domain_table.c.data]).where(
(user_domain_table.c.user_id == base_data['user']['id']) &
(user_domain_table.c.domain_id == base_data['domain2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 2)
self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][7]['id']}, data['roles'])
def check_assignments(session, base_data):
def check_assignment_type(refs, type):
for ref in refs:
self.assertEqual(ref.type, type)
assignment_table = sqlalchemy.Table(
'assignment', self.metadata, autoload=True)
refs = session.query(assignment_table).all()
self.assertEqual(len(refs), 11)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['project']['id'])
refs = q.all()
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0].role_id, base_data['roles'][0]['id'])
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['project2']['id'])
refs = q.all()
self.assertEqual(len(refs), 2)
role_ids = [base_data['roles'][1]['id'],
base_data['roles'][2]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['project']['id'])
refs = q.all()
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0].role_id, base_data['roles'][3]['id'])
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['project2']['id'])
refs = q.all()
self.assertEqual(len(refs), 2)
role_ids = [base_data['roles'][4]['id'],
base_data['roles'][5]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['domain']['id'])
refs = q.all()
self.assertEqual(len(refs), 2)
role_ids = [base_data['roles'][6]['id'],
base_data['roles'][7]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
if refs[0].role_id == base_data['roles'][7]['id']:
self.assertTrue(refs[0].inherited)
self.assertFalse(refs[1].inherited)
else:
self.assertTrue(refs[1].inherited)
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_DOMAIN)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['domain']['id'])
refs = q.all()
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0].role_id, base_data['roles'][8]['id'])
self.assertTrue(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_DOMAIN)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['domain2']['id'])
refs = q.all()
self.assertEqual(len(refs), 2)
role_ids = [base_data['roles'][6]['id'],
base_data['roles'][7]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_DOMAIN)
self.upgrade(37)
session = self.Session()
self.assertTableDoesNotExist('assignment')
base_data = create_base_data(session)
populate_grants(session, base_data)
check_grants(session, base_data)
session.commit()
session.close()
self.upgrade(40)
session = self.Session()
self.assertTableExists('assignment')
self.assertTableDoesNotExist('user_project_metadata')
self.assertTableDoesNotExist('group_project_metadata')
self.assertTableDoesNotExist('user_domain_metadata')
self.assertTableDoesNotExist('group_domain_metadata')
check_assignments(session, base_data)
session.close()
self.downgrade(37)
session = self.Session()
self.assertTableDoesNotExist('assignment')
check_grants(session, base_data)
session.close()
def test_limited_trusts_upgrade(self):
# make sure that the remaining_uses column is created
self.upgrade(41)
self.assertTableColumns('trust',
['id', 'trustor_user_id',
'trustee_user_id',
'project_id', 'impersonation',
'deleted_at',
'expires_at', 'extra',
'remaining_uses'])
def test_limited_trusts_downgrade(self):
# make sure that the remaining_uses column is removed
self.upgrade(41)
self.downgrade(40)
self.assertTableColumns('trust',
['id', 'trustor_user_id',
'trustee_user_id',
'project_id', 'impersonation',
'deleted_at',
'expires_at', 'extra'])
def test_limited_trusts_downgrade_trusts_cleanup(self):
# make sure that only trusts with unlimited uses are kept in the
# downgrade
self.upgrade(41)
session = self.Session()
limited_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': 5
}
consumed_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': 0
}
unlimited_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': None
}
self.insert_dict(session, 'trust', limited_trust)
self.insert_dict(session, 'trust', consumed_trust)
self.insert_dict(session, 'trust', unlimited_trust)
trust_table = sqlalchemy.Table(
'trust', self.metadata, autoload=True)
# we should have 3 trusts in base
self.assertEqual(3, session.query(trust_table).count())
self.downgrade(40)
session = self.Session()
trust_table = sqlalchemy.Table(
'trust', self.metadata, autoload=True)
# Now only one trust remains ...
self.assertEqual(1, session.query(trust_table.columns.id).count())
# ... and this trust is the one that was not limited in uses
self.assertEqual(
unlimited_trust['id'],
session.query(trust_table.columns.id).one()[0])
def test_upgrade_service_enabled_cols(self):
"""Migration 44 added `enabled` column to `service` table."""
self.upgrade(44)
# Verify that there's an 'enabled' field.
exp_cols = ['id', 'type', 'extra', 'enabled']
self.assertTableColumns('service', exp_cols)
def test_downgrade_service_enabled_cols(self):
"""Check columns when downgrade to migration 43.
The downgrade from migration 44 removes the `enabled` column from the
`service` table.
"""
self.upgrade(44)
self.downgrade(43)
exp_cols = ['id', 'type', 'extra']
self.assertTableColumns('service', exp_cols)
def test_upgrade_service_enabled_data(self):
"""Migration 44 has to migrate data from `extra` to `enabled`."""
session = self.Session()
def add_service(**extra_data):
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
}
self.insert_dict(session, 'service', service)
return service_id
self.upgrade(43)
# Different services with expected enabled and extra values, and a
# description.
random_attr_name = uuid.uuid4().hex
random_attr_value = uuid.uuid4().hex
random_attr = {random_attr_name: random_attr_value}
random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
random_attr_enabled_false = {random_attr_name: random_attr_value,
'enabled': False}
random_attr_enabled_false_str = 'enabled=False,%s' % random_attr_str
services = [
# Some values for True.
(add_service(), (True, {}), 'no enabled'),
(add_service(enabled=True), (True, {}), 'enabled=True'),
(add_service(enabled='true'), (True, {}), "enabled='true'"),
(add_service(**random_attr),
(True, random_attr), random_attr_str),
(add_service(enabled=None), (True, {}), 'enabled=None'),
# Some values for False.
(add_service(enabled=False), (False, {}), 'enabled=False'),
(add_service(enabled='false'), (False, {}), "enabled='false'"),
(add_service(enabled='0'), (False, {}), "enabled='0'"),
(add_service(**random_attr_enabled_false),
(False, random_attr), random_attr_enabled_false_str),
]
self.upgrade(44)
# Verify that the services have the expected values.
self.metadata.clear()
service_table = sqlalchemy.Table('service', self.metadata,
autoload=True)
def fetch_service(service_id):
cols = [service_table.c.enabled, service_table.c.extra]
f = service_table.c.id == service_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return ep.enabled, json.loads(ep.extra)
for service_id, exp, msg in services:
exp_enabled, exp_extra = exp
enabled, extra = fetch_service(service_id)
self.assertIs(exp_enabled, enabled, msg)
self.assertEqual(exp_extra, extra, msg)
def test_downgrade_service_enabled_data(self):
"""Downgrade from migration 44 migrates data.
Downgrade from migration 44 migrates data from `enabled` to
`extra`. Any disabled services have 'enabled': False put into 'extra'.
"""
session = self.Session()
def add_service(enabled=True, **extra_data):
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
'enabled': enabled
}
self.insert_dict(session, 'service', service)
return service_id
self.upgrade(44)
# Insert some services using the new format.
        # (This first service row just seeds the table and is not used below.)
service_id = add_service(True)
new_service = (lambda enabled, **extra_data:
add_service(enabled, **extra_data))
# Different services with expected extra values, and a
# description.
services = [
# True tests
(new_service(True), {}, 'enabled'),
(new_service(True, something='whatever'),
{'something': 'whatever'},
"something='whatever'"),
# False tests
(new_service(False), {'enabled': False}, 'enabled=False'),
(new_service(False, something='whatever'),
{'enabled': False, 'something': 'whatever'},
"enabled=False, something='whatever'"),
]
self.downgrade(43)
# Verify that the services have the expected values.
self.metadata.clear()
service_table = sqlalchemy.Table('service', self.metadata,
autoload=True)
def fetch_service(service_id):
cols = [service_table.c.extra]
f = service_table.c.id == service_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return json.loads(ep.extra)
for service_id, exp_extra, msg in services:
extra = fetch_service(service_id)
self.assertEqual(exp_extra, extra, msg)
def test_upgrade_endpoint_enabled_cols(self):
"""Migration 42 added `enabled` column to `endpoint` table."""
self.upgrade(42)
# Verify that there's an 'enabled' field.
exp_cols = ['id', 'legacy_endpoint_id', 'interface', 'region',
'service_id', 'url', 'extra', 'enabled']
self.assertTableColumns('endpoint', exp_cols)
def test_downgrade_endpoint_enabled_cols(self):
"""Check columns when downgrade from migration 41.
The downgrade from migration 42 removes the `enabled` column from the
`endpoint` table.
"""
self.upgrade(42)
self.downgrade(41)
exp_cols = ['id', 'legacy_endpoint_id', 'interface', 'region',
'service_id', 'url', 'extra']
self.assertTableColumns('endpoint', exp_cols)
def test_upgrade_endpoint_enabled_data(self):
"""Migration 42 has to migrate data from `extra` to `enabled`."""
session = self.Session()
def add_service():
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex
}
self.insert_dict(session, 'service', service)
return service_id
def add_endpoint(service_id, **extra_data):
endpoint_id = uuid.uuid4().hex
endpoint = {
'id': endpoint_id,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'extra': json.dumps(extra_data)
}
self.insert_dict(session, 'endpoint', endpoint)
return endpoint_id
self.upgrade(41)
# Insert some endpoints using the old format where `enabled` is in
# `extra` JSON.
# We'll need a service entry since it's the foreign key for endpoints.
service_id = add_service()
new_ep = lambda **extra_data: add_endpoint(service_id, **extra_data)
# Different endpoints with expected enabled and extra values, and a
# description.
random_attr_name = uuid.uuid4().hex
random_attr_value = uuid.uuid4().hex
random_attr = {random_attr_name: random_attr_value}
random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
random_attr_enabled_false = {random_attr_name: random_attr_value,
'enabled': False}
random_attr_enabled_false_str = 'enabled=False,%s' % random_attr_str
endpoints = [
# Some values for True.
(new_ep(), (True, {}), 'no enabled'),
(new_ep(enabled=True), (True, {}), 'enabled=True'),
(new_ep(enabled='true'), (True, {}), "enabled='true'"),
(new_ep(**random_attr),
(True, random_attr), random_attr_str),
(new_ep(enabled=None), (True, {}), 'enabled=None'),
# Some values for False.
(new_ep(enabled=False), (False, {}), 'enabled=False'),
(new_ep(enabled='false'), (False, {}), "enabled='false'"),
(new_ep(enabled='0'), (False, {}), "enabled='0'"),
(new_ep(**random_attr_enabled_false),
(False, random_attr), random_attr_enabled_false_str),
]
self.upgrade(42)
# Verify that the endpoints have the expected values.
self.metadata.clear()
endpoint_table = sqlalchemy.Table('endpoint', self.metadata,
autoload=True)
def fetch_endpoint(endpoint_id):
cols = [endpoint_table.c.enabled, endpoint_table.c.extra]
f = endpoint_table.c.id == endpoint_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return ep.enabled, json.loads(ep.extra)
for endpoint_id, exp, msg in endpoints:
exp_enabled, exp_extra = exp
enabled, extra = fetch_endpoint(endpoint_id)
self.assertIs(exp_enabled, enabled, msg)
self.assertEqual(exp_extra, extra, msg)
def test_downgrade_endpoint_enabled_data(self):
"""Downgrade from migration 42 migrates data.
Downgrade from migration 42 migrates data from `enabled` to
`extra`. Any disabled endpoints have 'enabled': False put into 'extra'.
"""
session = self.Session()
def add_service():
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex
}
self.insert_dict(session, 'service', service)
return service_id
def add_endpoint(service_id, enabled, **extra_data):
endpoint_id = uuid.uuid4().hex
endpoint = {
'id': endpoint_id,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
'enabled': enabled
}
self.insert_dict(session, 'endpoint', endpoint)
return endpoint_id
self.upgrade(42)
# Insert some endpoints using the new format.
# We'll need a service entry since it's the foreign key for endpoints.
service_id = add_service()
new_ep = (lambda enabled, **extra_data:
add_endpoint(service_id, enabled, **extra_data))
# Different endpoints with expected extra values, and a
# description.
endpoints = [
# True tests
(new_ep(True), {}, 'enabled'),
(new_ep(True, something='whatever'), {'something': 'whatever'},
"something='whatever'"),
# False tests
(new_ep(False), {'enabled': False}, 'enabled=False'),
(new_ep(False, something='whatever'),
{'enabled': False, 'something': 'whatever'},
"enabled=False, something='whatever'"),
]
self.downgrade(41)
# Verify that the endpoints have the expected values.
self.metadata.clear()
endpoint_table = sqlalchemy.Table('endpoint', self.metadata,
autoload=True)
def fetch_endpoint(endpoint_id):
cols = [endpoint_table.c.extra]
f = endpoint_table.c.id == endpoint_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return json.loads(ep.extra)
for endpoint_id, exp_extra, msg in endpoints:
extra = fetch_endpoint(endpoint_id)
self.assertEqual(exp_extra, extra, msg)
def test_upgrade_region_non_unique_description(self):
"""Test upgrade to migration 43.
This migration should occur with no unique constraint on the region
description column.
Create two regions with the same description.
"""
session = self.Session()
def add_region():
region_uuid = uuid.uuid4().hex
region = {
'id': region_uuid,
'description': ''
}
self.insert_dict(session, 'region', region)
return region_uuid
self.upgrade(43)
# Write one region to the database
add_region()
# Write another region to the database with the same description
add_region()
def test_upgrade_region_unique_description(self):
"""Test upgrade to migration 43.
This test models a migration where there is a unique constraint on the
description column.
Create two regions with the same description.
"""
session = self.Session()
def add_region(table):
region_uuid = uuid.uuid4().hex
region = {
'id': region_uuid,
'description': ''
}
self.insert_dict(session, 'region', region, table=table)
return region_uuid
def get_metadata():
meta = sqlalchemy.MetaData()
meta.bind = self.engine
return meta
# Migrate to version 42
self.upgrade(42)
region_table = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
# create the unique constraint and load the new version of the
# reflection cache
idx = sqlalchemy.Index('description', region_table.c.description,
unique=True)
idx.create(self.engine)
region_unique_table = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
add_region(region_unique_table)
self.assertEqual(1, session.query(region_unique_table).count())
# verify the unique constraint is enforced
self.assertRaises(sqlalchemy.exc.IntegrityError,
add_region,
table=region_unique_table)
# migrate to 43, unique constraint should be dropped
self.upgrade(43)
# reload the region table from the schema
region_nonunique = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
self.assertEqual(1, session.query(region_nonunique).count())
# Write a second region to the database with the same description
add_region(region_nonunique)
self.assertEqual(2, session.query(region_nonunique).count())
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
# table, depending on the parameters:
#
# Default: id, name, extra
# pass_enab: Add password, enabled as well
# pass_enab_domain: Add password, enabled and domain as well
#
this_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
for user in default_fixtures.USERS:
extra = copy.deepcopy(user)
extra.pop('id')
extra.pop('name')
if with_pass_enab:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_pass_enab_domain:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'domain_id': user['domain_id'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def populate_tenant_table(self, with_desc_enab=False,
with_desc_enab_domain=False):
# Populate the appropriate fields in the tenant or
# project table, depending on the parameters
#
# Default: id, name, extra
# desc_enab: Add description, enabled as well
# desc_enab_domain: Add description, enabled and domain as well,
# plus use project instead of tenant
#
if with_desc_enab_domain:
# By this time tenants are now projects
this_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
else:
this_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
for tenant in default_fixtures.TENANTS:
extra = copy.deepcopy(tenant)
extra.pop('id')
extra.pop('name')
if with_desc_enab:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_desc_enab_domain:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'domain_id': tenant['domain_id'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def _mysql_check_all_tables_innodb(self):
database = self.engine.url.database
connection = self.engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s'" %
dict(database=database))
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT table_name "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'" %
dict(database=database))
names = [x[0] for x in noninnodb]
self.assertEqual(names, [],
"Non-InnoDB tables exist")
connection.close()
class VersionTests(SqlMigrateBase):
_initial_db_version = migration_helpers.DB_INIT_VERSION
def test_core_initial(self):
"""Get the version before migrated, it's the initial DB version."""
version = migration_helpers.get_db_version()
self.assertEqual(migration_helpers.DB_INIT_VERSION, version)
def test_core_max(self):
"""When get the version after upgrading, it's the new version."""
self.upgrade(self.max_version)
version = migration_helpers.get_db_version()
self.assertEqual(self.max_version, version)
def test_extension_not_controlled(self):
"""When get the version before controlling, raises DbMigrationError."""
self.assertRaises(db_exception.DbMigrationError,
migration_helpers.get_db_version,
extension='federation')
def test_extension_initial(self):
"""When get the initial version of an extension, it's 0."""
abs_path = migration_helpers.find_migrate_repo(federation)
migration.db_version_control(sql.get_engine(), abs_path)
version = migration_helpers.get_db_version(extension='federation')
self.assertEqual(0, version)
def test_extension_migrated(self):
"""When get the version after migrating an extension, it's not 0."""
abs_path = migration_helpers.find_migrate_repo(federation)
migration.db_version_control(sql.get_engine(), abs_path)
migration.db_sync(sql.get_engine(), abs_path)
version = migration_helpers.get_db_version(extension='federation')
        self.assertTrue(version > 0, "Version didn't change after migration?")
def test_unexpected_extension(self):
"""The version for an extension that doesn't exist raises ImportError.
"""
extension_name = uuid.uuid4().hex
self.assertRaises(ImportError,
migration_helpers.get_db_version,
extension=extension_name)
def test_unversioned_extension(self):
"""The version for extensions without migrations raise an exception.
"""
self.assertRaises(exception.MigrationNotProvided,
migration_helpers.get_db_version,
extension='access')
|
atheendra/access_keys
|
keystone/tests/test_sql_upgrade.py
|
Python
|
apache-2.0
| 54,893
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
install_requires = [
'cloudify-rest-client==3.2rc1',
'pika==0.9.13',
'networkx==1.8.1',
'proxy_tools==0.1.0',
'bottle==0.12.7'
]
try:
import importlib # noqa
except ImportError:
install_requires.append('importlib')
try:
import argparse # NOQA
except ImportError:
install_requires.append('argparse==1.2.2')
setup(
name='cloudify-plugins-common',
version='3.2rc1',
author='cosmo-admin',
author_email='cosmo-admin@gigaspaces.com',
packages=['cloudify',
'cloudify.workflows',
'cloudify.plugins',
'cloudify.proxy'],
license='LICENSE',
description='Contains necessary decorators and utility methods for '
'writing Cloudify plugins',
zip_safe=False,
install_requires=install_requires,
entry_points={
'console_scripts': [
'ctx = cloudify.proxy.client:main',
]
}
)
|
geokala/cloudify-plugins-common
|
setup.py
|
Python
|
apache-2.0
| 1,607
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from horizon import workflows
import logging
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.vpn import tabs as vpn_tabs
from openstack_dashboard.dashboards.project.vpn import \
workflows as vpn_workflow
import re
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabbedTableView):
tab_group_class = vpn_tabs.VPNTabs
template_name = 'project/vpn/index.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
m = re.search('.delete([a-z]+)', action).group(1)
        if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if m == 'vpnservice':
for obj_id in obj_ids:
try:
api.vpn.vpnservice_delete(request, obj_id)
except Exception:
exceptions.handle(request,
_('Unable to delete VPN Service.'))
elif m == 'ikepolicy':
for obj_id in obj_ids:
try:
api.vpn.ikepolicy_delete(request, obj_id)
except Exception:
exceptions.handle(request,
_('Unable to delete IKE Policy.'))
elif m == 'ipsecpolicy':
for obj_id in obj_ids:
try:
api.vpn.ipsecpolicy_delete(request, obj_id)
except Exception:
exceptions.handle(request,
_('Unable to delete IPSec Policy.'))
elif m == 'ipsecsiteconnection':
for obj_id in obj_ids:
try:
api.vpn.ipsecsiteconnection_delete(request, obj_id)
except Exception:
exceptions.handle(request,
_('Unable to delete IPSec Site Connection.'))
return self.get(request, *args, **kwargs)
class AddVPNServiceView(workflows.WorkflowView):
workflow_class = vpn_workflow.AddVPNService
def get_initial(self):
initial = super(AddVPNServiceView, self).get_initial()
return initial
class AddIPSecSiteConnectionView(workflows.WorkflowView):
workflow_class = vpn_workflow.AddIPSecSiteConnection
def get_initial(self):
initial = super(AddIPSecSiteConnectionView, self).get_initial()
return initial
class AddIKEPolicyView(workflows.WorkflowView):
workflow_class = vpn_workflow.AddIKEPolicy
def get_initial(self):
initial = super(AddIKEPolicyView, self).get_initial()
return initial
class AddIPSecPolicyView(workflows.WorkflowView):
workflow_class = vpn_workflow.AddIPSecPolicy
def get_initial(self):
initial = super(AddIPSecPolicyView, self).get_initial()
return initial
class IKEPolicyDetailsView(tabs.TabView):
tab_group_class = (vpn_tabs.IKEPolicyDetailsTabs)
template_name = 'project/vpn/details_tabs.html'
class IPSecPolicyDetailsView(tabs.TabView):
tab_group_class = (vpn_tabs.IPSecPolicyDetailsTabs)
template_name = 'project/vpn/details_tabs.html'
class VPNServiceDetailsView(tabs.TabView):
tab_group_class = (vpn_tabs.VPNServiceDetailsTabs)
template_name = 'project/vpn/details_tabs.html'
class IPSecSiteConnectionDetailsView(tabs.TabView):
tab_group_class = (vpn_tabs.IPSecSiteConnectionDetailsTabs)
template_name = 'project/vpn/details_tabs.html'
|
r-icarus/openstack_microserver
|
openstack_dashboard/dashboards/project/vpn/views.py
|
Python
|
apache-2.0
| 4,302
|
# coding: utf-8
from __future__ import absolute_import
import requests
import json
import flockos
def call_api(resource_path, params=None):
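    """POST the JSON-encoded params to flockos.base_url + resource_path and return the decoded response."""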
base_url = flockos.base_url
url = base_url + resource_path
response_data = requests.post(url, data=json.dumps(params))
return json.loads(response_data.text)
|
flockchat/pyflock
|
flockos/api_client.py
|
Python
|
apache-2.0
| 316
|
from win32com.client import Dispatch
from win32com.client import VARIANT as variant
from pythoncom import VT_UNKNOWN, VT_EMPTY, VT_UI8
import sys
import matplotlib.pyplot as plt
fileName = sys.argv[1]
obj = Dispatch('MSFileReader.XRawFile')
obj.open(fileName)
obj.SetCurrentController(0, 1)
numSpec = obj.GetNumSpectra()
print(' ')
print(' ')
print('================')
print('Number of Spectra: ' + str(numSpec))
lowMass = obj.GetLowMass()
highMass = obj.GetHighMass()
print('Mass Range: ' + str(lowMass) + '-' + str(highMass))
pdrt = obj.RTFromScanNum(1)
pdrt1 = obj.RTFromScanNum(numSpec - 1)
# The VARIANT type is very important!
dummyVariant0 = variant(VT_UNKNOWN, 0)
dummyVariant1 = variant(VT_UNKNOWN, [])
dummyVariant2 = variant(VT_EMPTY, [])
dummyVariant3 = variant(VT_UI8, 0)
# Don't ask me why it works. It works...
temp = obj.GetMassListFromRT(pdrt, '', 0, 0, 0, 0, dummyVariant0.value, dummyVariant1, dummyVariant1)
massList = temp[2]
temp = obj.GetChroData(1, 0, 0, '', '', '', 0.0, pdrt, pdrt1, 0, 0, dummyVariant2, dummyVariant2, dummyVariant3.value)
chroList = temp[2]
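# Plot the first scan's mass spectrum (top panel) and the chromatogram over the
# full retention-time range (bottom panel).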
plt.subplot(211)
plt.plot(massList[0], massList[1])
plt.xlabel('$Mass (Th)$')
plt.ylabel('$Counts (s^{-1})$')
plt.xlim((lowMass, highMass))
plt.subplot(212)
plt.plot(chroList[0], chroList[1])
plt.xlabel('$Time (min)$')
plt.ylabel('$Counts (s^{-1})$')
plt.xlim((pdrt, pdrt1))
plt.show()
|
PhysicalChemist/ThermoRaw
|
thermoRawReader.py
|
Python
|
apache-2.0
| 1,392
|
import unittest
class WheelTests(unittest.TestCase):
def _getTargetClass(self):
from pkginfo.wheel import Wheel
return Wheel
def _makeOne(self, filename=None, metadata_version=None):
if metadata_version is not None:
return self._getTargetClass()(filename, metadata_version)
return self._getTargetClass()(filename)
def _checkSample(self, wheel, filename):
self.assertEqual(wheel.filename, filename)
self.assertEqual(wheel.name, 'mypackage')
self.assertEqual(wheel.version, '0.1')
self.assertEqual(wheel.keywords, None)
def _checkClassifiers(self, wheel):
self.assertEqual(list(wheel.classifiers),
['Development Status :: 4 - Beta',
'Environment :: Console (Text Based)',
])
self.assertEqual(list(wheel.supported_platforms), [])
def test_ctor_w_bogus_filename(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/nonesuch-0.1-any.whl' % d
self.assertRaises(ValueError, self._makeOne, filename)
def test_ctor_w_non_wheel(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.zip' % d
self.assertRaises(ValueError, self._makeOne, filename)
def test_ctor_wo_dist_info(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/nodistinfo-0.1-any.whl' % d
self.assertRaises(ValueError, self._makeOne, filename)
def test_ctor_w_valid_wheel(self):
import os
d, _ = os.path.split(__file__)
filename = ('%s/../../docs/examples/'
'mypackage-0.1-cp26-none-linux_x86_64.whl') % d
wheel = self._makeOne(filename)
self.assertEqual(wheel.metadata_version, '2.0')
self._checkSample(wheel, filename)
self._checkClassifiers(wheel)
def test_ctor_w_installed_wheel(self):
import os
d, _ = os.path.split(__file__)
filename = (
'%s/../../docs/examples/mypackage-0.1.dist-info') % d
wheel = self._makeOne(filename)
self.assertEqual(wheel.metadata_version, '2.0')
self._checkSample(wheel, filename)
self._checkClassifiers(wheel)
def test_ctor_w_valid_wheel_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = ('%s/../../docs/examples/'
'mypackage-0.1-cp26-none-linux_x86_64.whl') % d
wheel = self._makeOne(filename, metadata_version='1.1')
self.assertEqual(wheel.metadata_version, '1.1')
self._checkSample(wheel, filename)
self._checkClassifiers(wheel)
def test_ctor_w_valid_installed_wheel(self):
import os
import shutil
import tempfile
import zipfile
d, _ = os.path.split(__file__)
filename = ('%s/../../docs/examples/'
'mypackage-0.1-cp26-none-linux_x86_64.whl') % d
        # note: we mock a wheel installation by unzipping
        test_dir = tempfile.mkdtemp()
        try:
            with zipfile.ZipFile(filename) as zipf:
                zipf.extractall(test_dir)
wheel = self._makeOne(filename)
self.assertEqual(wheel.metadata_version, '2.0')
self._checkSample(wheel, filename)
self._checkClassifiers(wheel)
finally:
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/pkginfo/tests/test_wheel.py
|
Python
|
apache-2.0
| 3,582
|
# =========================================================================
# Copyright 2016-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import os
import sys
from .base import BaseAction
from ...misc.utils import prints_body, json_dumps
from ..constants import (
BUFSIZE,
HTTP_OK,
HTTP_OK_CREATED,
HTTP_OK_NO_CONTENT,
HTTP_OK_PARTIAL_CONTENT,
)
class CreateObjectAction(BaseAction):
command = "create-object"
usage = "%(prog)s -b <bucket> -k <key> -d <data> [-t <type> -f <conf_file>]"
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument(
"-b",
"--bucket",
dest="bucket",
required=True,
help="The bucket name"
)
parser.add_argument(
"-k",
"--key",
dest="key",
help="The object name"
)
parser.add_argument(
"-F",
"--file",
dest="file",
help="The object file"
)
parser.add_argument(
"-d",
"--data",
dest="data",
help="The object data"
)
parser.add_argument(
"-t",
"--type",
dest="type",
default="application/octet-stream",
help="The object type"
)
return parser
@classmethod
def send_request(cls, options):
if options.file:
if not os.path.isfile(options.file):
print("No such file: %s" % options.file)
sys.exit(-1)
key = options.key or os.path.basename(options.file)
data = open(options.file, "rb")
elif options.data:
key = options.key
if not key:
print("Must specify --key parameter")
sys.exit(-1)
data = options.data
else:
print("Must specify --file or --data parameter")
sys.exit(1)
headers = {}
if options.type:
headers["Content-Type"] = options.type
resp = cls.conn.make_request("PUT", options.bucket, key,
headers=headers, data=data)
if resp.status != HTTP_OK_CREATED:
prints_body(resp)
class GetObjectAction(BaseAction):
command = "get-object"
usage = "%(prog)s -b <bucket> -k <key> [-F <file> -B <bytes> -z <zone> -f <conf_file>]"
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument(
"-b",
"--bucket",
dest="bucket",
required=True,
help="The bucket name"
)
parser.add_argument(
"-k",
"--key",
dest="key",
required=True,
help="The object name"
)
parser.add_argument(
"-F",
"--file",
dest="file",
help="The file that the object content should save to"
)
parser.add_argument(
"-B",
"--bytes",
dest="bytes",
help="The object data range"
)
return parser
@classmethod
def send_request(cls, options):
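        # Resolve the local destination: a given directory joined with the key,
        # an explicit file path, or <cwd>/<key>.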
if options.file:
if os.path.isdir(options.file):
path = "%s/%s" % (options.file, options.key)
else:
path = options.file
else:
path = "%s/%s" % (os.getcwd(), options.key)
directory = os.path.dirname(path)
if not os.path.isdir(directory):
print("No such directory: %s" % directory)
sys.exit(-1)
headers = {}
if options.bytes:
headers["Range"] = "bytes=%s" % options.bytes
resp = cls.conn.make_request("GET", options.bucket, options.key, headers=headers)
if resp.status in (HTTP_OK, HTTP_OK_PARTIAL_CONTENT):
with open(path, "wb") as f:
while True:
buf = resp.read(BUFSIZE)
if not buf:
break
f.write(buf)
else:
prints_body(resp)
class DeleteObjectAction(BaseAction):
command = "delete-object"
usage = "%(prog)s -b <bucket> -k <key> [-f <conf_file>]"
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument(
"-b",
"--bucket",
dest="bucket",
type=str,
required=True,
help="The bucket name"
)
parser.add_argument(
"-k",
"--key",
dest="key",
required=True,
help="The object name"
)
return parser
@classmethod
def send_request(cls, options):
resp = cls.conn.make_request("DELETE", options.bucket, options.key)
if resp.status != HTTP_OK_NO_CONTENT:
prints_body(resp)
class HeadObjectAction(BaseAction):
command = "head-object"
usage = "%(prog)s -b <bucket> -k <key> [-f <conf_file>]"
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument(
"-b",
"--bucket",
dest="bucket",
action="store",
type=str,
required=True,
help="The bucket name"
)
parser.add_argument(
"-k",
"--key",
dest="key",
required=True,
help="The object name"
)
return parser
@classmethod
def send_request(cls, options):
resp = cls.conn.make_request("HEAD", options.bucket, options.key)
if resp.status == HTTP_OK:
data = {
"Content-Length": resp.getheader("content-length"),
"Content-Type": resp.getheader("content-type"),
"ETag": resp.getheader("etag"),
"Last-Modified": resp.getheader("last-modified")
}
print(json_dumps(data, indent=2))
else:
print("Error: %s %s" % (resp.status, resp.reason))
|
yunify/qingcloud-cli
|
qingcloud/cli/qs_client/actions/key.py
|
Python
|
apache-2.0
| 6,790
|
import unittest
import subprocess
class TestJupyterNbconvert(unittest.TestCase):
def test_nbconvert(self):
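        # Execute the sample notebook through nbconvert and check that the expected cell output appears.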
result = subprocess.run([
'jupyter',
'nbconvert',
'--to',
'notebook',
'--template',
'/opt/kaggle/nbconvert-extensions.tpl',
'--execute',
'--stdout',
'/input/tests/data/notebook.ipynb',
], stdout=subprocess.PIPE)
self.assertEqual(0, result.returncode)
        self.assertIn(b'999', result.stdout)
|
Kaggle/docker-python
|
tests/test_jupyter_nbconvert.py
|
Python
|
apache-2.0
| 545
|
#!/usr/bin/python
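# Micro-benchmark: a ten-deep chain of function calls, executed 100000 times.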
def a(val):
return b(val)
def b(val):
return c(val)
def c(val):
return d(val)
def d(val):
return e(val)
def e(val):
return f(val)
def f(val):
return g(val, 2)
def g(v1, v2):
return h(v1, v2, 3)
def h(v1, v2, v3):
return i(v1, v2, v3, 4)
def i(v1, v2, v3, v4):
return j(v1, v2, v3, v4, 5)
def j(v1, v2, v3, v4, v5):
return v1 + v2 + v3 + v4 + v5
n = 100000
while n > 0:
x = a(n)
n = n - 1
print "x=%d" % x
|
bitkeeper-scm/bitkeeper
|
src/gui/tcltk/tcl/tests/langbench/proc.py
|
Python
|
apache-2.0
| 440
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import time
import unittest
import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.evaluation import SampleBatch
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.optimizers import AsyncGradientsOptimizer, AsyncSamplesOptimizer
from ray.rllib.optimizers.aso_tree_aggregator import TreeAggregator
from ray.rllib.tests.mock_worker import _MockWorker
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class AsyncOptimizerTest(unittest.TestCase):
def tearDown(self):
ray.shutdown()
def testBasic(self):
ray.init(num_cpus=4)
local = _MockWorker()
remotes = ray.remote(_MockWorker)
remote_workers = [remotes.remote() for i in range(5)]
workers = WorkerSet._from_existing(local, remote_workers)
test_optimizer = AsyncGradientsOptimizer(workers, grads_per_step=10)
test_optimizer.step()
self.assertTrue(all(local.get_weights() == 0))
class PPOCollectTest(unittest.TestCase):
def tearDown(self):
ray.shutdown()
def testPPOSampleWaste(self):
ray.init(num_cpus=4)
# Check we at least collect the initial wave of samples
ppo = PPOTrainer(
env="CartPole-v0",
config={
"sample_batch_size": 200,
"train_batch_size": 128,
"num_workers": 3,
})
ppo.train()
self.assertEqual(ppo.optimizer.num_steps_sampled, 600)
ppo.stop()
# Check we collect at least the specified amount of samples
ppo = PPOTrainer(
env="CartPole-v0",
config={
"sample_batch_size": 200,
"train_batch_size": 900,
"num_workers": 3,
})
ppo.train()
self.assertEqual(ppo.optimizer.num_steps_sampled, 1000)
ppo.stop()
# Check in vectorized mode
ppo = PPOTrainer(
env="CartPole-v0",
config={
"sample_batch_size": 200,
"num_envs_per_worker": 2,
"train_batch_size": 900,
"num_workers": 3,
})
ppo.train()
self.assertEqual(ppo.optimizer.num_steps_sampled, 1200)
ppo.stop()
# Check legacy mode
ppo = PPOTrainer(
env="CartPole-v0",
config={
"sample_batch_size": 200,
"train_batch_size": 128,
"num_workers": 3,
"straggler_mitigation": True,
})
ppo.train()
self.assertEqual(ppo.optimizer.num_steps_sampled, 200)
ppo.stop()
class SampleBatchTest(unittest.TestCase):
def testConcat(self):
b1 = SampleBatch({"a": np.array([1, 2, 3]), "b": np.array([4, 5, 6])})
b2 = SampleBatch({"a": np.array([1]), "b": np.array([4])})
b3 = SampleBatch({"a": np.array([1]), "b": np.array([5])})
b12 = b1.concat(b2)
self.assertEqual(b12["a"].tolist(), [1, 2, 3, 1])
self.assertEqual(b12["b"].tolist(), [4, 5, 6, 4])
b = SampleBatch.concat_samples([b1, b2, b3])
self.assertEqual(b["a"].tolist(), [1, 2, 3, 1, 1])
self.assertEqual(b["b"].tolist(), [4, 5, 6, 4, 5])
class AsyncSamplesOptimizerTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
ray.shutdown()
@classmethod
def setUpClass(cls):
ray.init(num_cpus=8)
def testSimple(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(workers)
self._wait_for(optimizer, 1000, 1000)
def testMultiGPU(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(workers, num_gpus=2, _fake_gpus=True)
self._wait_for(optimizer, 1000, 1000)
def testMultiGPUParallelLoad(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(
workers, num_gpus=2, num_data_loader_buffers=2, _fake_gpus=True)
self._wait_for(optimizer, 1000, 1000)
def testMultiplePasses(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(
workers,
minibatch_buffer_size=10,
num_sgd_iter=10,
sample_batch_size=10,
train_batch_size=50)
self._wait_for(optimizer, 1000, 10000)
self.assertLess(optimizer.stats()["num_steps_sampled"], 5000)
self.assertGreater(optimizer.stats()["num_steps_trained"], 8000)
def testReplay(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(
workers,
replay_buffer_num_slots=100,
replay_proportion=10,
sample_batch_size=10,
train_batch_size=10,
)
self._wait_for(optimizer, 1000, 1000)
stats = optimizer.stats()
self.assertLess(stats["num_steps_sampled"], 5000)
replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
self.assertGreater(replay_ratio, 0.7)
self.assertLess(stats["num_steps_trained"], stats["num_steps_sampled"])
def testReplayAndMultiplePasses(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
optimizer = AsyncSamplesOptimizer(
workers,
minibatch_buffer_size=10,
num_sgd_iter=10,
replay_buffer_num_slots=100,
replay_proportion=10,
sample_batch_size=10,
train_batch_size=10)
self._wait_for(optimizer, 1000, 1000)
stats = optimizer.stats()
print(stats)
self.assertLess(stats["num_steps_sampled"], 5000)
replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
train_ratio = stats["num_steps_sampled"] / stats["num_steps_trained"]
self.assertGreater(replay_ratio, 0.7)
self.assertLess(train_ratio, 0.4)
def testMultiTierAggregationBadConf(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
aggregators = TreeAggregator.precreate_aggregators(4)
optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=4)
self.assertRaises(ValueError,
lambda: optimizer.aggregator.init(aggregators))
def testMultiTierAggregation(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
aggregators = TreeAggregator.precreate_aggregators(1)
optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=1)
optimizer.aggregator.init(aggregators)
self._wait_for(optimizer, 1000, 1000)
def testRejectBadConfigs(self):
local, remotes = self._make_evs()
workers = WorkerSet._from_existing(local, remotes)
self.assertRaises(
ValueError, lambda: AsyncSamplesOptimizer(
local, remotes,
num_data_loader_buffers=2, minibatch_buffer_size=4))
optimizer = AsyncSamplesOptimizer(
workers,
num_gpus=2,
train_batch_size=100,
sample_batch_size=50,
_fake_gpus=True)
self._wait_for(optimizer, 1000, 1000)
optimizer = AsyncSamplesOptimizer(
workers,
num_gpus=2,
train_batch_size=100,
sample_batch_size=25,
_fake_gpus=True)
self._wait_for(optimizer, 1000, 1000)
optimizer = AsyncSamplesOptimizer(
workers,
num_gpus=2,
train_batch_size=100,
sample_batch_size=74,
_fake_gpus=True)
self._wait_for(optimizer, 1000, 1000)
def _make_evs(self):
def make_sess():
return tf.Session(config=tf.ConfigProto(device_count={"CPU": 2}))
local = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=PPOTFPolicy,
tf_session_creator=make_sess)
remotes = [
RolloutWorker.as_remote().remote(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=PPOTFPolicy,
tf_session_creator=make_sess)
]
return local, remotes
def _wait_for(self, optimizer, num_steps_sampled, num_steps_trained):
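        # Step the optimizer until both the sampled and trained step counters pass
        # their thresholds, or fail after a 30-second timeout.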
start = time.time()
while time.time() - start < 30:
optimizer.step()
if optimizer.num_steps_sampled > num_steps_sampled and \
optimizer.num_steps_trained > num_steps_trained:
print("OK", optimizer.stats())
return
raise AssertionError("TIMED OUT", optimizer.stats())
if __name__ == "__main__":
unittest.main(verbosity=2)
|
atumanov/ray
|
python/ray/rllib/tests/test_optimizers.py
|
Python
|
apache-2.0
| 9,360
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.core_tasks.bash_completion import BashCompletion
from pants_test.task_test_base import ConsoleTaskTestBase
class MockedBashCompletion(BashCompletion):
"""A version of the BashCompletion, with the help introspection mocked out."""
def get_autocomplete_options_by_scope(self):
return {'': []}
class BashCompletionTest(ConsoleTaskTestBase):
@classmethod
def task_type(cls):
return MockedBashCompletion
def test_bash_completion_loads_template(self):
self.assert_console_output_contains("# Pants Autocompletion Support")
|
baroquebobcat/pants
|
tests/python/pants_test/core_tasks/test_bash_completion.py
|
Python
|
apache-2.0
| 855
|
"""Test zha device discovery."""
import re
from unittest import mock
import pytest
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.discovery as disc
import homeassistant.components.zha.core.gateway as core_zha_gw
import homeassistant.helpers.entity_registry
from .common import get_zha_gateway
from .zha_devices_list import DEVICES
NO_TAIL_ID = re.compile("_\\d$")
@pytest.mark.parametrize("device", DEVICES)
async def test_devices(
device, hass, zigpy_device_mock, monkeypatch, zha_device_joined_restored
):
"""Test device discovery."""
zigpy_device = zigpy_device_mock(
device["endpoints"],
"00:11:22:33:44:55:66:77",
device["manufacturer"],
device["model"],
node_descriptor=device["node_descriptor"],
)
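    # Wrap the discovery dispatch so each discovery_info payload can be inspected below.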
_dispatch = mock.MagicMock(wraps=disc.async_dispatch_discovery_info)
monkeypatch.setattr(core_zha_gw, "async_dispatch_discovery_info", _dispatch)
entity_registry = await homeassistant.helpers.entity_registry.async_get_registry(
hass
)
with mock.patch(
"homeassistant.components.zha.core.discovery._async_create_cluster_channel",
wraps=disc._async_create_cluster_channel,
):
await zha_device_joined_restored(zigpy_device)
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids()
await hass.async_block_till_done()
zha_entities = {
ent for ent in entity_ids if ent.split(".")[0] in zha_const.COMPONENTS
}
zha_gateway = get_zha_gateway(hass)
zha_dev = zha_gateway.get_device(zigpy_device.ieee)
event_channels = { # pylint: disable=protected-access
ch.id for ch in zha_dev._relay_channels.values()
}
assert zha_entities == set(device["entities"])
assert event_channels == set(device["event_channels"])
entity_map = device["entity_map"]
for calls in _dispatch.call_args_list:
discovery_info = calls[0][2]
unique_id = discovery_info["unique_id"]
channels = discovery_info["channels"]
component = discovery_info["component"]
key = (component, unique_id)
entity_id = entity_registry.async_get_entity_id(component, "zha", unique_id)
assert key in entity_map
assert entity_id is not None
no_tail_id = NO_TAIL_ID.sub("", entity_map[key]["entity_id"])
assert entity_id.startswith(no_tail_id)
assert set([ch.name for ch in channels]) == set(entity_map[key]["channels"])
|
postlund/home-assistant
|
tests/components/zha/test_discover.py
|
Python
|
apache-2.0
| 2,626
|
# Copyright 2017, Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from google.cloud.future import operation
from google.longrunning import operations_pb2
from google.protobuf import struct_pb2
from google.rpc import code_pb2
from google.rpc import status_pb2
TEST_OPERATION_NAME = 'test/operation'
def make_operation_proto(
name=TEST_OPERATION_NAME, metadata=None, response=None,
error=None, **kwargs):
operation_proto = operations_pb2.Operation(
name=name, **kwargs)
if metadata is not None:
operation_proto.metadata.Pack(metadata)
if response is not None:
operation_proto.response.Pack(response)
if error is not None:
operation_proto.error.CopyFrom(error)
return operation_proto
def make_operation_future(client_operations_responses=None):
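    # Build an Operation future whose refresh() returns the given protos in order
    # and whose cancel() is a plain mock.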
if client_operations_responses is None:
client_operations_responses = [make_operation_proto()]
refresh = mock.Mock(
spec=['__call__'], side_effect=client_operations_responses)
refresh.responses = client_operations_responses
cancel = mock.Mock(spec=['__call__'])
operation_future = operation.Operation(
client_operations_responses[0],
refresh,
cancel,
result_type=struct_pb2.Struct,
metadata_type=struct_pb2.Struct)
return operation_future, refresh, cancel
def test_constructor():
future, refresh, _ = make_operation_future()
assert future.operation == refresh.responses[0]
assert future.operation.done is False
assert future.operation.name == TEST_OPERATION_NAME
assert future.metadata is None
assert future.running()
def test_metadata():
expected_metadata = struct_pb2.Struct()
future, _, _ = make_operation_future(
[make_operation_proto(metadata=expected_metadata)])
assert future.metadata == expected_metadata
def test_cancellation():
responses = [
make_operation_proto(),
# Second response indicates that the operation was cancelled.
make_operation_proto(
done=True,
error=status_pb2.Status(code=code_pb2.CANCELLED))]
future, _, cancel = make_operation_future(responses)
assert future.cancel()
assert future.cancelled()
cancel.assert_called_once_with()
# Cancelling twice should have no effect.
assert not future.cancel()
cancel.assert_called_once_with()
def test_result():
expected_result = struct_pb2.Struct()
responses = [
make_operation_proto(),
# Second operation response includes the result.
make_operation_proto(done=True, response=expected_result)]
future, _, _ = make_operation_future(responses)
result = future.result()
assert result == expected_result
assert future.done()
def test_exception():
expected_exception = status_pb2.Status(message='meep')
responses = [
make_operation_proto(),
# Second operation response includes the error.
make_operation_proto(done=True, error=expected_exception)]
future, _, _ = make_operation_future(responses)
exception = future.exception()
assert expected_exception.message in '{!r}'.format(exception)
def test_unexpected_result():
responses = [
make_operation_proto(),
        # Second operation response is done, but has no error or response.
make_operation_proto(done=True)]
future, _, _ = make_operation_future(responses)
exception = future.exception()
assert 'Unexpected state' in '{!r}'.format(exception)
def test__refresh_http():
api_request = mock.Mock(
return_value={'name': TEST_OPERATION_NAME, 'done': True})
result = operation._refresh_http(api_request, TEST_OPERATION_NAME)
assert result.name == TEST_OPERATION_NAME
assert result.done is True
api_request.assert_called_once_with(
method='GET', path='operations/{}'.format(TEST_OPERATION_NAME))
def test__cancel_http():
api_request = mock.Mock()
operation._cancel_http(api_request, TEST_OPERATION_NAME)
api_request.assert_called_once_with(
method='POST', path='operations/{}:cancel'.format(TEST_OPERATION_NAME))
def test_from_http_json():
operation_json = {'name': TEST_OPERATION_NAME, 'done': True}
api_request = mock.sentinel.api_request
future = operation.from_http_json(
operation_json, api_request, struct_pb2.Struct,
metadata_type=struct_pb2.Struct)
assert future._result_type == struct_pb2.Struct
assert future._metadata_type == struct_pb2.Struct
assert future.operation.name == TEST_OPERATION_NAME
assert future.done
def test__refresh_grpc():
operations_stub = mock.Mock(spec=['GetOperation'])
expected_result = make_operation_proto(done=True)
operations_stub.GetOperation.return_value = expected_result
result = operation._refresh_grpc(operations_stub, TEST_OPERATION_NAME)
assert result == expected_result
expected_request = operations_pb2.GetOperationRequest(
name=TEST_OPERATION_NAME)
operations_stub.GetOperation.assert_called_once_with(expected_request)
def test__cancel_grpc():
operations_stub = mock.Mock(spec=['CancelOperation'])
operation._cancel_grpc(operations_stub, TEST_OPERATION_NAME)
expected_request = operations_pb2.CancelOperationRequest(
name=TEST_OPERATION_NAME)
operations_stub.CancelOperation.assert_called_once_with(expected_request)
def test_from_grpc():
operation_proto = make_operation_proto(done=True)
operations_stub = mock.sentinel.operations_stub
future = operation.from_grpc(
operation_proto, operations_stub, struct_pb2.Struct,
metadata_type=struct_pb2.Struct)
assert future._result_type == struct_pb2.Struct
assert future._metadata_type == struct_pb2.Struct
assert future.operation.name == TEST_OPERATION_NAME
assert future.done
|
tartavull/google-cloud-python
|
core/tests/unit/future/test_operation.py
|
Python
|
apache-2.0
| 6,408
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""Utility functions used across Superset"""
# Superset framework imports
from superset import app
from superset.utils.core import get_celery_app
# Globals
config = app.config
app = get_celery_app(config)
|
airbnb/caravel
|
superset/tasks/celery_app.py
|
Python
|
apache-2.0
| 1,017
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cgpm.uncorrelated.undirected import UnDirectedXyGpm
from cgpm.utils import general as gu
from cgpm.utils import mvnormal as multivariate_normal
class XCross(UnDirectedXyGpm):
"""Y = (+/- w.p .5) X + N(0,noise)."""
def simulate_joint(self):
if self.rng.rand() < .5:
cov = np.array([[1,1-self.noise],[1-self.noise,1]])
else:
cov = np.array([[1,-1+self.noise],[-1+self.noise,1]])
return self.rng.multivariate_normal([0,0], cov=cov)
def logpdf_joint(self, x, y):
X = np.array([x, y])
Mu = np.array([0, 0])
Sigma0 = np.array([[1, 1 - self.noise], [1 - self.noise, 1]])
Sigma1 = np.array([[1, -1 + self.noise], [-1 + self.noise, 1]])
return gu.logsumexp([
np.log(.5)+multivariate_normal.logpdf(X, Mu, Sigma0),
np.log(.5)+multivariate_normal.logpdf(X, Mu, Sigma1),
])
|
probcomp/cgpm
|
src/uncorrelated/xcross.py
|
Python
|
apache-2.0
| 1,552
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.template import RequestContext, loader
from django.views.decorators.csrf import csrf_exempt
from redigit import clf
capacity = 10
pointer = 0
def index(request):
t = loader.get_template("index.html")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
@csrf_exempt
def predict(request):
    result = 6
try:
imgdata = request.POST['img']
imgbase64 = str(imgdata).replace('data:image/png;base64,', "")
imagename = "digits/image_"+str(clf.pointer % 100)+".png"
digitsname = "digits/digit_"+str(clf.pointer % 100)+".jpg"
clf.pointer = clf.pointer+1
fh = open(imagename, "wb")
data = imgbase64.decode('base64')
fh.write(data)
fh.close()
data = clf.get_image_data(imagename)
clf.save_image(data, digitsname)
preds = clf.dp.predict_image(data)
result = preds
    except KeyError:
# Redisplay the question voting form.
# return render(request, 'polls/detail.html', {
# 'question': p,
# 'error_message': "You didn't select a choice.",
# })
# print "error_message"
# return HttpResponse("Your predict is %s." % result)
return HttpResponseRedirect(reverse('index'))
else:
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
# return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
return HttpResponse("My predict is %s." % result)
# pass
# name = request.POST.get('name')
# return HttpResponse(json.dumps({'name': name}), content_type="application/json")
|
osgee/redigit
|
redigit/views.py
|
Python
|
apache-2.0
| 1,901
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the global BGP router
"""
__slots__ = ("_path_helper", "_extmethods", "__as_", "__router_id")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__as_ = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
self.__router_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"config",
]
def _get_as_(self):
"""
Getter method for as_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/as (oc-inet:as-number)
YANG Description: Local autonomous system number of the router. Uses
the 32-bit as-number type from the model in RFC 6991.
"""
return self.__as_
def _set_as_(self, v, load=False):
"""
Setter method for as_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/as (oc-inet:as-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_as_ is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as_() directly.
YANG Description: Local autonomous system number of the router. Uses
the 32-bit as-number type from the model in RFC 6991.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """as_ must be of a type compatible with oc-inet:as-number""",
"defined-type": "oc-inet:as-number",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-inet:as-number', is_config=True)""",
}
)
self.__as_ = t
if hasattr(self, "_set"):
self._set()
def _unset_as_(self):
self.__as_ = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
def _get_router_id(self):
"""
Getter method for router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/router_id (oc-yang:dotted-quad)
YANG Description: Router id of the router - an unsigned 32-bit integer
expressed in dotted quad notation.
"""
return self.__router_id
def _set_router_id(self, v, load=False):
"""
Setter method for router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/router_id (oc-yang:dotted-quad)
If this variable is read-only (config: false) in the
source YANG file, then _set_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_router_id() directly.
YANG Description: Router id of the router - an unsigned 32-bit integer
expressed in dotted quad notation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """router_id must be of a type compatible with oc-yang:dotted-quad""",
"defined-type": "oc-yang:dotted-quad",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}), is_leaf=True, yang_name="router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-yang:dotted-quad', is_config=True)""",
}
)
self.__router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_router_id(self):
self.__router_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
as_ = __builtin__.property(_get_as_, _set_as_)
router_id = __builtin__.property(_get_router_id, _set_router_id)
_pyangbind_elements = OrderedDict([("as_", as_), ("router_id", router_id)])
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the global BGP router
"""
__slots__ = ("_path_helper", "_extmethods", "__as_", "__router_id")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__as_ = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
self.__router_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"config",
]
def _get_as_(self):
"""
Getter method for as_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/as (oc-inet:as-number)
YANG Description: Local autonomous system number of the router. Uses
the 32-bit as-number type from the model in RFC 6991.
"""
return self.__as_
def _set_as_(self, v, load=False):
"""
Setter method for as_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/as (oc-inet:as-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_as_ is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as_() directly.
YANG Description: Local autonomous system number of the router. Uses
the 32-bit as-number type from the model in RFC 6991.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """as_ must be of a type compatible with oc-inet:as-number""",
"defined-type": "oc-inet:as-number",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-inet:as-number', is_config=True)""",
}
)
self.__as_ = t
if hasattr(self, "_set"):
self._set()
def _unset_as_(self):
self.__as_ = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="as",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-inet:as-number",
is_config=True,
)
def _get_router_id(self):
"""
Getter method for router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/router_id (oc-yang:dotted-quad)
YANG Description: Router id of the router - an unsigned 32-bit integer
expressed in dotted quad notation.
"""
return self.__router_id
def _set_router_id(self, v, load=False):
"""
Setter method for router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/config/router_id (oc-yang:dotted-quad)
If this variable is read-only (config: false) in the
source YANG file, then _set_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_router_id() directly.
YANG Description: Router id of the router - an unsigned 32-bit integer
expressed in dotted quad notation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """router_id must be of a type compatible with oc-yang:dotted-quad""",
"defined-type": "oc-yang:dotted-quad",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}), is_leaf=True, yang_name="router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-yang:dotted-quad', is_config=True)""",
}
)
self.__router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_router_id(self):
self.__router_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
},
),
is_leaf=True,
yang_name="router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-yang:dotted-quad",
is_config=True,
)
as_ = __builtin__.property(_get_as_, _set_as_)
router_id = __builtin__.property(_get_router_id, _set_router_id)
_pyangbind_elements = OrderedDict([("as_", as_), ("router_id", router_id)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/config/__init__.py
|
Python
|
apache-2.0
| 21,009
|
"""
Hiss is a minimalist language built on function composition.
"""
VERSION = '1.0'
from callable_tuple import s, run
|
vic/hiss
|
src/hiss/__init__.py
|
Python
|
apache-2.0
| 127
|
""" Copyright 2014 Luis Ruiz Ruiz
Copyright 2014 Ana Isabel Lopera Martínez
Copyright 2014 Miguel Ortega Moreno
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class NotFoundException(Exception):
    """NotFound exception"""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class RateNotUpdatedException(Exception):
    """RateNotUpdated exception"""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class ComponentAlreadyStoredException(Exception):
    """ComponentAlreadyStored exception"""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
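# Minimal usage sketch (added illustration, not part of the upstream module):
# raising and printing one of the exceptions defined above.
if __name__ == '__main__':
    try:
        raise NotFoundException('component xyz')
    except NotFoundException as error:
        print(error)  # prints the repr of the stored value: 'component xyz'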
|
lruizr/pol-brick
|
src/excepciones_metodos_base.py
|
Python
|
apache-2.0
| 1,170
|
from catkin_tools.jobs.utils import merge_envs
def test_merge_envs_basic():
job_env = { 'PATH': '/usr/local/bin:/usr/bin', 'FOO': 'foo' }
merge_envs(job_env, [
{ 'PATH': '/usr/local/bin:/bar/baz/bin' },
{ 'BAR': 'bar' } ])
# Validate that the known path was not moved from the existing order, and the unfamiliar
# path was correctly prepended.
assert job_env['PATH'] == '/bar/baz/bin:/usr/local/bin:/usr/bin'
# Confirm that a key only in the original env persists.
assert job_env['FOO'] == 'foo'
# Confirm that a new key is added.
assert job_env['BAR'] == 'bar'
def test_merge_envs_complex():
''' Confirm that merged paths are deduplicated and that order is maintained. '''
job_env = { 'PATH': 'C:B:A' }
merge_envs(job_env, [{ 'PATH': 'D:C' }, { 'PATH': 'E:A:C' }])
assert job_env['PATH'] == 'E:D:C:B:A', job_env['PATH']
def test_merge_envs_nonpaths():
''' Confirm that non-path vars are simply overwritten on a last-wins policy. '''
job_env = { 'FOO': 'foo:bar' }
merge_envs(job_env, [{ 'FOO': 'bar:baz' }, { 'FOO': 'baz:bar' }])
assert job_env['FOO'] == 'baz:bar'
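# Rough illustration of the merge behaviour the tests above describe (an
# assumption for readability only, not the actual catkin_tools implementation):
# path-like variables get unknown entries prepended and duplicates dropped,
# while everything else is overwritten on a last-wins basis.
def _merge_envs_sketch(job_env, envs, path_keys=('PATH',)):
    for env in envs:
        for key, value in env.items():
            if key in path_keys and key in job_env:
                existing = job_env[key].split(':')
                new = [p for p in value.split(':') if p not in existing]
                job_env[key] = ':'.join(new + existing)
            else:
                job_env[key] = value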
|
rhaschke/catkin_tools
|
tests/unit/test_jobs.py
|
Python
|
apache-2.0
| 1,160
|
#!/usr/bin/env python
# EASY-INSTALL-SCRIPT: 'Pillow==2.7.0','pilfile.py'
__requires__ = 'Pillow==2.7.0'
__import__('pkg_resources').run_script('Pillow==2.7.0', 'pilfile.py')
|
fkolacek/FIT-VUT
|
bp-revok/python/bin/pilfile.py
|
Python
|
apache-2.0
| 175
|
# -*- coding: utf-8 -*-
#
# Doric Stacks - Installation Guide documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 4 10:26:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doric Stacks - Installation Guide'
copyright = u'2015, Janis Lejins'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DoricStacks-InstallationGuidedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DoricStacks-InstallationGuide.tex', u'Doric Stacks - Installation Guide Documentation',
u'Janis Lejins', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'doricstacks-installationguide', u'Doric Stacks - Installation Guide Documentation',
[u'Janis Lejins'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DoricStacks-InstallationGuide', u'Doric Stacks - Installation Guide Documentation',
u'Janis Lejins', 'DoricStacks-InstallationGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Doric Stacks - Installation Guide'
epub_author = u'Janis Lejins'
epub_publisher = u'Janis Lejins'
epub_copyright = u'2015, Janis Lejins'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Doric Stacks - Installation Guide'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
janislejins/DoricStacksInstallGuide
|
docs/conf.py
|
Python
|
artistic-2.0
| 10,483
|
"""
Test the Prime Finder class
Still working on getting dependency injection working.
Injecting the Generator into the Finder allows for many possibilities.
From the testing perspective this would allow me to inject a mock object
for the Generator that returns a set value speeding up the testing of the
Prime Finder class.
"""
import unittest
from primes.Primes import PrimeFinder
from primes.Primes import PrimeGenerator
class PrimeFinderTests(unittest.TestCase):
def test_find_prime(self):
prime_finder = PrimeFinder.PrimeFinder(PrimeGenerator.PrimeGenerator())
self.assertEqual(prime_finder.find_prime(6), 13)
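    def test_generator_stub_can_be_injected(self):
        # Hedged sketch of the mock-injection idea from the module docstring.
        # The real PrimeGenerator interface is not shown here, so this only
        # demonstrates that a spec'd stand-in passes the isinstance check;
        # asserting on find_prime with a stub would require that interface.
        from unittest import mock
        stub_generator = mock.Mock(spec=PrimeGenerator.PrimeGenerator)
        self.assertIsInstance(stub_generator, PrimeGenerator.PrimeGenerator)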
|
bigfatpanda-training/pandas-practical-python-primer
|
training/level-2-command-line-interfaces/dragon-warrior/tmarsha1/primes/Tests/PrimeFinderTests.py
|
Python
|
artistic-2.0
| 639
|
#!/usr/bin/env python2
"""
Generates a wholesale order form in spreadsheet form.
"""
import argparse
import datetime
import itertools
import logging
import os
import ConfigParser
import calc_price
import cctools
import notify_send_handler
import openpyxl # sudo pip install openpyxl
CHECK_FOR_LACK_OF_ANY = False # until most "Any" variants have been added
NUMBER_FORMAT_USD = "$#,##0.00;-$#,##0.00"
# Column numbers of product values.
COL_ITEM_NO = 1
COL_DESCRIPTION = 2
COL_PRICE = 3
COL_QTY = 4
COL_TOTAL = 5
COL_SKU = 6
COL_SIZE = 7
def set_cell(
worksheet,
row,
col,
value,
font_bold=False,
font_size=11,
alignment_horizontal="general",
alignment_vertical="bottom",
number_format="General"
):
"""Set cell value and style."""
cell = worksheet.cell(row=row, column=col)
cell.value = value
cell.font = openpyxl.styles.Font(bold=font_bold, size=font_size)
cell.alignment = openpyxl.styles.Alignment(
horizontal=alignment_horizontal,
vertical=alignment_vertical
)
if number_format != "General":
cell.number_format = number_format
def col_letter(col):
"""Return column letter for given column."""
return chr(ord("A") + col - 1)
def add_title(args, config, worksheet):
"""Add worksheet title."""
row = 1
doc_title = config.get("wholesale_order", "title")
set_cell(worksheet, row, 1, doc_title, font_bold=True, font_size=20)
worksheet.row_dimensions[1].height = 25
row += 1
now = datetime.datetime.now()
cell_text = now.strftime("Date: %Y-%m-%d")
set_cell(worksheet, row, 1, cell_text)
row += 1
valid_date = now + datetime.timedelta(days=args.valid_ndays)
cell_text = valid_date.strftime("Valid until: %Y-%m-%d")
set_cell(worksheet, row, 1, cell_text)
row += 1
cell_text = "Prices are {:.0%} of retail".format(args.wholesale_fraction)
set_cell(worksheet, row, 1, cell_text)
row += 1
for merge_row in range(1, row):
worksheet.merge_cells(
start_row=merge_row,
start_column=1,
end_row=merge_row,
end_column=2
)
return row
def add_ship_to(worksheet, row):
"""Add Ship To block."""
start_col = 1
end_col = 2
worksheet.merge_cells(
start_row=row,
start_column=start_col,
end_row=row,
end_column=end_col
)
set_cell(
worksheet,
row,
start_col,
"Ship To:",
font_bold=True,
alignment_horizontal="left"
)
row += 1
nrows = 3
for i in range(0, nrows):
cell = worksheet.cell(row=row, column=start_col)
cell.alignment = openpyxl.styles.Alignment(horizontal="left")
worksheet.merge_cells(
start_row=row,
start_column=start_col,
end_row=row,
end_column=end_col
)
for col in range(start_col, end_col + 1):
default_side = openpyxl.styles.Side(
border_style=None,
color='FF000000'
)
if i == 0:
top = openpyxl.styles.Side("thin") # BORDER_THIN
else:
top = default_side
if i == nrows - 1:
bottom = openpyxl.styles.Side("thin")
else:
bottom = default_side
if col == start_col:
left = openpyxl.styles.Side("thin")
else:
left = default_side
if col == end_col:
right = openpyxl.styles.Side("thin")
else:
right = default_side
cell = worksheet.cell(row=row, column=col)
cell.border = openpyxl.styles.Border(
left=left,
right=right,
top=top,
bottom=bottom
)
row += 1
return row
def set_label_dollar_value(
worksheet,
row,
col_label_start,
col_label_end,
col_total,
label,
value,
font_bold=False
):
"""Add label: value."""
worksheet.merge_cells(
start_row=row,
start_column=col_label_start,
end_row=row,
end_column=col_label_end
)
set_cell(
worksheet,
row,
col_label_start,
label,
font_bold=font_bold,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
col_total,
value,
font_bold=font_bold,
number_format=NUMBER_FORMAT_USD
)
def add_variant(
worksheet,
row,
item_no,
size,
sku,
description,
wholesale_price
):
"""Add a row for a variant."""
set_cell(worksheet, row, COL_ITEM_NO, item_no)
set_cell(worksheet, row, COL_DESCRIPTION, description)
set_cell(
worksheet,
row,
COL_PRICE,
wholesale_price,
number_format=NUMBER_FORMAT_USD
)
    # Spreadsheet formula; e.g. for row 5 this produces =IF(D5="", "", C5 * D5),
    # so the total stays blank until a quantity is entered.
    total_formula = "=IF({}{}=\"\", \"\", {}{} * {}{})".format(
col_letter(COL_QTY),
row,
col_letter(COL_PRICE),
row,
col_letter(COL_QTY),
row
)
set_cell(
worksheet,
row,
COL_TOTAL,
total_formula,
number_format=NUMBER_FORMAT_USD
)
set_cell(worksheet, row, COL_SIZE, size)
set_cell(worksheet, row, COL_SKU, sku)
def get_product_variants(variants, sku):
"""Returns a list of variants for a product."""
product_variants = [
variant for variant in variants
if variant["Product SKU"] == sku and variant["Variant Enabled"] == "Y"
]
product_variants.sort(key=lambda variant: variant["Variant Sort"])
return product_variants
def add_product(args, worksheet, row, item_no, product, variants):
"""Add row for each variant."""
size = product["Size"]
product_name = product["Product Name"]
sku = product["SKU"]
teaser = cctools.html_to_plain_text(product["Teaser"])
price = float(product["Price"])
if args.include_variants:
product_variants = get_product_variants(variants, sku)
else:
product_variants = []
if len(product_variants) == 0:
description = "{}: {}".format(product_name, teaser)
add_variant(
worksheet,
row,
item_no,
size,
sku,
description,
calc_price.calc_wholesale_price(price, args.wholesale_fraction)
)
row += 1
item_no += 1
else:
any_variant_exists = False
for variant in product_variants:
variant_sku = variant["Variant SKU"]
if variant_sku == "ANY" or variant_sku == "VAR":
any_variant_exists = True
variant_sku = "{}-{}".format(sku, variant_sku)
variant_add_price = float(variant["Variant Add Price"])
variant_name = variant["Variant Name"]
description = "{} ({}): {}".format(
product_name,
variant_name,
teaser
)
add_variant(
worksheet,
row,
item_no,
size,
variant_sku,
description,
calc_price.calc_wholesale_price(
price + variant_add_price,
args.wholesale_fraction
)
)
row += 1
item_no += 1
if CHECK_FOR_LACK_OF_ANY and not any_variant_exists:
logging.getLogger().warning(
"No 'Any' or 'Variety' variant exists for {} {}".format(
sku,
product_name
)
)
return row, item_no
def add_products(args, worksheet, row, cc_browser, products):
"""Add row for each product."""
# Add header row.
set_cell(
worksheet,
row,
COL_ITEM_NO,
"Item No",
font_bold=True,
alignment_horizontal="right"
)
set_cell(worksheet, row, COL_DESCRIPTION, "Description", font_bold=True)
set_cell(
worksheet,
row,
COL_PRICE,
"Price",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_QTY,
"Qty",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_TOTAL,
"Total",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_SKU,
"SKU",
font_bold=True,
alignment_horizontal="right"
)
set_cell(
worksheet,
row,
COL_SIZE,
"Size",
font_bold=True
)
row += 1
# Remove excluded SKUs.
if args.exclude_skus:
products = [
x for x in products if str(x["SKU"]) not in args.exclude_skus
]
# Sort products by category, product_name.
products = sorted(products, key=cc_browser.product_key_by_cat_and_name)
# Fetch variants list.
variants = cc_browser.get_variants()
# Group products by category.
first_product_row = row
item_no = 1
for _, product_group in itertools.groupby(
products,
key=cc_browser.product_key_by_category
):
# Leave a row for the category name.
category = "unknown"
category_row = row
row += 1
# Add product rows.
for product in product_group:
if product["Available"] != "Y":
continue
row, item_no = add_product(
args,
worksheet,
row,
item_no,
product,
variants
)
category = product["Category"]
last_product_row = row - 1
# Go back and insert category name.
if category == "":
category = "Uncategorized"
set_cell(
worksheet,
category_row,
COL_DESCRIPTION,
category,
font_bold=True
)
# Set column widths.
worksheet.column_dimensions[col_letter(COL_ITEM_NO)].width = 8
worksheet.column_dimensions[col_letter(COL_DESCRIPTION)].width = 100
worksheet.column_dimensions[col_letter(COL_PRICE)].width = 8
worksheet.column_dimensions[col_letter(COL_QTY)].width = 5
worksheet.column_dimensions[col_letter(COL_TOTAL)].width = 10
worksheet.column_dimensions[col_letter(COL_SKU)].width = 14
worksheet.column_dimensions[col_letter(COL_SIZE)].width = 28
# Blank row.
row += 1
col_label_start = COL_TOTAL - 2
col_label_end = COL_TOTAL - 1
# Subtotal.
subtotal_formula = "=SUM({}{}:{}{})".format(
col_letter(COL_TOTAL),
first_product_row,
col_letter(COL_TOTAL),
last_product_row
)
set_label_dollar_value(
worksheet,
row,
col_label_start,
col_label_end,
COL_TOTAL,
"Subtotal:",
subtotal_formula
)
subtotal_row = row
row += 1
# Shipping.
set_label_dollar_value(
worksheet,
row,
col_label_start,
col_label_end,
COL_TOTAL,
"Shipping:",
0.0
)
row += 1
# Adjustment.
set_label_dollar_value(
worksheet,
row,
col_label_start,
col_label_end,
COL_TOTAL,
"Adjustment:",
0.0
)
row += 1
# Total.
total_formula = "=SUM({}{}:{}{})".format(
col_letter(COL_TOTAL),
subtotal_row,
col_letter(COL_TOTAL),
row - 1
)
set_label_dollar_value(
worksheet,
row,
col_label_start,
col_label_end,
COL_TOTAL,
"Total:",
total_formula,
font_bold=True
)
def add_order_form(args, config, cc_browser, products, worksheet):
"""Create the Wholesale Order Form worksheet."""
# Prepare worksheet.
worksheet.title = "Wholesale Order Form"
# Add title.
row = add_title(args, config, worksheet)
# Blank row.
row += 1
# Ship To block.
row = add_ship_to(worksheet, row)
# Blank row.
row += 1
# Add products.
add_products(
args,
worksheet,
row,
cc_browser,
products
)
def generate_xlsx(args, config, cc_browser, products):
    """Generate the XLSX file."""
# Construct a document.
workbook = openpyxl.workbook.Workbook()
# Create PO-Invoice worksheet.
add_order_form(
args,
config,
cc_browser,
products,
workbook.worksheets[0]
)
# Write to file.
workbook.save(args.xlsx_filename)
def main():
"""main"""
default_config = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"cctools.cfg"
)
now = datetime.datetime.now()
default_xlsx_filename = now.strftime("%Y-%m-%d-WholesaleOrderForm.xlsx")
arg_parser = argparse.ArgumentParser(
description="Generates a wholesale order form."
)
arg_parser.add_argument(
"--config",
action="store",
dest="config",
metavar="FILE",
default=default_config,
help="configuration filename (default=%(default)s)"
)
    arg_parser.add_argument(
        "--wholesale-fraction",
        metavar="FRAC",
        type=float,
        default=0.5,
        help="wholesale price fraction (default=%(default).2f)"
    )
arg_parser.add_argument(
"--valid-ndays",
metavar="N",
type=int,
default=30,
help="number of days prices are valid (default=%(default)i)"
)
arg_parser.add_argument(
"--outfile",
action="store",
dest="xlsx_filename",
metavar="FILE",
default=default_xlsx_filename,
help="output XLSX filename (default=%(default)s)"
)
arg_parser.add_argument(
"--include-variants",
action="store_true",
default=False,
help="add row for each product variant"
)
arg_parser.add_argument(
"--exclude-sku",
action="append",
dest="exclude_skus",
metavar="SKU",
help="exclude SKU from output"
)
arg_parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="display progress messages"
)
# Parse command line arguments.
args = arg_parser.parse_args()
# Configure logging.
logging.basicConfig(
level=logging.INFO if args.verbose else logging.WARNING
)
logger = logging.getLogger()
# Also log using notify-send if it is available.
if notify_send_handler.NotifySendHandler.is_available():
logger.addHandler(
notify_send_handler.NotifySendHandler(
os.path.splitext(os.path.basename(__file__))[0]
)
)
# Read config file.
config = ConfigParser.RawConfigParser()
config.readfp(open(args.config))
# Create a connection to CoreCommerce.
cc_browser = cctools.CCBrowser(
config.get("website", "base_url"),
config.get("website", "username"),
config.get("website", "password")
)
# Fetch products list.
products = cc_browser.get_products()
# Generate spreadsheet.
logger.debug("Generating {}".format(args.xlsx_filename))
generate_xlsx(args, config, cc_browser, products)
logger.debug("Generation complete")
return 0
if __name__ == "__main__":
main()
|
tschutter/cctools
|
gen-wholesale-order.py
|
Python
|
bsd-2-clause
| 15,604
|
# Coin sums
#
# Problem 31
#
# In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
#
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
# It is possible to make £2 in the following way:
#
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
# How many different ways can £2 be made using any number of coins?
COINS = (1, 2, 5, 10, 20, 50, 100, 200)
def ways_2_split(number):
    """Count the coin combinations summing to `number` (unbounded coin change).
    table[n] holds the number of ways to make n pence using only the coins
    processed so far; iterating over coins in the outer loop counts
    combinations rather than ordered sequences.
    """
table = [1] + [0] * number
for coin in COINS:
for num in range(coin, len(table)):
table[num] += table[num - coin]
return table[-1]
print(ways_2_split(200))
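# Small sanity check of the table approach (added illustration): with these
# coins, 5p can be made in exactly 4 ways (5; 2+2+1; 2+1+1+1; 1+1+1+1+1).
assert ways_2_split(5) == 4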
|
chjdev/euler
|
python/problem31.py
|
Python
|
bsd-2-clause
| 634
|
import datetime
import os
import sys
import unittest
from pathlib import Path, PosixPath, WindowsPath
from unittest.mock import Mock, call, patch
import freezegun
import streamlink_cli.main
import tests.resources
from streamlink.session import Streamlink
from streamlink.stream.stream import Stream
from streamlink_cli.compat import DeprecatedPath, is_win32, stdout
from streamlink_cli.main import (
Formatter,
NoPluginError,
check_file_output,
create_output,
format_valid_streams,
handle_stream,
handle_url,
log_current_arguments,
resolve_stream_name,
setup_config_args
)
from streamlink_cli.output import FileOutput, PlayerOutput
from tests.plugin.testplugin import TestPlugin as _TestPlugin
class FakePlugin(_TestPlugin):
module = "fake"
arguments = []
_streams = {}
def streams(self, *args, **kwargs):
return self._streams
def _get_streams(self): # pragma: no cover
pass
class TestCLIMain(unittest.TestCase):
def test_resolve_stream_name(self):
a = Mock()
b = Mock()
c = Mock()
d = Mock()
e = Mock()
streams = {
"160p": a,
"360p": b,
"480p": c,
"720p": d,
"1080p": e,
"worst": b,
"best": d,
"worst-unfiltered": a,
"best-unfiltered": e
}
self.assertEqual(resolve_stream_name(streams, "unknown"), "unknown")
self.assertEqual(resolve_stream_name(streams, "160p"), "160p")
self.assertEqual(resolve_stream_name(streams, "360p"), "360p")
self.assertEqual(resolve_stream_name(streams, "480p"), "480p")
self.assertEqual(resolve_stream_name(streams, "720p"), "720p")
self.assertEqual(resolve_stream_name(streams, "1080p"), "1080p")
self.assertEqual(resolve_stream_name(streams, "worst"), "360p")
self.assertEqual(resolve_stream_name(streams, "best"), "720p")
self.assertEqual(resolve_stream_name(streams, "worst-unfiltered"), "160p")
self.assertEqual(resolve_stream_name(streams, "best-unfiltered"), "1080p")
def test_format_valid_streams(self):
a = Mock()
b = Mock()
c = Mock()
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst": b,
"best": c
}
self.assertEqual(
format_valid_streams(_TestPlugin, streams),
", ".join([
"audio",
"720p (worst)",
"1080p (best)"
])
)
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst-unfiltered": b,
"best-unfiltered": c
}
self.assertEqual(
format_valid_streams(_TestPlugin, streams),
", ".join([
"audio",
"720p (worst-unfiltered)",
"1080p (best-unfiltered)"
])
)
class TestCLIMainJsonAndStreamUrl(unittest.TestCase):
@patch("streamlink_cli.main.args", json=True, stream_url=True, subprocess_cmdline=False)
@patch("streamlink_cli.main.console")
def test_handle_stream_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(best=stream)
plugin = FakePlugin("")
plugin._streams = streams
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(
stream,
metadata=dict(
author="Tѥst Āuƭhǿr",
category=None,
title="Test Title"
)
)])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
args.json = False
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [call(stream.to_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_url.side_effect = TypeError()
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
@patch("streamlink_cli.main.args", json=True, stream_url=True, stream=[], default_stream=[], retry_max=0, retry_streams=0)
@patch("streamlink_cli.main.console")
def test_handle_url_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(worst=Mock(), best=stream)
class _FakePlugin(FakePlugin):
_streams = streams
with patch("streamlink_cli.main.streamlink", resolve_url=Mock(return_value=(_FakePlugin, ""))):
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(
plugin="fake",
metadata=dict(
author="Tѥst Āuƭhǿr",
category=None,
title="Test Title"
),
streams=streams
)])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
args.json = False
handle_url()
self.assertEqual(console.msg.mock_calls, [call(stream.to_manifest_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_manifest_url.side_effect = TypeError()
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
console.exit.mock_calls.clear()
class TestCLIMainCheckFileOutput(unittest.TestCase):
@staticmethod
def mock_path(path, is_file=True):
return Mock(
spec=Path(path),
is_file=Mock(return_value=is_file),
__str__=Mock(return_value=path)
)
def test_check_file_output(self):
path = self.mock_path("foo", is_file=False)
output = check_file_output(path, False)
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
def test_check_file_output_exists_force(self):
path = self.mock_path("foo", is_file=True)
output = check_file_output(path, True)
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_ask_yes(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = True
mock_console.ask = Mock(return_value="y")
path = self.mock_path("foo", is_file=True)
output = check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [call("File foo already exists! Overwrite it? [y/N] ")])
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_ask_no(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = True
mock_sys.exit.side_effect = SystemExit
mock_console.ask = Mock(return_value="N")
path = self.mock_path("foo", is_file=True)
with self.assertRaises(SystemExit):
check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [call("File foo already exists! Overwrite it? [y/N] ")])
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_notty(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = False
mock_sys.exit.side_effect = SystemExit
path = self.mock_path("foo", is_file=True)
with self.assertRaises(SystemExit):
check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [])
class TestCLIMainCreateOutput(unittest.TestCase):
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.console", Mock())
@patch("streamlink_cli.main.DEFAULT_STREAM_METADATA", {"title": "bar"})
def test_create_output_no_file_output_options(self, args: Mock):
formatter = Formatter({
"author": lambda: "foo"
})
args.output = None
args.stdout = None
args.record = None
args.record_and_pipe = None
args.title = None
args.url = "URL"
args.player = "mpv"
args.player_args = ""
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "URL")
args.title = "{author} - {title}"
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "foo - bar")
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
def test_create_output_file_output(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = "foo"
args.stdout = None
args.record = None
args.record_and_pipe = None
args.force = False
args.fs_safe_rules = None
output = create_output(formatter)
self.assertEqual(mock_check_file_output.call_args_list, [call(Path("foo"), False)])
self.assertIsInstance(output, FileOutput)
self.assertEqual(output.filename, Path("foo"))
self.assertIsNone(output.fd)
self.assertIsNone(output.record)
@patch("streamlink_cli.main.args")
def test_create_output_stdout(self, args: Mock):
formatter = Formatter({})
args.output = None
args.stdout = True
args.record = None
args.record_and_pipe = None
output = create_output(formatter)
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsNone(output.record)
args.output = "-"
args.stdout = False
output = create_output(formatter)
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsNone(output.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
def test_create_output_record_and_pipe(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = None
args.stdout = None
args.record_and_pipe = "foo"
args.force = False
args.fs_safe_rules = None
output = create_output(formatter)
self.assertEqual(mock_check_file_output.call_args_list, [call(Path("foo"), False)])
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
@patch("streamlink_cli.main.DEFAULT_STREAM_METADATA", {"title": "bar"})
def test_create_output_record(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({
"author": lambda: "foo"
})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = None
args.stdout = None
args.record = "foo"
args.record_and_pipe = None
args.force = False
args.fs_safe_rules = None
args.title = None
args.url = "URL"
args.player = "mpv"
args.player_args = ""
args.player_fifo = None
args.player_http = None
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "URL")
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
args.title = "{author} - {title}"
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "foo - bar")
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.console")
def test_create_output_record_and_other_file_output(self, console: Mock, args: Mock):
formatter = Formatter({})
args.output = None
args.stdout = True
args.record_and_pipe = True
create_output(formatter)
console.exit.assert_called_with("Cannot use record options with other file output options.")
class TestCLIMainHandleStream(unittest.TestCase):
@patch("streamlink_cli.main.output_stream")
@patch("streamlink_cli.main.args")
def test_handle_stream_output_stream(self, args: Mock, mock_output_stream: Mock):
"""
Test that the formatter does define the correct variables
"""
args.json = False
args.subprocess_cmdline = False
args.stream_url = False
args.output = False
args.stdout = False
args.url = "URL"
args.player_passthrough = []
args.player_external_http = False
args.player_continuous_http = False
mock_output_stream.return_value = True
plugin = _TestPlugin("")
plugin.author = "AUTHOR"
plugin.category = "CATEGORY"
plugin.title = "TITLE"
stream = Stream(session=Mock())
streams = {"best": stream}
handle_stream(plugin, streams, "best")
self.assertEqual(mock_output_stream.call_count, 1)
paramStream, paramFormatter = mock_output_stream.call_args[0]
self.assertIs(paramStream, stream)
self.assertIsInstance(paramFormatter, Formatter)
self.assertEqual(
paramFormatter.title("{url} - {author} - {category}/{game} - {title}"),
"URL - AUTHOR - CATEGORY/CATEGORY - TITLE"
)
@patch("streamlink_cli.main.log")
class TestCLIMainSetupConfigArgs(unittest.TestCase):
configdir = Path(tests.resources.__path__[0], "cli", "config")
parser = Mock()
@classmethod
def subject(cls, config_files, **args):
def resolve_url(name):
if name == "noplugin":
raise NoPluginError()
return Mock(module="testplugin"), name
session = Mock()
session.resolve_url.side_effect = resolve_url
args.setdefault("url", "testplugin")
with patch("streamlink_cli.main.setup_args") as mock_setup_args, \
patch("streamlink_cli.main.args", **args), \
patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.CONFIG_FILES", config_files):
setup_config_args(cls.parser)
return mock_setup_args
def test_no_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None,
url="noplugin"
)
expected = [self.configdir / "primary"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_primary(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_secondary_deprecated(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "secondary", self.configdir / "secondary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded config from deprecated path, see CLI docs for how to migrate: {expected[0]}"),
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_with_primary_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_custom_with_deprecated_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", DeprecatedPath(self.configdir / "secondary.testplugin")]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_multiple(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "non-existent"), str(self.configdir / "primary"), str(self.configdir / "secondary")]
)
expected = [self.configdir / "secondary", self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
class _TestCLIMainLogging(unittest.TestCase):
@classmethod
def subject(cls, argv):
session = Streamlink()
session.load_plugins(os.path.join(os.path.dirname(__file__), "plugin"))
def _log_current_arguments(*args, **kwargs):
log_current_arguments(*args, **kwargs)
raise SystemExit
with patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.log_current_arguments", side_effect=_log_current_arguments), \
patch("streamlink_cli.main.CONFIG_FILES", []), \
patch("streamlink_cli.main.setup_signals"), \
patch("streamlink_cli.main.setup_streamlink"), \
patch("streamlink_cli.main.setup_plugins"), \
patch("streamlink_cli.main.setup_http_session"), \
patch("streamlink.session.Streamlink.load_builtin_plugins"), \
patch("sys.argv") as mock_argv:
mock_argv.__getitem__.side_effect = lambda x: argv[x]
try:
streamlink_cli.main.main()
except SystemExit:
pass
def tearDown(self):
streamlink_cli.main.logger.root.handlers.clear()
# python >=3.7.2: https://bugs.python.org/issue35046
_write_calls = (
([call("[cli][info] foo\n")]
if sys.version_info >= (3, 7, 2)
else [call("[cli][info] foo"), call("\n")])
+ [call("bar\n")]
)
def write_file_and_assert(self, mock_mkdir: Mock, mock_write: Mock, mock_stdout: Mock):
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(mock_mkdir.mock_calls, [call(parents=True, exist_ok=True)])
self.assertEqual(mock_write.mock_calls, self._write_calls)
self.assertFalse(mock_stdout.write.called)
class TestCLIMainLogging(_TestCLIMainLogging):
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.os.geteuid", Mock(return_value=0))
def test_log_root_warning(self, mock_log):
self.subject(["streamlink"])
self.assertEqual(mock_log.info.mock_calls, [call("streamlink is running as root! Be careful!")])
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.streamlink_version", "streamlink")
@patch("streamlink_cli.main.requests.__version__", "requests")
@patch("streamlink_cli.main.socks_version", "socks")
@patch("streamlink_cli.main.websocket_version", "websocket")
@patch("platform.python_version", Mock(return_value="python"))
def test_log_current_versions(self, mock_log):
self.subject(["streamlink", "--loglevel", "info"])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
with patch("sys.platform", "linux"), \
patch("platform.platform", Mock(return_value="linux")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: linux"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "darwin"), \
patch("platform.mac_ver", Mock(return_value=["0.0.0"])):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: macOS 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "win32"), \
patch("platform.system", Mock(return_value="Windows")), \
patch("platform.release", Mock(return_value="0.0.0")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: Windows 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
@patch("streamlink_cli.main.log")
def test_log_current_arguments(self, mock_log):
self.subject([
"streamlink",
"--loglevel", "info"
])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
self.subject([
"streamlink",
"--loglevel", "debug",
"-p", "custom",
"--testplugin-bool",
"--testplugin-password=secret",
"test.se/channel",
"best,worst"
])
self.assertEqual(
mock_log.debug.mock_calls[-7:],
[
call("Arguments:"),
call(" url=test.se/channel"),
call(" stream=['best', 'worst']"),
call(" --loglevel=debug"),
call(" --player=custom"),
call(" --testplugin-bool=True"),
call(" --testplugin-password=********")
]
)
class TestCLIMainLoggingLogfile(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_no_logfile(self, mock_open, mock_stdout):
self.subject(["streamlink"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, self._write_calls)
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_loglevel_none(self, mock_open, mock_stdout):
self.subject(["streamlink", "--loglevel", "none", "--logfile", "foo"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, [call("bar\n")])
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_relative(self, mock_open, mock_stdout):
path = Path("foo").resolve()
self.subject(["streamlink", "--logfile", "foo"])
self.write_file_and_assert(
mock_mkdir=path.mkdir,
mock_write=mock_open(str(path), "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
class TestCLIMainLoggingLogfilePosix(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "/foo/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"HOME": "/foo"}):
self.subject(["streamlink", "--logfile", "~/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", PosixPath("/foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(not is_win32, "test only applicable on Windows")
class TestCLIMainLoggingLogfileWindows(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "C:\\foo\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"USERPROFILE": "C:\\foo"}):
self.subject(["streamlink", "--logfile", "~\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", WindowsPath("C:\\foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
|
bastimeyer/streamlink
|
tests/test_cli_main.py
|
Python
|
bsd-2-clause
| 29,440
|
#!/usr/bin/env python
# Copyright (C) 2014-2015 Job Snijders <job@instituut.net>
#
# This file is part of ACLHound
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import unittest
import os
from grako.parsing import * # noqa
from grako.exceptions import * # noqa
from aclhound.cli import ACLHoundClient
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self._stringio.write('EXCEPTION RAISED')
self.extend(self._stringio.getvalue().splitlines())
sys.stdout = self._stdout
return True # Ignore any exception
class TestAclhound(unittest.TestCase):
def test_00__parse_ebnf_grammar(self):
grammar_file = 'aclhound/doc/grammar.ebnf'
grammar = open(grammar_file).read()
from grako.parser import GrakoGrammarGenerator
parser = GrakoGrammarGenerator('aclhound', trace=False)
state = parser.parse(grammar, filename=None)
self.assertTrue(state)
def test_01__build_ios(self):
os.environ["WORKSPACE"] = os.getcwd() + "/tests/data"
with Capturing() as output:
cli = ACLHoundClient({u'--help': False, u'--version': False,
u'<args>': ['all'], u'<command>': 'build',
u'debug': False, u'jenkins': True})
cli.build({u'--help': False, u'--version': False, u'<devicename>':
'devices/s2-ios.meerval.net', u'<command>': 'build',
u'debug': False, u'jenkins': True})
self.assertNotIn('ERROR', '\n'.join(output))
predefined_output = open('build_ios.txt').read().splitlines()
# remove first line, as this contains system specific output
output.pop(0)
predefined_output.pop(0)
output = "\n".join(output)
predefined_output = "\n".join(predefined_output)
# compare generated & predefined output blob, should be same
self.maxDiff = None
self.assertEquals(output, predefined_output)
def test_02__build_asa(self):
os.environ["WORKSPACE"] = os.getcwd()
with Capturing() as output:
cli = ACLHoundClient({u'--help': False, u'--version': False,
u'<args>': ['all'], u'<command>': 'build',
u'debug': False, u'jenkins': True})
cli.build({u'--help': False, u'--version': False, u'<devicename>':
'devices/s2-asa.meerval.net', u'<command>': 'build',
u'debug': False, u'jenkins': True})
self.assertNotIn('ERROR', '\n'.join(output))
predefined_output = open('build_asa.txt').read().splitlines()
output.pop(0)
predefined_output.pop(0)
output = "\n".join(output)
predefined_output = "\n".join(predefined_output)
self.maxDiff = None
self.assertEquals(output, predefined_output)
def test_03__build_junos(self):
os.environ["WORKSPACE"] = os.getcwd()
with Capturing() as output:
cli = ACLHoundClient({u'--help': False, u'--version': False,
u'<args>': ['all'], u'<command>': 'build',
u'debug': False, u'jenkins': True})
cli.build({u'--help': False, u'--version': False, u'<devicename>':
'devices/junos.eby', u'<command>': 'build',
u'debug': False, u'jenkins': True})
predefined_output = open('build_junos.txt').read().splitlines()
output.pop(0)
predefined_output.pop(0)
output = "\n".join(output)
predefined_output = "\n".join(predefined_output)
self.maxDiff = None
self.assertEquals(output, predefined_output)
def test_04__deploy_ios(self):
        if "TRAVIS" not in os.environ:
self.skipTest("Not inside Travis")
return
elif not os.environ["TRAVIS_REPO_SLUG"] == "job/aclhound":
self.skipTest("Skipping this test when triggered by a fork")
return
os.environ["WORKSPACE"] = os.getcwd()
with Capturing() as output:
cli = ACLHoundClient({u'--help': False, u'--version': False,
u'<args>': ['all'], u'<command>': 'deploy',
u'debug': False, u'jenkins': True})
cli.deploy({u'--help': False, u'--version': False, u'<devicename>':
'devices/steffann.mu.meerval.net',
u'<command>': 'deploy', u'debug': False,
u'jenkins': True})
# predefined_output = open('deploy_ios.txt').read().splitlines()
# output = "\n".join(output)
# predefined_output = "\n".join(predefined_output)
# self.maxDiff = None
# self.assertEquals(output, predefined_output)
# self.assertTrue(parse_examples('policy/generic_policy'))
# self.assertTrue(True)
# tree = radix.Radix()
# self.assertTrue('radix.Radix' in str(type(tree)))
# del tree
# self.assertEquals(num_nodes_in - num_nodes_del, num_nodes_out)
# self.assertNotEquals(node, None)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
job/aclhound
|
tests/test_regression.py
|
Python
|
bsd-2-clause
| 6,698
|
class Data(dict):
def __init__(self, xml, status=None, messages=None):
"""Process the xml instance into a friendly dictionary."""
content = {
'data': None,
'status': status or 200,
'messages': messages or []
}
struct_nodes = xml.findall('./')
data = self._breakdown_tree(struct_nodes)
content['data'] = data
dict.__init__(self, content)
def _breakdown_tree(self, nodes):
# All properties in a single item
_data = {}
for node in nodes:
if node.tag == 'member':
# Dictionary item
key = node.find('name').text
value_node = node.find('value')[0]
if value_node.tag == 'int':
value = int(value_node.text.strip())
elif value_node.tag in ['array', 'struct', 'data', 'param']:
value = self._breakdown_tree(value_node.findall('./'))
elif value_node.tag == 'string':
try:
value = value_node.text.strip()
except AttributeError:
                        # value_node.text can be None (e.g. for an empty string
                        # node), in which case .strip() raises AttributeError.
value = value_node.text
else:
# dateTime.iso8601 or something exotic
value = value_node.text
_data[key] = value
elif node.tag == 'value':
# Nodes are list items
if not isinstance(_data, list):
_data = []
_data.append(self._breakdown_tree(node.findall('./')))
else:
# Recursively find data as this is not a data node
return self._breakdown_tree(node.findall('./'))
return _data
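# Usage sketch (illustration only): the <struct>/<member> layout below mimics a
# typical XML-RPC style payload and may differ from real iThenticate responses.
if __name__ == '__main__':
    from xml.etree import ElementTree
    xml = ElementTree.fromstring(
        '<struct>'
        '<member><name>id</name><value><int>42</int></value></member>'
        '<member><name>title</name><value><string>Example</string></value></member>'
        '</struct>')
    data = Data(xml)
    print(data['status'], data['data']['id'], data['data']['title'])  # -> 200 42 Example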
|
JorrandeWit/ithenticate-api-python
|
iThenticate/API/Object/data.py
|
Python
|
bsd-2-clause
| 1,868
|
from datetime import datetime, date, timedelta
import unittest
from businesstime import BusinessTime
from businesstime.holidays.usa import USFederalHolidays
class BusinessTimeTest(unittest.TestCase):
def setUp(self):
"""
        Tests are mostly based around January 2014, in which two holidays, New Year's
        Day and MLK Day, fall on the 1st and the 20th, respectively.
January 2014
Su Mo Tu We Th Fr Sa
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 31
"""
self.bt = BusinessTime(holidays=USFederalHolidays())
def test_iterdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17), datetime(
2014, 1, 18), datetime(2014, 1, 19), datetime(2014, 1, 20),
datetime(2014, 1, 21)))
def test_iterdays_same_day(self):
start = datetime(2014, 1, 16, 12, 15)
end = datetime(2014, 1, 16, 12, 16)
self.assertEqual(
tuple(self.bt.iterdays(start, end)), (datetime(2014, 1, 16), ))
def test_iterdays_clears_time(self):
start = datetime(2014, 1, 16, 12, 12, 11)
end = datetime(2014, 1, 18, 15)
self.assertEqual(
tuple(self.bt.iterdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17)))
def test_iterweekdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterweekdays(start, end)),
(datetime(2014, 1, 16), datetime(2014, 1, 17), datetime(
2014, 1, 20), datetime(2014, 1, 21)))
def test_iterbusinessdays(self):
start = datetime(2014, 1, 16)
end = datetime(2014, 1, 22)
self.assertEqual(
tuple(self.bt.iterbusinessdays(start, end)), (datetime(
2014, 1, 16), datetime(2014, 1, 17), datetime(2014, 1, 21)))
def test_iterbusinessdays_conforms_to_business_hours(self):
start = datetime(2014, 1, 16, 17, 1)
end = datetime(2014, 1, 23, 2)
self.assertEqual(
tuple(self.bt.iterbusinessdays(start, end)), (datetime(
2014, 1, 17), datetime(2014, 1, 21), datetime(2014, 1, 22)))
def test_isduringbusinessday(self):
self.assertTrue(
self.bt.isduringbusinesshours(datetime(2014, 1, 15, 12)))
self.assertFalse(self.bt.isduringbusinesshours(datetime(2014, 1, 15)))
self.assertFalse(
self.bt.isduringbusinesshours(datetime(2014, 1, 18, 11)))
self.assertFalse(
self.bt.isduringbusinesshours(datetime(2014, 1, 20, 11, 46, 43)))
def test_holidays_specified_as_list(self):
bd = BusinessTime(holidays=[date(2014, 1, 1)])
self.assertTrue(bd.isholiday(date(2014, 1, 1)))
self.assertFalse(bd.isholiday(date(2014, 1, 2)))
def test_no_holidays(self):
bt = BusinessTime()
self.assertFalse(bt.isholiday(date(2014, 1, 1)))
def test_businesstimedelta_after_during(self):
start = datetime(2014, 1, 16, 18, 30)
end = datetime(2014, 1, 22, 10, 0)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=2, hours=1))
def test_businesstimedelta_1_minute_after_during(self):
"""https://github.com/seatgeek/businesstime/issues/7"""
start = datetime(2015, 2, 23, 17, 0)
end = datetime(2015, 2, 24, 14, 20)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=5, minutes=20))
start = datetime(2015, 2, 23, 17, 1)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=5, minutes=20))
def test_businesstimedelta_nonbusiness_after(self):
start = datetime(2014, 1, 12, 12)
end = datetime(2014, 1, 17, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=4, hours=8))
def test_businesstimedelta_before_after(self):
start = datetime(2014, 1, 13, 4)
end = datetime(2014, 1, 17, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=4, hours=8))
def test_businesstimedelta_during_after(self):
start = datetime(2014, 1, 30, 12, 15)
end = datetime(2014, 1, 31, 19, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(days=1, hours=4, minutes=45))
def test_businesstimedelta_during_before(self):
start = datetime(2014, 8, 4, 11)
end = datetime(2014, 8, 6, 5)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1, hours=6))
def test_businesstimedelta_before_before(self):
start = datetime(2014, 8, 4, 1)
end = datetime(2014, 8, 4, 5)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=0))
def test_businesstimedelta_after_after(self):
start = datetime(2014, 8, 4, 22)
end = datetime(2014, 8, 4, 23)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=0))
def test_businesstimedelta_during_nonbusiness(self):
start = datetime(2014, 1, 10, 16, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(minutes=45))
def test_businesstimedelta_during_nonbusiness2(self):
start = datetime(2014, 1, 9, 16, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(
days=1, minutes=45))
def test_businesstimedelta_after_nonbusiness(self):
start = datetime(2014, 1, 10, 17, 15)
end = datetime(2014, 1, 12, 12, 30)
self.assertEqual(self.bt.businesstimedelta(start, end), timedelta())
def test_businesstimedelta_during_during(self):
start = datetime(2014, 1, 2, 9, 12)
end = datetime(2014, 1, 3, 9, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=7, minutes=58))
def test_businesstimedelta_during_during2(self):
start = datetime(2014, 1, 2, 9, 10)
end = datetime(2014, 1, 3, 9, 12)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(
days=1, minutes=2))
def test_businesstimedelta_during_during3(self):
start = datetime(2014, 1, 2, 9, 10)
end = datetime(2014, 1, 2, 9, 12)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(minutes=2))
def test_businesstimedelta_nonbusiness_nonbusiness(self):
start = datetime(2014, 1, 4, 9, 10)
end = datetime(2014, 1, 4, 9, 12)
self.assertEqual(self.bt.businesstimedelta(start, end), timedelta())
def test_businesstimedelta_exactly_one_day(self):
start = datetime(2014, 1, 7, 10)
end = datetime(2014, 1, 8, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1))
def test_businesstimedelta_exactly_one_day2(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/3
"""
start = datetime(2014, 1, 7, 9)
end = datetime(2014, 1, 8, 9)
self.assertEqual(
self.bt.businesstimedelta(start, end), timedelta(days=1))
def test_businesstimedelta_during_during_reverse(self):
end = datetime(2014, 1, 2, 9, 12)
start = datetime(2014, 1, 3, 9, 10)
self.assertEqual(
self.bt.businesstimedelta(start, end),
timedelta(hours=-7, minutes=-58))
def test_businesstime_hours_exactly_one_day(self):
start = datetime(2014, 1, 16, 9, 0)
end = datetime(2014, 1, 17, 9, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_hours_one_day(self):
start = datetime(2014, 1, 16, 9, 0)
end = datetime(2014, 1, 17, 15, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=14))
def test_businesstime_hours_one_day_reverse(self):
start = datetime(2014, 1, 17, 9, 0)
end = datetime(2014, 1, 16, 9, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=-8))
def test_businesstime_out_of_hours_start(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 9, 9, 0)
end = datetime(2014, 8, 11, 17, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_out_of_hours_start_end(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 9, 9, 0)
end = datetime(2014, 8, 11, 23, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=8))
def test_businesstime_out_of_hours_end(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/13
"""
start = datetime(2014, 8, 8, 9, 0)
end = datetime(2014, 8, 11, 23, 0)
self.assertEqual(
self.bt.businesstime_hours(start, end), timedelta(hours=16))
def test_businesstime_holidays_date_desc(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/25
"""
bt_cal = BusinessTime(holidays=USFederalHolidays())
non_holiday = datetime(2018, 5, 31, 12, 0)
memorial_day_2017 = datetime(2017, 5, 29, 12, 0)
memorial_day_2018 = datetime(2018, 5, 28, 12, 0)
# Note that we test the later memorial day first, internally populating
# the holidays cache starting with memorial day 2018. We then verify
# that memorial day 2017 is properly classified as a holiday.
is_memorial_day_2018_holiday = bt_cal.isholiday(memorial_day_2018)
is_memorial_day_2017_holiday = bt_cal.isholiday(memorial_day_2017)
is_non_holiday_holiday = bt_cal.isholiday(non_holiday)
self.assertTrue(is_memorial_day_2017_holiday)
self.assertTrue(is_memorial_day_2018_holiday)
self.assertFalse(is_non_holiday_holiday)
def test_lots_of_holidays(self):
"""
Test for https://github.com/seatgeek/businesstime/issues/25
"""
bt_cal = BusinessTime(holidays=USFederalHolidays())
non_holiday = datetime(2018, 5, 31, 12, 0)
non_holiday2 = datetime(2018, 2, 3, 12, 0)
non_holiday3 = datetime(2018, 6, 4, 12, 0)
non_holiday4 = datetime(2018, 11, 21, 12, 0)
memorial_day = datetime(2018, 5, 28, 12, 0)
new_year_day = datetime(2018, 1, 1, 12, 0)
labor_day = datetime(2018, 9, 3, 12, 0)
christmas = datetime(2018, 12, 25, 12, 0)
self.assertFalse(bt_cal.isholiday(non_holiday))
self.assertTrue(bt_cal.isholiday(memorial_day))
self.assertTrue(bt_cal.isholiday(new_year_day))
self.assertFalse(bt_cal.isholiday(non_holiday2))
self.assertFalse(bt_cal.isholiday(non_holiday4))
self.assertTrue(bt_cal.isholiday(labor_day))
self.assertFalse(bt_cal.isholiday(non_holiday3))
self.assertTrue(bt_cal.isholiday(christmas))
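# Standalone sketch (not part of the test suite): the 8-hour expectations above
# rely on BusinessTime's default 09:00-17:00 business hours, an assumption
# inferred from the tests rather than stated here.
if __name__ == '__main__':
    bt = BusinessTime(holidays=USFederalHolidays())
    # Mirrors test_businesstimedelta_after_during above.
    print(bt.businesstimedelta(datetime(2014, 1, 16, 18, 30),
                               datetime(2014, 1, 22, 10, 0)))  # -> 2 days, 1:00:00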
|
seatgeek/businesstime
|
businesstime/test/__init__.py
|
Python
|
bsd-2-clause
| 11,646
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...vml import Vml
class TestWriteOshapelayout(unittest.TestCase):
"""
Test the Vml _write_shapelayout() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_shapelayout(self):
"""Test the _write_shapelayout() method"""
self.vml._write_shapelayout(1)
exp = """<o:shapelayout v:ext="edit"><o:idmap v:ext="edit" data="1"/></o:shapelayout>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/vml/test_write_shapelayout.py
|
Python
|
bsd-2-clause
| 804
|
from django.db import models
from django.utils.safestring import mark_safe
class Code(models.Model):
"""
Registration codes
"""
code = models.CharField(max_length=255, unique=True)
is_active = models.BooleanField(default=True)
class Meta:
app_label = 'profiles'
ordering = ('code',)
def __unicode__(self):
return mark_safe(self.code)
|
incuna/django-extensible-profiles
|
profiles/modules/codes/models.py
|
Python
|
bsd-2-clause
| 389
|
""" The Threaded cache returns immediately on writes, then sends to the upstream
in a thread.
"""
from . import Cache, PassthroughCache
from . import copy_file_or_flo
from Queue import Queue, Empty
from threading import Thread
import signal
upload_queue = Queue(2000)
keep_alive = True # Don't need a mutex on a boolean.
orig_handler = signal.getsignal(signal.SIGINT)
def handler(signum, frame):
    global keep_alive  # without this, the assignment below would only bind a local
    keep_alive = False
    orig_handler(signum, frame)
signal.signal(signal.SIGINT, handler)
upload_thread = None
class UploaderThread(Thread):
"""A Thread job to write a long job"""
def run(self):
from ckcache import new_cache
global keep_alive
global upload_queue
        while keep_alive and not upload_queue.empty():
try:
(rel_path, cache_string, buffer) = upload_queue.get(False)
print "Send ", rel_path, cache_string
cache = new_cache(cache_string)
with cache.put_stream(rel_path) as s:
copy_file_or_flo(buffer, s)
upload_queue.task_done()
except Empty:
break
def submit_task(rel_path, cache_string, buffer):
"""Put an upload job on the queue, and start the thread if required"""
global upload_queue
global upload_thread
upload_queue.put((rel_path, cache_string, buffer))
if upload_thread is None or not upload_thread.is_alive():
upload_thread = UploaderThread()
upload_thread.start()
class ThreadedWriteCache(Cache):
""" The Threaded cache returns immediately on writes, then sends to the upstream
in a thread.
"""
def __init__(self, upstream, **kwargs):
super(ThreadedWriteCache, self).__init__(upstream)
def clone(self):
return ThreadedWriteCache(upstream=self.upstream, **self.args)
@property
def priority(self):
return self.upstream.priority - 1 # give a slightly better priority
def set_priority(self, i):
self.upstream.set_priority(i)
##
# Put
##
    def put(self, source, rel_path, metadata=None):
        sink = self.put_stream(self._rename(rel_path), metadata=metadata)
        copy_file_or_flo(source, sink)  # write the source into the queued buffer
        sink.close()
        return self.path(self._rename(rel_path))
def put_stream(self, rel_path, metadata=None, cb=None):
from io import IOBase
from cStringIO import StringIO
if not metadata:
metadata = {}
class flo(IOBase):
            '''In-memory buffer that queues its contents for background upload when closed.'''
def __init__(self, upstream, rel_path):
self._upstream = upstream
self._rel_path = rel_path
self._buffer = StringIO()
@property
def rel_path(self):
return self._rel_path
def write(self, str_):
self._buffer.write(str_)
def close(self):
submit_task(
rel_path, str(
self._upstream), StringIO(
self._buffer.getvalue()))
self._buffer.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_:
return False
self.close()
self.put_metadata(rel_path, metadata)
return flo(self.upstream, rel_path)
##
# Get
##
def get_stream(self, rel_path, cb=None):
return self.upstream.get_stream(rel_path, cb=cb)
def get(self, rel_path, cb=None):
return self.upstream.get(rel_path, cb=cb)
def find(self, query):
'''Passes the query to the upstream, if it exists'''
if not self.upstream:
raise Exception("DelayedWriteCache must have an upstream")
return self.upstream.find(query)
##
# Delete
##
def remove(self, rel_path, propagate=False):
'''Delete the file from the cache, and from the upstream'''
if not self.upstream:
raise Exception("Must have an upstream")
# Must always propagate, since this is really just a filter.
self.upstream.remove(self._rename(rel_path), propagate)
##
# Information
##
def path(self, rel_path, **kwargs):
kwargs['missing_ok'] = True
return self.upstream.path(self._rename(rel_path), **kwargs)
def md5(self, rel_path):
'''Return the MD5 for the file. Since the file is compressed,
this will rely on the metadata'''
md = self.metadata(rel_path)
return md['md5']
def list(self, path=None, with_metadata=False, include_partitions=False):
'''get a list of all of the files in the repository'''
return self.upstream.list(
path,
with_metadata=with_metadata,
include_partitions=include_partitions)
def store_list(self, cb=None):
"""List the cache and store it as metadata. This allows for getting the list from HTTP caches
and other types where it is not possible to traverse the tree"""
return self.upstream.store_list(cb=cb)
def has(self, rel_path, md5=None, propagate=True):
        # This odd structure is because the MD5 check won't work if it is computed on an
        # uncompressed file and checked on a compressed file. But it will work if the check
        # is done on an S3 file, which stores the md5 as metadata.
r = self.upstream.has(
self._rename(rel_path),
md5=md5,
propagate=propagate)
if r:
return True
return self.upstream.has(
self._rename(rel_path), md5=None, propagate=propagate)
def metadata(self, rel_path):
return self.upstream.metadata(self._rename(rel_path))
@staticmethod
def _rename(rel_path):
return rel_path
@property
def repo_id(self):
return self.upstream.repo_id + '#async'
@property
def cache_dir(self):
return self.upstream.cache_dir
def __repr__(self):
us = str(self.upstream)
if '#' in us:
return us + ';async'
else:
return us + '#async'
class LogEntry(object):
"""A class for acessing LoggingCache Log entries and deleting them"""
def __init__(self, cache, rel_path):
import json
self.rel_path = rel_path
self.__dict__.update(json.load(cache.get_stream(rel_path)))
def remove(self):
from ckcache import new_cache
cache = new_cache(self.cache)
cache.remove(self.rel_path)
class LoggingCache(PassthroughCache):
"""The Logging cache writes a record of all of the files that bave been put in a '_log'
directory in the upstream """
def write_record(self, rel_path):
import os
import hashlib
import json
import time
t = time.time()
path = os.path.join(
"_log", str(int(t * 1000)) + '_' + hashlib.md5(rel_path).hexdigest())
with self.upstream.put_stream(path) as s:
s.write(
json.dumps(
dict(
path=rel_path,
cache=str(
self.upstream),
time=t)))
def put(self, source, rel_path, metadata=None):
self.write_record(rel_path)
return self.upstream.put(source, rel_path, metadata)
def put_stream(self, rel_path, metadata=None):
self.write_record(rel_path)
return self.upstream.put_stream(rel_path, metadata)
def list_log(self):
for e in sorted(self.upstream.list('_log').keys()):
yield LogEntry(self.upstream, e)
def __repr__(self):
return str(self.upstream) + \
(';' if '#' in str(self.upstream) else '#') + 'record'
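# Self-contained illustration (not part of ckcache) of the same write-behind
# pattern used above: writes land on a queue and a worker thread drains it in
# the background. Every name below is invented for this example only.
if __name__ == '__main__':
    import os.path
    import tempfile
    demo_queue = Queue()
    def _demo_worker():
        while not demo_queue.empty():
            try:
                path, payload = demo_queue.get(False)
            except Empty:
                break
            with open(path, 'w') as f:  # stands in for the slow upstream write
                f.write(payload)
            demo_queue.task_done()
    target = os.path.join(tempfile.mkdtemp(), 'example.txt')
    demo_queue.put((target, 'hello'))
    Thread(target=_demo_worker).start()
    demo_queue.join()  # block until the background write has finished
    print open(target).read()  # -> hello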
|
CivicKnowledge/ckcache
|
ckcache/async.py
|
Python
|
bsd-2-clause
| 7,854
|
import cv2
import numpy as np
import CropImage
from random import randint
def imDiff(im1, im2):
# im1 = im1[0:im1.shape[0]-8, 0:im1.shape[1]]
# im2 = im2[0:im2.shape[0]-8, 0:im2.shape[1]]
imGray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
imGray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
print(im1.shape, im2.shape)
im = cv2.absdiff(imGray1, imGray2)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
thresh = cv2.erode(im, kernel, iterations=3)
thresh = cv2.erode(thresh, kernel, iterations=3)
ret, thresh = cv2.threshold(im, 127,255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
# finalIm = thresh
thresh = cv2.dilate(thresh, kernel, iterations=2)
# thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
img2, contours,hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_TC89_KCOS )
# finalIm = cv2.erode(finalIm, kernel=np.ones(shape=(3, 3), dtype=np.uint8))
contours = [cc for cc in contours if 10 < cv2.contourArea(cc) < 15000]
'''for c in contours:
x,y,w,h = cv2.boundingRect(c)
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])'''
#cv2.circle(im, (cX, cY), 2, (255, 0, 2), 3)
#cv2.rectangle(im, (cX, cY), (cX+w, cY+h), (0, 255, 0), 2)
colors = []
for i in range(50):
colors.append((randint(0,255), randint(0,255), randint(0,255)))
# finalIm = cv2.cvtColor(im1, cv2.COLOR_GRAY2BGR)
# testEllipse = ((100, 50), (20, 100), 90)
# cv2.ellipse(im2, testEllipse, (0,0,255), 3)
# cv2.circle(im2, (50, 118), 2, (255,0,0))
# cv2.circle(im2, (100, 100), 2, (255,0,0))
for i, cnt in enumerate(contours):
try:
# ellipse = cv2.fitEllipse(cnt)
# if ellipse[0][0] < 0 or ellipse[0][0] > im.shape[1] or ellipse[0][1] < 0 or ellipse[0][1] > im.shape[0]:
# continue
# cv2.ellipse(im1, ellipse, colors[i], 1)
# cv2.ellipse(im2, ellipse, colors[i], 1)
cv2.drawContours(im1, [cnt], 0, colors[i], 1)
cv2.drawContours(im2, [cnt], 0, colors[i], 1)
except cv2.error:
continue
# cv2.drawContours(im1, [cnt], 0, colors[i], 2)
# cv2.drawContours(im2, [cnt], 0, colors[i], 2)
# cv2.drawContours(finalIm, contours, -1, (0,255,0), 2)
# cv2.imwrite('p441.png', im1)
# cv2.imwrite('p442.png', im2)
cv2.imshow('diff', thresh)
# cv2.waitKey()
# cv2.imwrite("difference.png", finalIm)
return im1, im2
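# Illustration only (not part of the original script): a synthetic check of the
# same pipeline imDiff uses -- absdiff, binary threshold, then contour
# extraction. It is defined but never called.
def _synthetic_diff_check():
    a = np.zeros((100, 100, 3), dtype=np.uint8)
    b = a.copy()
    cv2.rectangle(b, (30, 30), (60, 60), (255, 255, 255), -1)  # one changed patch
    diff = cv2.absdiff(cv2.cvtColor(a, cv2.COLOR_BGR2GRAY),
                       cv2.cvtColor(b, cv2.COLOR_BGR2GRAY))
    ret, thresh = cv2.threshold(diff, 127, 255, cv2.THRESH_BINARY)
    found = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = found[-2]  # handles both the OpenCV 3 and OpenCV 4 return shapes
    print(len(contours))  # expected: 1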
if __name__ == '__main__':
Folder = 'C:\\Users\\soumy\\Downloads\\HocusFocusSeg\\HocusFocusSeg\\'
import os
DownImages = []
for f in os.listdir(Folder + 'down'):
DownImages.append(Folder + 'down\\' + str(f))
UpImages = []
for f in os.listdir(Folder + 'up'):
UpImages.append(Folder + 'up\\' + str(f))
for u, d in zip(UpImages, DownImages):
print(u, d)
im1 = cv2.imread(u, cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread(d, cv2.IMREAD_GRAYSCALE)
# dilated1 = cv2.dilate(im1, kernel=np.zeros(shape=(3, 3)), iterations=1)
# dilated2 = cv2.dilate(im2, kernel=np.zeros(shape=(3, 3)), iterations=1)
# imDiff(dilated1, dilated2)
imOut = CropImage.cropImage(im1, im2)
imDiff(im1, imOut)
|
SoumyajitPal/YinYang
|
ImDiff.py
|
Python
|
bsd-2-clause
| 3,451
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import time
import re
import cv2
import os
import sys
from collections import defaultdict
from classifySoftmax import Net
from sklearn.metrics import pairwise_distances
def parse_args():
print( ' '.join(sys.argv))
import argparse
parser = argparse.ArgumentParser(epilog="Extract features with Openface network.")
parser.add_argument('-a', '--annotation',
required=True,
help='File with annotation. File name and person identifier on each line.')
parser.add_argument( '-f', '--feature-file',
help="Numpy matrix with features.", required=True)
parser.add_argument('-i', '--iterations',
default=100,
type=int,
help='Number of random evaluation iterations.')
args = parser.parse_args()
return args
def readAnnotation(fileName):
annotations = defaultdict(list)
with open(fileName, 'r') as f:
for lineID, line in enumerate(f):
fileName, personID = line.split()
annotations[personID].append({'line':lineID, 'file': fileName})
# for person in annotations.keys():
# if len(annotations[person]) == 1:
# del annotations[person]
# print('Removing "{}" from evaluation as it has only one example.'.format(person))
return annotations
def main():
args = parse_args()
features = np.load(args.feature_file)
annotation = readAnnotation(args.annotation)
print('Evaluating on {} identities.'.format(len(annotation)))
sumCount = 0
goodCount = 0
for iteration in range(args.iterations):
queryIDs = []
databaseIDs = []
for person in annotation:
query, database = np.random.choice( annotation[person], 2, replace=False)
queryIDs.append(query['line'])
databaseIDs.append(database['line'])
queryFeatures = features[ queryIDs, :]
databaseFeatures = features[databaseIDs, :]
distances = pairwise_distances(queryFeatures, databaseFeatures, metric='cosine')
good0 = np.sum(np.argmin( distances, axis=0) == np.arange(len(annotation)))
good1 = np.sum(np.argmin( distances, axis=1) == np.arange(len(annotation)))
sumCount += 2*len(annotation)
goodCount += good0 + good1
        print(len(annotation), good0, good1, (good0 + good1) / 2.0 / len(annotation), sumCount, goodCount / float(sumCount))
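# Illustration only (never called by the script): with one query and one
# database vector per identity, rank-1 accuracy is the fraction of queries whose
# nearest database vector under cosine distance belongs to the same identity.
def _toy_rank1_example():
    query = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])
    database = np.array([[0.9, 0.1], [0.1, 0.9], [0.6, 0.8]])
    distances = pairwise_distances(query, database, metric='cosine')
    matches = np.argmin(distances, axis=1) == np.arange(len(query))
    print(matches.mean())  # -> 1.0, every query matched its own identity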
if __name__ == "__main__":
main()
|
DCGM/EmotionService
|
src/testFaceRecognition.py
|
Python
|
bsd-2-clause
| 2,571
|
from enum import IntEnum
import copy
class Type(IntEnum):
UNKNOWN = 0
FLOAT = 1
DOUBLE = 2
INT8_T = 3
UINT8_T = 4
INT16_T = 5
UINT16_T = 6
INT32_T = 7
UINT32_T = 8
INT64_T = 9
UINT64_T = 10
STRING = 11
VOID = 12
BOOL = 13
class Availability(IntEnum):
UNKNOWN = 0
PUBLIC = 1
LICENSED = 2
EXTENSION = 3
class SpecBase:
# Base class used in code generator classes
def __init__(self, name):
self.name = name
self.parent = None
self.generator = None
def __str__(self):
if self.parent:
return '{}.{}'.format(self.parent, self.name)
else:
return self.name
def construct_copy(self, other):
for k, v in other.__dict__.items():
setattr(self, k, v)
class Constant(SpecBase):
def __init__(self, name, value, type, doc=None):
super().__init__(name)
self.name = name
self.value = value
self.type = type
self.doc = doc
@property
def description(self):
return self.generator.get_description(self.doc)
def validate(self):
if self.value.startswith('0x') and not self.type == Type.STRING and not (
self.type == Type.UINT32_T or
self.type == Type.INT32_T or
self.type == Type.UINT64_T or
self.type == Type.INT64_T):
raise RuntimeError('Unsupported type for constant {} with hex value {}'.format(self, self.value))
if self.parent:
if not isinstance(self.type, Type) and not self.parent.is_null_handle:
raise RuntimeError('Unsupported type {} for constant {}'.format(self.type, self))
class Define(SpecBase):
def __init__(self, name, doc, is_null_handle=False, constants=None):
super().__init__(name)
self.name = name
self.doc = doc
self.is_null_handle = is_null_handle
self.constants = [] if constants is None else constants
@property
def description(self):
return self.generator.get_description(self.doc)
def validate(self):
if not self.doc:
raise RuntimeError('Undocumented define {}'.format(self))
for constant in self.constants:
constant.validate()
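# Smoke-test sketch (illustration only; EXAMPLE_GROUP and EXAMPLE_A are invented
# names, not real GX API definitions).
if __name__ == '__main__':
    d = Define('EXAMPLE_GROUP', doc='Example constant group', constants=[
        Constant('EXAMPLE_A', '1', Type.INT32_T, doc='First value')])
    for c in d.constants:
        c.parent = d
    d.validate()  # raises RuntimeError on missing docs or unsupported types
    print(d.constants[0])  # -> EXAMPLE_GROUP.EXAMPLE_A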
|
GeosoftInc/gxapi
|
spec/gxdefs.py
|
Python
|
bsd-2-clause
| 2,304
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from gdcmdtools.base import BASE_INFO
from gdcmdtools.base import DEBUG_LEVEL
from gdcmdtools.auth import GDAuth
import argparse
from argparse import RawTextHelpFormatter
from gdcmdtools.auth import DICT_OF_REDIRECT_URI
import logging
logger = logging.getLogger()
__THIS_APP = 'gdauth'
__THIS_DESCRIPTION = 'Google Drive OAuth2 authentication tool'
__THIS_VERSION = BASE_INFO["version"]
def test():
assert True
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser( \
description='%s v%s - %s - %s (%s)' %
(__THIS_APP, __THIS_VERSION, __THIS_DESCRIPTION, BASE_INFO["app"], BASE_INFO["description"]),
formatter_class=RawTextHelpFormatter)
default_secrets_file = os.path.expanduser('~/.%s.secrets' % BASE_INFO["app"])
arg_parser.add_argument('secret_file', default=default_secrets_file, help='the secret file in JSON format, %s will be overwritten' % default_secrets_file)
choices_redirect_uri = list(DICT_OF_REDIRECT_URI.keys())
list_help_redirect_uri = \
[ (k+": "+DICT_OF_REDIRECT_URI[k]) for k in DICT_OF_REDIRECT_URI]
help_redirect_uri = '\n'.join(list_help_redirect_uri)
arg_parser.add_argument('-i', '--client_id',
help=
'specify the client id')
arg_parser.add_argument('-s', '--client_secret',
help=
'specify the client secret')
arg_parser.add_argument('-r', '--redirect_uri', choices=choices_redirect_uri,
default="oob",
help=
'specify the redirect URI for the oauth2 flow, can be:\n%s' %
help_redirect_uri)
arg_parser.add_argument('--debug', choices=DEBUG_LEVEL, default=DEBUG_LEVEL[-1],
help='define the debug level')
args = arg_parser.parse_args()
logger.debug(args)
# post-processing of argument parsing
    if (getattr(args, 'client_id', None) is None) != (getattr(args, 'client_secret', None) is None):
arg_parser.error("must supply --client_id with --client_secret")
    # set debug level
logger.setLevel(getattr(logging, args.debug.upper()))
    if_oob = args.redirect_uri == 'oob'
auth = GDAuth(args.secret_file, if_oob)
result = auth.run()
    if result is None:
print("Failed to pass OAuth2 authentication")
sys.exit(1)
else:
print("The OAuth2 authentication has completed")
sys.exit(0)
|
commonssibi/gdcmdtools
|
gdauth.py
|
Python
|
bsd-2-clause
| 2,528
|