code (stringlengths 2 to 1.05M)
repo_name (stringlengths 5 to 104)
path (stringlengths 4 to 251)
language (stringclasses, 1 value)
license (stringclasses, 15 values)
size (int32, 2 to 1.05M)
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from py_paddle import swig_paddle, DataProviderConverter from paddle.trainer.PyDataProvider2 import dense_vector from paddle.trainer.config_parser import parse_config TEST_DATA = [[[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.215686, 0.533333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.67451, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.886275, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.192157, 0.070588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.670588, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117647, 0.933333, 0.858824, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090196, 0.858824, 0.992157, 0.831373, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.141176, 0.992157, 0.992157, 0.611765, 0.054902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.258824, 0.992157, 0.992157, 0.529412, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.368627, 0.992157, 0.992157, 0.419608, 0.003922, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.094118, 0.835294, 0.992157, 0.992157, 0.517647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.603922, 0.992157, 0.992157, 0.992157, 0.603922, 0.545098, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.992157, 0.956863, 0.062745, 0, 0, 0, 0, 0, 0, 0, 0, 0.011765, 0.666667, 0.992157, 0.992157, 0.992157, 0.992157, 0.992157, 0.745098, 0.137255, 0, 0, 0, 0, 0, 0.152941, 0.866667, 0.992157, 0.992157, 0.521569, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.992157, 0.803922, 0.352941, 0.745098, 0.992157, 0.945098, 0.317647, 0, 0, 0, 0, 0.580392, 0.992157, 0.992157, 0.764706, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.776471, 0.043137, 0, 0.007843, 0.27451, 0.882353, 0.941176, 0.176471, 0, 0, 0.180392, 0.898039, 0.992157, 0.992157, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.713725, 0, 0, 0, 0, 0.627451, 0.992157, 0.729412, 0.062745, 0, 0.509804, 0.992157, 0.992157, 0.776471, 0.035294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.494118, 0.992157, 0.992157, 0.968627, 0.168627, 0, 0, 0, 0.423529, 0.992157, 0.992157, 0.364706, 0, 0.717647, 0.992157, 0.992157, 0.317647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.533333, 0.992157, 0.984314, 0.945098, 0.603922, 0, 0, 0, 0.003922, 0.466667, 0.992157, 0.988235, 0.976471, 0.992157, 0.992157, 0.788235, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.686275, 0.882353, 0.364706, 0, 0, 0, 0, 0, 0, 0.098039, 0.588235, 0.992157, 0.992157, 0.992157, 0.980392, 0.305882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.101961, 0.67451, 0.321569, 0, 0, 0, 0, 0, 0, 0, 0.105882, 0.733333, 0.976471, 0.811765, 0.713725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.65098, 
0.992157, 0.321569, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25098, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.94902, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.968627, 0.764706, 0.152941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.498039, 0.25098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.298039, 0.333333, 0.333333, 0.333333, 0.337255, 0.333333, 0.333333, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027451, 0.223529, 0.776471, 0.964706, 0.988235, 0.988235, 0.988235, 0.992157, 0.988235, 0.988235, 0.780392, 0.098039, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.14902, 0.698039, 0.988235, 0.992157, 0.988235, 0.901961, 0.87451, 0.568627, 0.882353, 0.976471, 0.988235, 0.988235, 0.501961, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.647059, 0.988235, 0.988235, 0.745098, 0.439216, 0.098039, 0, 0, 0, 0.572549, 0.988235, 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157, 0.941176, 0.247059, 0, 0, 0, 0, 0, 0, 0.188235, 0.898039, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.039216, 0.639216, 0.933333, 0.988235, 0.913725, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0.113725, 0.843137, 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235294, 0.988235, 0.992157, 0.988235, 0.815686, 0.07451, 0, 0, 0, 0, 0, 0, 0, 0.333333, 0.988235, 0.988235, 0.552941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.211765, 0.878431, 0.988235, 0.992157, 0.701961, 0.329412, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0.698039, 0.988235, 0.913725, 0.145098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.890196, 0.988235, 0.988235, 0.745098, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.882353, 0.988235, 0.568627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157, 0.992157, 0.992157, 0.447059, 0.294118, 0, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.768627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.988235, 0.988235, 0.988235, 0.992157, 0.47451, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.933333, 0.87451, 0.509804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.992157, 0.988235, 0.937255, 0.792157, 0.988235, 0.894118, 0.082353, 0, 0, 0, 0, 0, 0, 0.027451, 0.647059, 0.992157, 0.654902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.913725, 0.329412, 0.376471, 0.184314, 0, 0, 0, 0, 0, 0, 0.027451, 0.513725, 0.988235, 0.635294, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.929412, 0.988235, 0.988235, 0.741176, 0.309804, 0, 0, 0, 0, 0, 0, 0.529412, 0.988235, 0.678431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.223529, 0.992157, 0.992157, 1, 0.992157, 0.992157, 0.992157, 0.992157, 1, 0.992157, 0.992157, 0.882353, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023529, 0.478431, 0.654902, 0.658824, 0.952941, 0.988235, 0.988235, 0.988235, 0.992157, 0.988235, 0.729412, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.647059, 0.764706, 0.764706, 0.768627, 0.580392, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]]] def main(): conf = parse_config("./mnist_model/trainer_config.py", "") print conf.data_config.load_data_args network = swig_paddle.GradientMachine.createFromConfigProto( conf.model_config) assert isinstance(network, swig_paddle.GradientMachine) # For code hint. network.loadParameters("./mnist_model/") converter = DataProviderConverter([dense_vector(784)]) inArg = converter(TEST_DATA) print network.forwardTest(inArg) if __name__ == '__main__': swig_paddle.initPaddle("--use_gpu=0") main()
repo_name: qingqing01/Paddle
path: doc/ui/predict/predict_sample.py
language: Python
license: apache-2.0
size: 8,781
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import time

from TestUtils import TestUtilsMixin


class NativeMapTest(TestUtilsMixin, unittest.TestCase):
    "Native Map Unit Test"
    order = 21
    testClass = ""

    def setUp(self):
        pass

    def runTest(self):
        handle = self.runClassOn('localhost',
                                 'org.apache.accumulo.test.functional.NativeMapTest',
                                 [])
        self.waitForStop(handle, 20)

    def tearDown(self):
        pass


def suite():
    result = unittest.TestSuite()
    result.addTest(NativeMapTest())
    return result
repo_name: phrocker/accumulo
path: test/system/auto/simple/nativeMap.py
language: Python
license: apache-2.0
size: 1,323
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import logging import os import shutil from pants.backend.jvm.subsystems.scoverage_platform import ScoveragePlatform from pants.backend.jvm.tasks.coverage.cobertura import Cobertura from pants.backend.jvm.tasks.coverage.engine import NoCoverage from pants.backend.jvm.tasks.coverage.jacoco import Jacoco from pants.backend.jvm.tasks.coverage.scoverage import Scoverage from pants.subsystem.subsystem import Subsystem from pants.util.dirutil import safe_mkdir from pants.util.strutil import safe_shlex_split logger = logging.getLogger(__name__) class CodeCoverageSettings: """A class containing settings for code coverage tasks.""" def __init__( self, options, context, workdir, tool_classpath, confs, log, copy2=shutil.copy2, copytree=shutil.copytree, is_file=os.path.isfile, safe_md=safe_mkdir, ): self.options = options self.context = context self.workdir = workdir self.tool_classpath = tool_classpath self.confs = confs self.log = log self.coverage_dir = os.path.join(self.workdir, "coverage") self.coverage_jvm_options = [] for jvm_option in options.coverage_jvm_options: self.coverage_jvm_options.extend(safe_shlex_split(jvm_option)) self.coverage_open = options.coverage_open self.coverage_force = options.coverage_force # Injecting these methods to make unit testing cleaner. self.copy2 = copy2 self.copytree = copytree self.is_file = is_file self.safe_makedir = safe_md @classmethod def from_task(cls, task, workdir=None): return cls( options=task.get_options(), context=task.context, workdir=workdir or task.workdir, tool_classpath=task.tool_classpath, confs=task.confs, log=task.context.log, ) class CodeCoverage(Subsystem): """Manages setup and construction of JVM code coverage engines.""" options_scope = "coverage" @classmethod def subsystem_dependencies(cls): return super().subsystem_dependencies() + ( Cobertura.Factory, Jacoco.Factory, Scoverage.Factory, ) # TODO(jtrobec): move these to subsystem scope after deprecating @staticmethod def register_junit_options(register, register_jvm_tool): register("--coverage", type=bool, fingerprint=True, help="Collect code coverage data.") register( "--coverage-processor", advanced=True, fingerprint=True, choices=["cobertura", "jacoco", "scoverage"], default=None, help="Which coverage processor to use if --coverage is enabled. If this option is " "unset but coverage is enabled implicitly or explicitly, defaults to 'cobertura'. " "If this option is explicitly set, implies --coverage. If this option is set to " "scoverage, then first scoverage MUST be enabled by passing option " "--scoverage-enable-scoverage.", ) # We need to fingerprint this even though it nominally UI-only affecting option since the # presence of this option alone can implicitly flag on `--coverage`. register( "--coverage-open", type=bool, fingerprint=True, help="Open the generated HTML coverage report in a browser. Implies --coverage ", ) register( "--coverage-jvm-options", advanced=True, type=list, fingerprint=True, help="JVM flags to be added when running the coverage processor. 
For example: " "{flag}=-Xmx4g {flag}=-Xms2g".format(flag="--coverage-jvm-options"), ) register( "--coverage-force", advanced=True, type=bool, help="Attempt to run the reporting phase of coverage even if tests failed " "(defaults to False, as otherwise the coverage results would be unreliable).", ) # register options for coverage engines # TODO(jtrobec): get rid of these calls when engines are dependent subsystems Cobertura.register_junit_options(register, register_jvm_tool) class InvalidCoverageEngine(Exception): """Indicates an invalid coverage engine type was selected.""" def get_coverage_engine(self, task, output_dir, all_targets, execute_java): options = task.get_options() enable_scoverage = ScoveragePlatform.global_instance().get_options().enable_scoverage processor = options.coverage_processor if processor == "scoverage" and not enable_scoverage: raise self.InvalidCoverageEngine( "Cannot set processor to scoverage without first enabling " "scoverage (by passing --scoverage-enable-scoverage option)" ) if enable_scoverage: if processor not in (None, "scoverage"): raise self.InvalidCoverageEngine( f"Scoverage is enabled. " f"Cannot use {processor} as the engine. Set engine to scoverage " f"(--test-junit-coverage-processor=scoverage)" ) processor = "scoverage" if options.coverage or processor or options.is_flagged("coverage_open"): settings = CodeCoverageSettings.from_task(task, workdir=output_dir) if processor in ("cobertura", None): return Cobertura.Factory.global_instance().create( settings, all_targets, execute_java ) elif processor == "jacoco": return Jacoco.Factory.global_instance().create(settings, all_targets, execute_java) elif processor == "scoverage": return Scoverage.Factory.global_instance().create( settings, all_targets, execute_java ) else: # NB: We should never get here since the `--coverage-processor` is restricted by `choices`, # but for clarity. raise self.InvalidCoverageEngine( "Unknown and unexpected coverage processor {!r}!".format( options.coverage_processor ) ) else: return NoCoverage()
repo_name: tdyas/pants
path: src/python/pants/backend/jvm/tasks/coverage/manager.py
language: Python
license: apache-2.0
size: 6,621
# Copyright 2012-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Identity v3 User action implementations""" import copy import six import sys from keystoneauth1 import exceptions as ks_exc from eclcli.common import command from eclcli.common import utils from eclcli.i18n import _ # noqa from eclcli.identity import common class CreateUser(command.ShowOne): """Create new user""" def get_parser(self, prog_name): parser = super(CreateUser, self).get_parser(prog_name) parser.add_argument( 'name', metavar='<name>', help='New user name', ) parser.add_argument( '--domain', metavar='<domain>', help='Default domain (name or ID)', ) parser.add_argument( '--project', metavar='<project>', help='Default project (name or ID)', ) common.add_project_domain_option_to_parser(parser) parser.add_argument( '--password', metavar='<password>', help='Set user password', ) parser.add_argument( '--password-prompt', dest="password_prompt", action="store_true", help='Prompt interactively for password', ) parser.add_argument( '--email', metavar='<email-address>', help='Set user email address', ) parser.add_argument( '--description', metavar='<description>', help='User description', ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', action='store_true', help='Enable user (default)', ) enable_group.add_argument( '--disable', action='store_true', help='Disable user', ) parser.add_argument( '--or-show', action='store_true', help=_('Return existing user'), ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity project_id = None if parsed_args.project: project_id = common.find_project(identity_client, parsed_args.project, parsed_args.project_domain).id domain_id = None if parsed_args.domain: domain_id = common.find_domain(identity_client, parsed_args.domain).id enabled = True if parsed_args.disable: enabled = False if parsed_args.password_prompt: parsed_args.password = utils.get_password(self.app.stdin) try: user = identity_client.users.create( name=parsed_args.name, domain=domain_id, default_project=project_id, password=parsed_args.password, email=parsed_args.email, description=parsed_args.description, enabled=enabled ) except ks_exc.Conflict as e: if parsed_args.or_show: user = utils.find_resource(identity_client.users, parsed_args.name, domain_id=domain_id) self.log.info('Returning existing user %s', user.name) else: raise e user._info.pop('links') return zip(*sorted(six.iteritems(user._info))) class DeleteUser(command.Command): """Delete user(s)""" def get_parser(self, prog_name): parser = super(DeleteUser, self).get_parser(prog_name) parser.add_argument( 'users', metavar='<user>', nargs="+", help='User(s) to delete (name or ID)', ) parser.add_argument( '--domain', metavar='<domain>', help='Domain owning <user> (name or ID)', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity domain = None if parsed_args.domain: domain = common.find_domain(identity_client, 
parsed_args.domain) for user in parsed_args.users: if domain is not None: user_obj = utils.find_resource(identity_client.users, user, domain_id=domain.id) else: user_obj = utils.find_resource(identity_client.users, user) identity_client.users.delete(user_obj.id) class ListUser(command.Lister): """List users""" def get_parser(self, prog_name): parser = super(ListUser, self).get_parser(prog_name) parser.add_argument( '--domain', metavar='<domain>', help='Filter users by <domain> (name or ID)', ) project_or_group = parser.add_mutually_exclusive_group() project_or_group.add_argument( '--group', metavar='<group>', help='Filter users by <group> membership (name or ID)', ) project_or_group.add_argument( '--project', metavar='<project>', help='Filter users by <project> (name or ID)', ) parser.add_argument( '--long', action='store_true', default=False, help='List additional fields in output', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity domain = None if parsed_args.domain: domain = common.find_domain(identity_client, parsed_args.domain).id group = None if parsed_args.group: group = common.find_group(identity_client, parsed_args.group, parsed_args.domain).id if parsed_args.project: if domain is not None: project = utils.find_resource( identity_client.projects, parsed_args.project, domain_id=domain ).id else: project = utils.find_resource( identity_client.projects, parsed_args.project, ).id assignments = identity_client.role_assignments.list( project=project) # NOTE(stevemar): If a user has more than one role on a project # then they will have two entries in the returned data. Since we # are looking for any role, let's just track unique user IDs. user_ids = set() for assignment in assignments: if hasattr(assignment, 'user'): user_ids.add(assignment.user['id']) # NOTE(stevemar): Call find_resource once we have unique IDs, so # it's fewer trips to the Identity API, then collect the data. 
data = [] for user_id in user_ids: user = utils.find_resource(identity_client.users, user_id) data.append(user) else: data = identity_client.users.list( domain=domain, group=group, ) # Column handling if parsed_args.long: columns = ['ID', 'Name', 'Default Project Id', 'Domain Id', 'Description', 'Email', 'Enabled'] column_headers = copy.deepcopy(columns) column_headers[2] = 'Project' column_headers[3] = 'Domain' else: columns = ['ID', 'Name'] column_headers = columns return ( column_headers, (utils.get_item_properties( s, columns, formatters={}, ) for s in data) ) class SetUser(command.Command): """Set user properties""" def get_parser(self, prog_name): parser = super(SetUser, self).get_parser(prog_name) parser.add_argument( 'user', metavar='<user>', help='User to change (name or ID)', ) parser.add_argument( '--name', metavar='<name>', help='Set user name', ) parser.add_argument( '--project', metavar='<project>', help='Set default project (name or ID)', ) common.add_project_domain_option_to_parser(parser) parser.add_argument( '--password', metavar='<password>', help='Set user password', ) parser.add_argument( '--password-prompt', dest="password_prompt", action="store_true", help='Prompt interactively for password', ) parser.add_argument( '--email', metavar='<email-address>', help='Set user email address', ) parser.add_argument( '--description', metavar='<description>', help='Set user description', ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', action='store_true', help='Enable user (default)', ) enable_group.add_argument( '--disable', action='store_true', help='Disable user', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity if parsed_args.password_prompt: parsed_args.password = utils.get_password(self.app.stdin) if (not parsed_args.name and not parsed_args.name and not parsed_args.password and not parsed_args.email and not parsed_args.project and not parsed_args.description and not parsed_args.enable and not parsed_args.disable): sys.stderr.write("Incorrect set of arguments " "provided. See ecl --help for more " "details\n") return user = utils.find_resource( identity_client.users, parsed_args.user, ) kwargs = {} if parsed_args.name: kwargs['name'] = parsed_args.name if parsed_args.email: kwargs['email'] = parsed_args.email if parsed_args.password: kwargs['password'] = parsed_args.password if parsed_args.description: kwargs['description'] = parsed_args.description if parsed_args.project: project_id = common.find_project(identity_client, parsed_args.project, parsed_args.project_domain).id kwargs['default_project'] = project_id kwargs['enabled'] = user.enabled if parsed_args.enable: kwargs['enabled'] = True if parsed_args.disable: kwargs['enabled'] = False identity_client.users.update(user.id, **kwargs) class SetPasswordUser(command.Command): """Change current user password""" def get_parser(self, prog_name): parser = super(SetPasswordUser, self).get_parser(prog_name) parser.add_argument( '--password', metavar='<new-password>', help='New user password' ) parser.add_argument( '--original-password', metavar='<original-password>', help='Original user password' ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity # FIXME(gyee): there are two scenarios: # # 1. user update password for himself # 2. admin update password on behalf of the user. 
This is an unlikely # scenario because that will require admin knowing the user's # original password which is forbidden under most security # policies. # # Of the two scenarios above, user either authenticate using its # original password or an authentication token. For scenario #1, # if user is authenticating with its original password (i.e. passing # --os-password argument), we can just make use of it instead of using # --original-password or prompting. For scenario #2, admin will need # to specify --original-password option or this won't work because # --os-password is the admin's own password. In the future if we stop # supporting scenario #2 then we can just do this. # # current_password = (parsed_args.original_password or # self.app.cloud.password) # current_password = parsed_args.original_password if current_password is None: current_password = utils.get_password( self.app.stdin, prompt="Current Password:", confirm=False) password = parsed_args.password if password is None: password = utils.get_password( self.app.stdin, prompt="New Password:") identity_client.users.update_password(current_password, password) class ShowUser(command.ShowOne): """Display user details""" def get_parser(self, prog_name): parser = super(ShowUser, self).get_parser(prog_name) parser.add_argument( 'user', metavar='<user>', help='User to display (name or ID)', ) parser.add_argument( '--domain', metavar='<domain>', help='Domain owning <user> (name or ID)', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity if parsed_args.domain: domain = common.find_domain(identity_client, parsed_args.domain) user = utils.find_resource(identity_client.users, parsed_args.user, domain_id=domain.id) else: user = utils.find_resource(identity_client.users, parsed_args.user) user._info.pop('links') return zip(*sorted(six.iteritems(user._info)))
repo_name: nttcom/eclcli
path: eclcli/identity/v3/user.py
language: Python
license: apache-2.0
size: 15,378
#!/usr/bin/env python3 # Copyright (c) 2014 Pawel Rozlach, Brainly.com sp. z o.o. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import logging from inventory_tool.exception import MalformedInputException, GenericException # For Python3 < 3.3, ipaddress module is available as an extra module, # under a different name: try: from ipaddress import ip_address from ipaddress import ip_network from ipaddress import IPv4Address from ipaddress import IPv6Address ipaddress_name_network = "network_address" ipaddress_name_broadcast = "broadcast_address" except ImportError: from ipaddr import IPAddress as ip_address from ipaddr import IPNetwork as ip_network from ipaddr import IPv4Address from ipaddr import IPv6Address ipaddress_name_network = "network" ipaddress_name_broadcast = "broadcast" class IPPool: """IP pool representation and manipulation This class takes care of managing ip pools available, and this includes: - assigning and releasing IPs, both manually and automatically - booking and canceling IPs for special use - serialization of IP pools for storage in YAML documents - human readable representation of ip pools """ __slots__ = ['_network', '_allocated', '_reserved'] def __init__(self, network, allocated=[], reserved=[]): """Init IPPool Args: network: network from which ip addresses should be allocated allocated: list of ip addres strings that are already allocated reserved: list of ip address strings that should not be available for allocation. Raises: ValueError: ip address or network is invalid or malformed. """ self._network = ip_network(network) self._allocated = [ip_address(x) for x in allocated] self._reserved = [ip_address(x) for x in reserved] def get_hash(self): """Extract data from object in a way suitable for serializing Returns: Method returns data necessary for re-initializing the same object in a form suitable for serialization using YAML/JSON. Normally, this object contains other objects which can not be easily serialized or are not very readable after serializing. """ tmp = {"network": str(self._network), "allocated": sorted([str(x) for x in self._allocated]), "reserved": sorted([str(x) for x in self._reserved]), } return tmp def allocate(self, ip=None): """Allocate an IP from the pool. Method allocates next free adress from the pool if ip is None, or marks given ip as already allocated Args: ip: either None or ipaddress.ip_address object Returns: An ip that has been allocated. In case when "ip" argument is not none, then the object pointed by it is returned. 
Raises: MalformedInputException - user provided data is invalid GenericException - pool has run out of free ip adresses """ if ip is not None: if ip not in self._network: msg = "Attempt to allocate IP from outside of the pool: " msg += "{0} is not in {1}.".format(ip, self._network) raise MalformedInputException(msg) if ip in self._allocated: msg = "Attempt to allocate already allocated IP: " + str(ip) raise MalformedInputException(msg) elif ip in self._reserved: msg = "Attempt to allocate from reserved pool: " + str(ip) raise MalformedInputException(msg) else: self._allocated.append(ip) return ip else: for candidate in [x for x in self._network if x != self._network.__getattribute__(ipaddress_name_broadcast) and x != self._network.__getattribute__(ipaddress_name_network)]: if candidate not in self._allocated and \ candidate not in self._reserved: logging.info( "IP {0} has been auto-assigned.".format(candidate)) self._allocated.append(candidate) return candidate msg = "The pool has run out of free ip addresses." raise GenericException(msg) def release(self, ip): """Mark given IP as free, available for allocation. Args: ip: ip to deallocate Raises: MalformedInputException: provided ip has not been alocated yet. """ if ip in self._allocated: self._allocated.remove(ip_address(ip)) else: msg = "An attempt to release an ip {0} ".format(ip) msg += "which has not been allocated yet." raise MalformedInputException(msg) def release_all(self): """Mark all ip addresses in the pool as available""" self._allocated = [] def overlaps(self, other): """Check if IP pools overlap Args: other: ip pool to check for overlap with this pool """ return self._network.overlaps(other._network) def book(self, ip): """Prevent IP from being allocated. Marks given IP as reserved/unavailable for allocation. Args: ip: ip to book. Raises: MalformedInputException: ip does not belong to this pool """ if ip not in self._network: msg = "IP {0} does not belong to network {1}".format(ip, self._network) raise MalformedInputException(msg) elif ip in self._reserved: msg = "IP {0} has already been booked".format(ip) raise MalformedInputException(msg) else: self._reserved.append(ip) def cancel(self, ip): """Remove reservation of an IP address Marks given IP as available for allocation. Args: ip: ip to release Raises: MalformedInputException: ip has not been reserved yet. """ if ip in self._reserved: self._reserved.remove(ip) else: msg = "IP {0} has not been reserved yet".format(ip) raise MalformedInputException(msg) def __contains__(self, other): """Check if ip belongs to the pool. Args: other: ip, either as a string or an ipaddress.ip_address object to check the membership for. """ if isinstance(other, str): tmp = ip_address(other) return tmp in self._network elif isinstance(other, IPv4Address) or \ isinstance(other, IPv6Address): return other in self._network else: msg = "Could not determine membership of the object {0}".format(other) raise MalformedInputException(msg) def __str__(self): """Present object in human-readable form""" msg = "Network: {0}\n".format(self._network) msg += "Allocated:\n" if self._allocated: for tmp in self._allocated: msg += "\t- {0}\n".format(tmp) else: msg += "\t<None>\n" msg += "Reserved:\n" if self._reserved: for tmp in self._reserved: msg += "\t- {0}\n".format(tmp) else: msg += "\t<None>\n" return msg
repo_name: vespian/inventory_tool
path: inventory_tool/object/ippool.py
language: Python
license: apache-2.0
size: 8,065
# Copyright (c) 2012 - 2014 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import mock from oslo_concurrency import processutils from cinder import exception from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.emc.emc_cli_fc import EMCCLIFCDriver from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError from cinder.volume import volume_types from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService SUCCEED = ("", 0) FAKE_ERROR_RETURN = ("FAKE ERROR", 255) class EMCVNXCLIDriverTestData(): test_volume = { 'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None, 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}] } test_volume_clone_cg = { 'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None, 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}] } test_volume_cg = { 'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': 'cg_id', 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}] } test_volume_rw = { 'name': 'vol1', 'size': 1, 'volume_name': 'vol1', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol1', 'display_description': 'test volume', 'volume_type_id': None, 'consistencygroup_id': None, 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, {'key': 'readonly', 'value': 'False'}] } test_volume2 = { 'name': 'vol2', 'size': 1, 'volume_name': 'vol2', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol2', 'consistencygroup_id': None, 'display_description': 'test volume', 'volume_type_id': None} volume_in_cg = { 'name': 'vol2', 'size': 1, 'volume_name': 'vol2', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol2', 'consistencygroup_id': None, 'display_description': 'test volume', 'volume_type_id': None} test_volume_with_type = { 'name': 'vol_with_type', 'size': 1, 'volume_name': 'vol_with_type', 'id': '1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'thin_vol', 'consistencygroup_id': None, 'display_description': 'vol with type', 'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231'} test_failed_volume = { 'name': 'failed_vol1', 'size': 1, 'volume_name': 'failed_vol1', 'id': '4', 'provider_auth': None, 'project_id': 'project', 
'display_name': 'failed_vol', 'consistencygroup_id': None, 'display_description': 'test failed volume', 'volume_type_id': None} test_snapshot = { 'name': 'snapshot1', 'size': 1, 'id': '4444', 'volume_name': 'vol1', 'volume_size': 1, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'project_id': 'project'} test_failed_snapshot = { 'name': 'failed_snapshot', 'size': 1, 'id': '5555', 'volume_name': 'vol-vol1', 'volume_size': 1, 'project_id': 'project'} test_clone = { 'name': 'clone1', 'size': 1, 'id': '2', 'volume_name': 'vol1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'clone1', 'consistencygroup_id': None, 'display_description': 'volume created from snapshot', 'volume_type_id': None} test_clone_cg = { 'name': 'clone1', 'size': 1, 'id': '2', 'volume_name': 'vol1', 'provider_auth': None, 'project_id': 'project', 'display_name': 'clone1', 'consistencygroup_id': 'consistencygroup_id', 'display_description': 'volume created from snapshot', 'volume_type_id': None} connector = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': ["1234567890123456", "1234567890543216"], 'wwnns': ["2234567890123456", "2234567890543216"], 'host': 'fakehost'} test_volume3 = {'migration_status': None, 'availability_zone': 'nova', 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24', 'name': 'vol3', 'size': 2, 'volume_admin_metadata': [], 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'provider_location': None, 'host': 'ubuntu-server12@pool_backend_1', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test02', 'instance_uuid': None, 'attach_status': 'detached', 'volume_type': [], 'attached_host': None, '_name_id': None, 'volume_metadata': []} test_new_type = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'thin'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} test_diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provisioning': ('thick', 'thin')}} test_host = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'POOL_SAS1|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} test_volume4 = {'migration_status': None, 'availability_zone': 'nova', 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24', 'name': 'vol4', 'size': 2L, 'volume_admin_metadata': [], 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'provider_location': None, 'host': 'ubuntu-server12@array_backend_1', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test02', 'instance_uuid': None, 'attach_status': 'detached', 'volume_type': [], '_name_id': None, 'volume_metadata': []} test_volume5 = {'migration_status': None, 'availability_zone': 'nova', 'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25', 'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25', 'name': 'vol5', 'size': 1, 'volume_admin_metadata': [], 'status': 'available', 'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8', 'deleted': False, 'provider_location': 'system^FNM11111|type^lun|lun_id^5', 'host': 'ubuntu-server12@array_backend_1', 'source_volid': None, 'provider_auth': None, 'display_name': 'vol-test05', 'instance_uuid': None, 'attach_status': 'detached', 'volume_type': [], '_name_id': None, 'volume_metadata': []} test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:pool': 'POOL_SAS2'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} test_diff2 = 
{'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}} test_host2 = {'host': 'ubuntu-server12@array_backend_1', 'capabilities': {'location_info': '|FNM00124500890', 'volume_backend_name': 'array_backend_1', 'storage_protocol': 'iSCSI'}} test_cg = {'id': 'consistencygroup_id', 'name': 'group_name', 'status': 'deleting'} test_cgsnapshot = { 'consistencygroup_id': 'consistencygroup_id', 'id': 'cgsnapshot_id', 'status': 'available'} test_member_cgsnapshot = { 'name': 'snapshot1', 'size': 1, 'id': 'cgsnapshot_id', 'volume_name': 'vol1', 'volume_size': 1, 'consistencygroup_id': 'consistencygroup_id', 'cgsnapshot_id': 'cgsnapshot_id', 'project_id': 'project' } test_lun_id = 1 test_existing_ref = {'id': test_lun_id} test_pool_name = 'Pool_02_SASFLASH' device_map = { '1122334455667788': { 'initiator_port_wwn_list': ['123456789012345', '123456789054321'], 'target_port_wwn_list': ['1122334455667777']}} i_t_map = {'123456789012345': ['1122334455667777'], '123456789054321': ['1122334455667777']} POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool', '-userCap', '-availableCap') NDU_LIST_CMD = ('ndu', '-list') NDU_LIST_RESULT = ("Name of the software package: -Compression " + "Name of the software package: -Deduplication " + "Name of the software package: -FAST " + "Name of the software package: -FASTCache " + "Name of the software package: -ThinProvisioning ", 0) def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'): return ('lun', '-create', '-type', 'snap', '-primaryLunName', source, '-name', name) def SNAP_ATTACH_CMD(self, name='vol1', snapName='snapshot1'): return ('lun', '-attach', '-name', name, '-snapName', snapName) def SNAP_DELETE_CMD(self, name): return ('snap', '-destroy', '-id', name, '-o') def SNAP_CREATE_CMD(self, name): return ('snap', '-create', '-res', 1, '-name', name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') def LUN_DELETE_CMD(self, name): return ('lun', '-destroy', '-name', name, '-forceDetach', '-o') def LUN_CREATE_CMD(self, name, isthin=False): return ('lun', '-create', '-type', 'Thin' if isthin else 'NonThin', '-capacity', 1, '-sq', 'gb', '-poolName', 'unit_test_pool', '-name', name) def LUN_EXTEND_CMD(self, name, newsize): return ('lun', '-expand', '-name', name, '-capacity', newsize, '-sq', 'gb', '-o', '-ignoreThresholds') def LUN_PROPERTY_ALL_CMD(self, lunname): return ('lun', '-list', '-name', lunname, '-state', '-status', '-opDetails', '-userCap', '-owner', '-attachedSnapshot') def MIGRATION_CMD(self, src_id=1, dest_id=1): return ("migrate", "-start", "-source", src_id, "-dest", dest_id, "-rate", "high", "-o") def MIGRATION_VERIFY_CMD(self, src_id): return ("migrate", "-list", "-source", src_id) def GETPORT_CMD(self): return ("connection", "-getport", "-address", "-vlanid") def PINGNODE_CMD(self, sp, portid, vportid, ip): return ("connection", "-pingnode", "-sp", sp, '-portid', portid, "-vportid", vportid, "-address", ip) def GETFCPORT_CMD(self): return ('port', '-list', '-sp') def CONNECTHOST_CMD(self, hostname, gname): return ('storagegroup', '-connecthost', '-host', hostname, '-gname', gname, '-o') def ENABLE_COMPRESSION_CMD(self, lun_id): return ('compression', '-on', '-l', lun_id, '-ignoreThresholds', '-o') provisioning_values = { 'thin': ['-type', 'Thin'], 'thick': ['-type', 'NonThin'], 'compressed': ['-type', 'Thin'], 'deduplicated': ['-type', 'Thin', '-deduplication', 'on']} tiering_values = { 'starthighthenauto': [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'autoTier'], 'auto': [ 
'-initialTier', 'optimizePool', '-tieringPolicy', 'autoTier'], 'highestavailable': [ '-initialTier', 'highestAvailable', '-tieringPolicy', 'highestAvailable'], 'lowestavailable': [ '-initialTier', 'lowestAvailable', '-tieringPolicy', 'lowestAvailable'], 'nomovement': [ '-initialTier', 'optimizePool', '-tieringPolicy', 'noMovement']} def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering): initial = ['lun', '-create', '-capacity', size, '-sq', 'gb', '-poolName', pool, '-name', name] if provisioning: initial.extend(self.provisioning_values[provisioning]) else: initial.extend(self.provisioning_values['thick']) if tiering: initial.extend(self.tiering_values[tiering]) return tuple(initial) def CHECK_FASTCACHE_CMD(self, storage_pool): return ('-np', 'storagepool', '-list', '-name', storage_pool, '-fastcache') def CREATE_CONSISTENCYGROUP_CMD(self, cg_name): return ('-np', 'snap', '-group', '-create', '-name', cg_name, '-allowSnapAutoDelete', 'no') def DELETE_CONSISTENCYGROUP_CMD(self, cg_name): return ('-np', 'snap', '-group', '-destroy', '-id', cg_name) def GET_CONSISTENCYGROUP_BY_NAME(self, cg_name): return ('snap', '-group', '-list', '-id', cg_name) def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id): return ('-np', 'snap', '-group', '-addmember', '-id', cg_name, '-res', lun_id) def CREATE_CG_SNAPSHOT(self, cg_name, snap_name): return ('-np', 'snap', '-create', '-res', cg_name, '-resType', 'CG', '-name', snap_name, '-allowReadWrite', 'yes', '-allowAutoDelete', 'no') def DELETE_CG_SNAPSHOT(self, snap_name): return ('-np', 'snap', '-destroy', '-id', snap_name, '-o') def GET_CG_BY_NAME_CMD(self, cg_name): return ('snap', '-group', '-list', '-id', cg_name) def CONSISTENCY_GROUP_VOLUMES(self): volumes = [] volumes.append(self.test_volume) volumes.append(self.test_volume) return volumes def SNAPS_IN_SNAP_GROUP(self): snaps = [] snaps.append(self.test_snapshot) snaps.append(self.test_snapshot) return snaps def CG_PROPERTY(self, cg_name): return """ Name: %(cg_name)s Description: Allow auto delete: No Member LUN ID(s): 1, 3 State: Ready """ % {'cg_name': cg_name} POOL_PROPERTY = ("""\ Pool Name: unit_test_pool Pool ID: 1 User Capacity (Blocks): 5769501696 User Capacity (GBs): 10000.5 Available Capacity (Blocks): 5676521472 Available Capacity (GBs): 1000.6 """, 0) ALL_PORTS = ("SP: A\n" + "Port ID: 4\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" + "iSCSI Alias: 0215.a4\n\n" + "Virtual Port ID: 0\n" + "VLAN ID: Disabled\n" + "IP Address: 10.244.214.118\n\n" + "SP: A\n" + "Port ID: 5\n" + "Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" + "iSCSI Alias: 0215.a5\n", 0) iscsi_connection_info_ro = \ {'data': {'access_mode': 'ro', 'target_discovered': True, 'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4', 'target_lun': 1, 'target_portal': '10.244.214.118:3260'}, 'driver_volume_type': 'iscsi'} iscsi_connection_info_rw = \ {'data': {'access_mode': 'rw', 'target_discovered': True, 'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4', 'target_lun': 1, 'target_portal': '10.244.214.118:3260'}, 'driver_volume_type': 'iscsi'} PING_OK = ("Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" + "Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n", 0) FC_PORTS = ("Information about each SPPORT:\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 0\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:60:08:60:01:95\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + 
"Switch UID: 10:00:00:05:1E:72:EC:A6:" + "20:46:00:05:1E:72:EC:A6\n" + "SP Source ID: 272896\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 4\n" + "SP UID: iqn.1992-04.com.emc:cx." + "fnm00124000215.b4\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: Not Applicable\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:62:08:60:01:95\n" + "Link Status: Down\n" + "Port Status: Online\n" + "Switch Present: NO\n", 0) FAKEHOST_PORTS = ( "Information about each HBA:\n" + "\n" + "HBA UID: 20:00:00:90:FA:53:46:41:12:34:" + "56:78:90:12:34:56\n" + "Server Name: fakehost\n" + "Server IP Address: 10.0.0.2" + "HBA Model Description:\n" + "HBA Vendor Description:\n" + "HBA Device Driver Name:\n" + "Information about each port of this HBA:\n\n" + " SP Name: SP A\n" + " SP Port ID: 0\n" + " HBA Devicename:\n" + " Trusted: NO\n" + " Logged In: YES\n" + " Defined: YES\n" + " Initiator Type: 3\n" + " StorageGroup Name: fakehost\n\n" + " SP Name: SP A\n" + " SP Port ID: 2\n" + " HBA Devicename:\n" + " Trusted: NO\n" + " Logged In: YES\n" + " Defined: YES\n" + " Initiator Type: 3\n" + " StorageGroup Name: fakehost\n\n" + "Information about each SPPORT:\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 0\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:60:08:60:01:95\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: YES\n" + "Switch UID: 10:00:00:05:1E:72:EC:A6:" + "20:46:00:05:1E:72:EC:A6\n" + "SP Source ID: 272896\n" + "\n" + "SP Name: SP B\n" + "SP Port ID: 4\n" + "SP UID: iqn.1992-04.com.emc:cx." + "fnm00124000215.b4\n" + "Link Status: Up\n" + "Port Status: Online\n" + "Switch Present: Not Applicable\n" + "\n" + "SP Name: SP A\n" + "SP Port ID: 2\n" + "SP UID: 50:06:01:60:88:60:01:95:" + "50:06:01:62:08:60:01:95\n" + "Link Status: Down\n" + "Port Status: Online\n" + "Switch Present: NO\n", 0) def LUN_PROPERTY(self, name, isThin=False, hasSnap=False, size=1): return """\ LOGICAL UNIT NUMBER 1 Name: %s UID: 60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11 Current Owner: SP A Default Owner: SP A Allocation Owner: SP A Attached Snapshot: %s User Capacity (Blocks): 2101346304 User Capacity (GBs): %d Consumed Capacity (Blocks): 2149576704 Consumed Capacity (GBs): 1024.998 Pool Name: Pool_02_SASFLASH Current State: Ready Status: OK(0x0) Is Faulted: false Is Transitioning: false Current Operation: None Current Operation State: N/A Current Operation Status: N/A Current Operation Percent Completed: 0 Is Thin LUN: %s""" % (name, 'FakeSnap' if hasSnap else 'N/A', size, 'Yes' if isThin else 'No'), 0 def STORAGE_GROUP_NO_MAP(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D Shareable: YES""" % sgname, 0) def STORAGE_GROUP_HAS_MAP(self, sgname): return ("""\ Storage Group Name: %s Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D HBA/SP Pairs: HBA UID SP Name SPPort ------- ------- ------ iqn.1993-08.org.debian:01:222 SP A 4 HLU/ALU Pairs: HLU Number ALU Number ---------- ---------- 1 1 Shareable: YES""" % sgname, 0) class EMCVNXCLIDriverISCSITestCase(test.TestCase): def setUp(self): super(EMCVNXCLIDriverISCSITestCase, self).setUp() self.stubs.Set(CommandLineHelper, 'command_execute', self.succeed_fake_command_execute) self.stubs.Set(CommandLineHelper, 'get_array_serial', mock.Mock(return_value={'array_serial': 'fakeSerial'})) self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1)) self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01) 
self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01) self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01) self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli' self.configuration.san_ip = '10.0.0.1' self.configuration.storage_vnx_pool_name = 'unit_test_pool' self.configuration.san_login = 'sysadmin' self.configuration.san_password = 'sysadmin' #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms self.configuration.default_timeout = 0.0002 self.configuration.initiator_auto_registration = True self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get) self.testData = EMCVNXCLIDriverTestData() self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 ' self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}' def tearDown(self): super(EMCVNXCLIDriverISCSITestCase, self).tearDown() def driverSetup(self, commands=tuple(), results=tuple()): self.driver = EMCCLIISCSIDriver(configuration=self.configuration) fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.Mock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli return fake_cli def get_command_execute_simulator(self, commands=tuple(), results=tuple()): assert(len(commands) == len(results)) def fake_command_execute(*args, **kwargv): for i in range(len(commands)): if args == commands[i]: if isinstance(results[i], list): if len(results[i]) > 0: ret = results[i][0] del results[i][0] return ret else: return results[i] return self.standard_fake_command_execute(*args, **kwargv) return fake_command_execute def standard_fake_command_execute(self, *args, **kwargv): standard_commands = [ self.testData.LUN_PROPERTY_ALL_CMD('vol1'), self.testData.LUN_PROPERTY_ALL_CMD('vol2'), self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'), self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'), self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'), self.testData.POOL_PROPERTY_CMD] standard_results = [ self.testData.LUN_PROPERTY('vol1'), self.testData.LUN_PROPERTY('vol2'), self.testData.LUN_PROPERTY('vol2_dest'), self.testData.LUN_PROPERTY('vol-vol1'), self.testData.LUN_PROPERTY('snapshot1'), self.testData.POOL_PROPERTY] standard_default = SUCCEED for i in range(len(standard_commands)): if args == standard_commands[i]: return standard_results[i] return standard_default @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_destroy_volume_without_extra_spec(self): fake_cli = self.driverSetup() self.driver.create_volume(self.testData.test_volume) self.driver.delete_volume(self.testData.test_volume) expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol1', 1, 'unit_test_pool', 'thick', None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.LUN_DELETE_CMD('vol1'))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_volume_compressed(self): extra_specs = {'storagetype:provisioning': 'compressed'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', 
'-ThinProvisioning', '-FAST'] #case self.driver.create_volume(self.testData.test_volume_with_type) #verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, 'unit_test_pool', 'compressed', None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD( 1))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_volume_compressed_tiering_highestavailable(self): extra_specs = {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'HighestAvailable'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] #case self.driver.create_volume(self.testData.test_volume_with_type) #verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, 'unit_test_pool', 'compressed', 'highestavailable')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD( 1))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_volume_deduplicated(self): extra_specs = {'storagetype:provisioning': 'deduplicated'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] #case self.driver.create_volume(self.testData.test_volume_with_type) #verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, 'unit_test_pool', 'deduplicated', None))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_volume_tiering_auto(self): extra_specs = {'storagetype:tiering': 'Auto'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] #case self.driver.create_volume(self.testData.test_volume_with_type) #verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, 'unit_test_pool', None, 'auto'))] fake_cli.assert_has_calls(expect_cmd) def test_create_volume_deduplicated_tiering_auto(self): extra_specs = {'storagetype:tiering': 'Auto', 'storagetype:provisioning': 'Deduplicated'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), 
self.testData.NDU_LIST_RESULT] self.driverSetup(commands, results) ex = self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.testData.test_volume_with_type) self.assertTrue( re.match(r".*deduplicated and auto tiering can't be both enabled", ex.msg)) def test_create_volume_compressed_no_enabler(self): extra_specs = {'storagetype:provisioning': 'Compressed'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), ('No package', 0)] self.driverSetup(commands, results) ex = self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.testData.test_volume_with_type) self.assertTrue( re.match(r".*Compression Enabler is not installed", ex.msg)) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_create_compression_volume_on_array_backend(self): """Unit test for create a compression volume on array backend. """ #Set up the array backend config = conf.Configuration(None) config.append_config_values = mock.Mock(return_value=0) config.naviseccli_path = '/opt/Navisphere/bin/naviseccli' config.san_ip = '10.0.0.1' config.san_login = 'sysadmin' config.san_password = 'sysadmin' config.default_timeout = 0.0002 config.initiator_auto_registration = True config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 ' config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}' self.driver = EMCCLIISCSIDriver(configuration=config) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray) extra_specs = {'storagetype:provisioning': 'Compressed', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD] results = [self.testData.LUN_PROPERTY('vol_with_type', True), self.testData.NDU_LIST_RESULT] fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli self.driver.cli.stats['compression_support'] = 'True' self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] #case self.driver.create_volume(self.testData.test_volume_with_type) #verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, 'unit_test_pool', 'compressed', None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol_with_type')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD( 1))] fake_cli.assert_has_calls(expect_cmd) def test_get_volume_stats(self): #expect_result = [POOL_PROPERTY] self.driverSetup() stats = self.driver.get_volume_stats(True) self.assertTrue(stats['driver_version'] is not None, "dirver_version is not returned") self.assertTrue( stats['free_capacity_gb'] == 1000.6, "free_capacity_gb is not correct") self.assertTrue( stats['reserved_percentage'] == 0, "reserved_percentage is not correct") self.assertTrue( stats['storage_protocol'] == 'iSCSI', "storage_protocol is not correct") self.assertTrue( stats['total_capacity_gb'] == 10000.5, "total_capacity_gb is not correct") self.assertTrue( stats['vendor_name'] == "EMC", "vender name is not correct") self.assertTrue( stats['volume_backend_name'] == 
"namedbackend", "volume backend name is not correct") self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial") self.assertTrue( stats['driver_version'] == "04.01.00", "driver version is incorrect.") @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock(return_value=True)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase." "get_lun_id_by_name", mock.Mock(return_value=1)) def test_volume_migration_timeout(self): commands = [self.testData.MIGRATION_CMD(), self.testData.MIGRATION_VERIFY_CMD(1)] FAKE_ERROR_MSG = """\ A network error occurred while trying to connect: '10.244.213.142'. Message : Error occurred because connection refused. \ Unable to establish a secure connection to the Management Server. """ FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ') FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [(FAKE_ERROR_MSG, 255), [SUCCEED, (FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': "unit_test_pool2|fakeSerial", 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertTrue(ret) #verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value=True)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase." "get_lun_id_by_name", mock.Mock(return_value=1)) def test_volume_migration(self): commands = [self.testData.MIGRATION_CMD(), self.testData.MIGRATION_VERIFY_CMD(1)] FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not ' 'currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': "unit_test_pool2|fakeSerial", 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertTrue(ret) #verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value=True)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase." 
"get_lun_id_by_name", mock.Mock(return_value=5)) def test_volume_migration_02(self): commands = [self.testData.MIGRATION_CMD(5, 5), self.testData.MIGRATION_VERIFY_CMD(5)] FAKE_MIGRATE_PROPERTY = """\ Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d Source LU ID: 63950 Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest Dest LU ID: 136 Migration Rate: high Current State: MIGRATED Percent Complete: 100 Time Remaining: 0 second(s) """ results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0), ('The specified source LUN is not ' 'currently migrating', 23)]] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': "unit_test_pool2|fakeSerial", 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume5, fakehost)[0] self.assertTrue(ret) #verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(5)), mock.call(*self.testData.MIGRATION_VERIFY_CMD(5))] fake_cli.assert_has_calls(expect_cmd) @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli." "CommandLineHelper.create_lun_by_cmd", mock.Mock( return_value=True)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock( side_effect=[1, 1])) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase." "get_lun_id_by_name", mock.Mock(return_value=1)) def test_volume_migration_failed(self): commands = [self.testData.MIGRATION_CMD()] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) fakehost = {'capabilities': {'location_info': "unit_test_pool2|fakeSerial", 'storage_protocol': 'iSCSI'}} ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertFalse(ret) #verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)] fake_cli.assert_has_calls(expect_cmd) def test_create_destroy_volume_snapshot(self): fake_cli = self.driverSetup() #case self.driver.create_snapshot(self.testData.test_snapshot) self.driver.delete_snapshot(self.testData.test_snapshot) #verification expect_cmd = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1')), mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "oslo_concurrency.processutils.execute", mock.Mock( return_value=( "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0))) @mock.patch("random.shuffle", mock.Mock()) def test_initialize_connection(self): # Test for auto registration self.configuration.initiator_auto_registration = True commands = [('storagegroup', '-list', '-gname', 'fakehost'), self.testData.GETPORT_CMD(), self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], self.testData.ALL_PORTS, self.testData.PING_OK] fake_cli = self.driverSetup(commands, results) connection_info = self.driver.initialize_connection( self.testData.test_volume, self.testData.connector) self.assertEqual(connection_info, self.testData.iscsi_connection_info_ro) expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-list'), mock.call(*self.testData.GETPORT_CMD()), mock.call('storagegroup', '-gname', 'fakehost', '-setpath', '-hbauid', 'iqn.1993-08.org.debian:01:222', 
'-sp', 'A', '-spport', 4, '-spvport', 0, '-ip', '10.0.0.2', '-host', 'fakehost', '-o'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call(*self.testData.GETPORT_CMD()), mock.call(*self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2'))] fake_cli.assert_has_calls(expected) # Test for manaul registration self.configuration.initiator_auto_registration = False commands = [('storagegroup', '-list', '-gname', 'fakehost'), self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'), self.testData.GETPORT_CMD(), self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], ('', 0), self.testData.ALL_PORTS, self.testData.PING_OK] fake_cli = self.driverSetup(commands, results) connection_info = self.driver.initialize_connection( self.testData.test_volume_rw, self.testData.connector) self.assertEqual(connection_info, self.testData.iscsi_connection_info_rw) expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-connecthost', '-host', 'fakehost', '-gname', 'fakehost', '-o'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('connection', '-getport', '-address', '-vlanid')] fake_cli.assert_has_calls(expected) def test_terminate_connection(self): os.path.exists = mock.Mock(return_value=1) self.driver = EMCCLIISCSIDriver(configuration=self.configuration) cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {1: 16, 2: 88, 3: 47}} cli_helper.get_storage_group = mock.Mock( return_value=data) lun_info = {'lun_name': "unit_test_lun", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.remove_hlu_from_storagegroup = mock.Mock() self.driver.terminate_connection(self.testData.test_volume, self.testData.connector) cli_helper.remove_hlu_from_storagegroup.assert_called_once_with( 16, self.testData.connector["host"]) # expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'), # mock.call('lun', '-list', '-name', 'vol1'), # mock.call('storagegroup', '-list', '-gname', 'fakehost'), # mock.call('lun', '-list', '-l', '10', '-owner')] def test_create_volume_cli_failed(self): commands = [self.testData.LUN_CREATION_CMD( 'failed_vol1', 1, 'unit_test_pool', None, None)] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) self.assertRaises(EMCVnxCLICmdError, self.driver.create_volume, self.testData.test_failed_volume) expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD( 'failed_vol1', 1, 'unit_test_pool', None, None))] 
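        # NOTE: with the default any_order=False, Mock.assert_has_calls()
        # (used throughout these tests) checks that the expected calls appear
        # consecutively and in order within fake_cli.mock_calls; unrelated
        # calls recorded before or after that run do not fail the assertion.
        # A minimal sketch of the pattern these tests follow (illustrative
        # only, not executed):
        #     fake_cli = self.driverSetup(commands, results)
        #     self.driver.<api_under_test>(...)
        #     fake_cli.assert_has_calls([mock.call(*expected_cli_tokens)])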
fake_cli.assert_has_calls(expect_cmd) def test_create_volume_snapshot_failed(self): commands = [self.testData.SNAP_CREATE_CMD('failed_snapshot')] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) #case self.assertRaises(EMCVnxCLICmdError, self.driver.create_snapshot, self.testData.test_failed_snapshot) #verification expect_cmd = [ mock.call( *self.testData.LUN_PROPERTY_ALL_CMD( 'vol-vol1')), mock.call( *self.testData.SNAP_CREATE_CMD( 'failed_snapshot'))] fake_cli.assert_has_calls(expect_cmd) def test_create_volume_from_snapshot(self): #set up cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot') output_smp = ("""LOGICAL UNIT NUMBER 1 Name: vol2 Attached Snapshot: N/A""", 0) cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest") output_dest = self.testData.LUN_PROPERTY("vol2_dest") cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify] results = [output_smp, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) self.driver.create_volume_from_snapshot(self.testData.test_volume2, self.testData.test_snapshot) expect_cmd = [ mock.call( *self.testData.SNAP_MP_CREATE_CMD( name='vol2', source='vol1')), mock.call( *self.testData.SNAP_ATTACH_CMD( name='vol2', snapName='snapshot1')), mock.call(*self.testData.LUN_CREATION_CMD( 'vol2_dest', 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot')] fake_cli.assert_has_calls(expect_cmd) def test_create_volume_from_snapshot_sync_failed(self): output_smp = ("""LOGICAL UNIT NUMBER 1 Name: vol1 Attached Snapshot: fakesnap""", 0) cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot') cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest") output_dest = self.testData.LUN_PROPERTY("vol2_dest") cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify] results = [output_smp, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.testData.test_volume2, self.testData.test_snapshot) expect_cmd = [ mock.call( *self.testData.SNAP_MP_CREATE_CMD( name='vol2', source='vol1')), mock.call( *self.testData.SNAP_ATTACH_CMD( name='vol2', snapName='snapshot1')), mock.call(*self.testData.LUN_CREATION_CMD( 'vol2_dest', 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))] fake_cli.assert_has_calls(expect_cmd) def 
test_create_cloned_volume(self): cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot') output_smp = ("""LOGICAL UNIT NUMBER 1 Name: vol1 Attached Snapshot: N/A""", 0) cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest") output_dest = self.testData.LUN_PROPERTY("vol1_dest") cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify, self.testData.NDU_LIST_CMD] results = [output_smp, output_dest, output_migrate, output_migrate_verify, self.testData.NDU_LIST_RESULT] fake_cli = self.driverSetup(commands, results) self.driver.create_cloned_volume(self.testData.test_volume, self.testData.test_snapshot) tmp_snap = 'tmp-snap-' + self.testData.test_volume['id'] expect_cmd = [ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1')), mock.call( *self.testData.SNAP_CREATE_CMD(tmp_snap)), mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1', source='snapshot1')), mock.call( *self.testData.SNAP_ATTACH_CMD( name='vol1', snapName=tmp_snap)), mock.call(*self.testData.LUN_CREATION_CMD( 'vol1_dest', 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap))] fake_cli.assert_has_calls(expect_cmd) def test_delete_volume_failed(self): commands = [self.testData.LUN_DELETE_CMD('failed_vol1')] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) self.assertRaises(EMCVnxCLICmdError, self.driver.delete_volume, self.testData.test_failed_volume) expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))] fake_cli.assert_has_calls(expected) def test_extend_volume(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')] results = [self.testData.LUN_PROPERTY('vol1', size=2)] fake_cli = self.driverSetup(commands, results) # case self.driver.extend_volume(self.testData.test_volume, 2) expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD( 'vol1'))] fake_cli.assert_has_calls(expected) def test_extend_volume_has_snapshot(self): commands = [self.testData.LUN_EXTEND_CMD('failed_vol1', 2)] results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) self.assertRaises(EMCVnxCLICmdError, self.driver.extend_volume, self.testData.test_failed_volume, 2) expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2))] fake_cli.assert_has_calls(expected) def test_extend_volume_failed(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')] results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)] fake_cli = self.driverSetup(commands, results) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.testData.test_failed_volume, 3) expected = [ mock.call( *self.testData.LUN_EXTEND_CMD('failed_vol1', 3)), mock.call( *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'))] fake_cli.assert_has_calls(expected) def 
test_create_remove_export(self): fake_cli = self.driverSetup() self.driver.create_export(None, self.testData.test_volume) self.driver.remove_export(None, self.testData.test_volume) expected = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'))] fake_cli.assert_has_calls(expected) def test_manage_existing(self): """Unit test for the manage_existing function of driver """ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id, '-newName', 'vol_with_type', '-o') commands = [get_lun_cmd, lun_rename_cmd] results = [self.testData.LUN_PROPERTY('lun_name'), SUCCEED] self.configuration.storage_vnx_pool_name = \ self.testData.test_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) #mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli self.driver.manage_existing( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*get_lun_cmd), mock.call(*lun_rename_cmd)] fake_cli.assert_has_calls(expected) def test_manage_existing_lun_in_another_pool(self): """Unit test for the manage_existing function of driver with a invalid pool backend. An exception would occur in this case """ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-userCap', '-owner', '-attachedSnapshot', '-poolName') commands = [get_lun_cmd] results = [self.testData.LUN_PROPERTY('lun_name')] invalid_pool_name = "fake_pool" self.configuration.storage_vnx_pool_name = invalid_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) #mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli ex = self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing, self.testData.test_volume_with_type, self.testData.test_existing_ref) self.assertTrue( re.match(r'.*not in a manageable pool backend by cinder', ex.msg)) expected = [mock.call(*get_lun_cmd)] fake_cli.assert_has_calls(expected) def test_manage_existing_get_size(self): """Unit test for the manage_existing_get_size function of driver. """ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id, '-state', '-status', '-opDetails', '-userCap', '-owner', '-attachedSnapshot') test_size = 2 commands = [get_lun_cmd] results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)] self.configuration.storage_vnx_pool_name = \ self.testData.test_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) #mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli get_size = self.driver.manage_existing_get_size( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*get_lun_cmd)] assert get_size == test_size fake_cli.assert_has_calls(expected) #Test the function with invalid reference. 
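        # A reference dict that lacks the key the driver expects (the bogus
        # {'fake': 'fake_ref'} below) should surface as
        # ManageExistingInvalidReference rather than an unhandled KeyError.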
invaild_ref = {'fake': 'fake_ref'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.testData.test_volume_with_type, invaild_ref) def test_manage_existing_with_array_backend(self): """Unit test for the manage_existing with the array backend which is not support the manage existing functinality. """ #Set up the array backend config = conf.Configuration(None) config.append_config_values = mock.Mock(return_value=0) config.naviseccli_path = '/opt/Navisphere/bin/naviseccli' config.san_ip = '10.0.0.1' config.san_login = 'sysadmin' config.san_password = 'sysadmin' config.default_timeout = 0.0002 config.initiator_auto_registration = True config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 ' config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}' self.driver = EMCCLIISCSIDriver(configuration=config) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray) #mock the command executor lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id, '-newName', 'vol_with_type', '-o') commands = [lun_rename_cmd] results = [SUCCEED] #mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli self.driver.manage_existing( self.testData.test_volume_with_type, self.testData.test_existing_ref) expected = [mock.call(*lun_rename_cmd)] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase." "get_lun_id_by_name", mock.Mock(return_value=1)) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) def test_retype_compressed_to_deduplicated(self): """Unit test for retype compressed to deduplicated.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('compressed', 'deduplicated')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'deduplicated'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'compressed'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call('snap', '-list', '-res', 1), mock.call(*self.testData.LUN_CREATION_CMD( 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456')), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) 
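    # The retype tests below stack several mock.patch decorators; because a
    # ready-made Mock is passed as the replacement object, no extra argument
    # is injected into the test method signature.  time.time is pinned to
    # 123456 so the destination LUN created for a migration-based retype gets
    # the deterministic name 'vol3-123456' that the expected commands assert
    # on.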
@mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) def test_retype_thin_to_compressed_auto(self): """Unit test for retype thin to compressed and auto tiering.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('thin', 'compressed'), 'storagetype:tiering': (None, 'auto')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'auto'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'thin'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call('snap', '-list', '-res', 1), mock.call(*self.testData.LUN_CREATION_CMD( 'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) def test_retype_pool_changed_dedup_to_compressed_auto(self): """Unit test for retype dedup to compressed and auto tiering and pool changed """ diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('deduplicated', 'compressed'), 'storagetype:tiering': (None, 'auto'), 'storagetype:pool': ('unit_test_pool', 'unit_test_pool2')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool2'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool2|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'deduplicated', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call('snap', '-list', '-res', 1), mock.call(*self.testData.LUN_CREATION_CMD( 'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')), mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) def test_retype_compressed_auto_to_compressed_nomovement(self): """Unit test for retype only tiering changed.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:tiering': ('auto', 'nomovement')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'compressed', 'storagetype:tiering': 'nomovement', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'compressed', 'storagetype:pool': 'unit_test_pool', 'storagetype:tiering': 'auto'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call('lun', '-modify', '-name', 'vol3', '-o', '-initialTier', 'optimizePool', '-tieringPolicy', 'noMovement')] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) def test_retype_compressed_to_thin_cross_array(self): """Unit test for retype cross array.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('compressed', 'thin')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'thin', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_2', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500891', 'volume_backend_name': 'pool_backend_2', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'thin', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) retyped = self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) self.assertFalse(retyped, "Retype should failed due to" " different protocol or array") @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) @mock.patch( "time.time", mock.Mock(return_value=123456)) def test_retype_thin_auto_to_dedup_diff_procotol(self): """Unit test for retype different procotol.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('thin', 'deduplicated'), 'storagetype:tiering': ('auto', None)}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:provisioning': 'deduplicated', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_2', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_2', 'storage_protocol': 'FC'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('No snap', 1023)] fake_cli = self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) expect_cmd = [ mock.call('snap', '-list', '-res', 1), mock.call(*self.testData.LUN_CREATION_CMD( 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)), mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." 
+ "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) def test_retype_thin_auto_has_snap_to_thick_highestavailable(self): """Unit test for retype volume has snap when need migration.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'storagetype:provsioning': ('thin', None), 'storagetype:tiering': ('auto', 'highestAvailable')}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:tiering': 'highestAvailable', 'storagetype:pool': 'unit_test_pool'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD, ('snap', '-list', '-res', 1)] results = [self.testData.NDU_LIST_RESULT, ('Has snap', 0)] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) retyped = self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) self.assertFalse(retyped, "Retype should failed due to" " different protocol or array") @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id", mock.Mock(return_value=1)) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 1})) def test_retype_thin_auto_to_thin_auto(self): """Unit test for retype volume which has no change.""" diff_data = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {}} new_type_data = {'name': 'voltype0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'storagetype:tiering': 'auto', 'storagetype:provisioning': 'thin'}, 'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'} host_test_data = {'host': 'ubuntu-server12@pool_backend_1', 'capabilities': {'location_info': 'unit_test_pool|FNM00124500890', 'volume_backend_name': 'pool_backend_1', 'storage_protocol': 'iSCSI'}} commands = [self.testData.NDU_LIST_CMD] results = [self.testData.NDU_LIST_RESULT] self.driverSetup(commands, results) self.driver.cli.enablers = ['-Compression', '-Deduplication', '-ThinProvisioning', '-FAST'] CommandLineHelper.get_array_serial = mock.Mock( return_value={'array_serial': "FNM00124500890"}) extra_specs = {'storagetype:provisioning': 'thin', 'storagetype:tiering': 'auto', 'storagetype:pool': 'unit_test_pool'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) self.driver.retype(None, self.testData.test_volume3, new_type_data, diff_data, host_test_data) def test_create_volume_with_fastcache(self): '''enable fastcache when creating volume.''' extra_specs = {'fast_cache_enabled': 'True'} volume_types.get_volume_type_extra_specs = \ mock.Mock(return_value=extra_specs) commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.NDU_LIST_CMD, self.testData.CHECK_FASTCACHE_CMD( self.testData.test_pool_name)] results = [self.testData.LUN_PROPERTY('vol_with_type', True), SUCCEED, ('FAST Cache: Enabled', 0)] fake_cli = self.driverSetup(commands, results) lun_info = {'lun_name': "vol_with_type", 'lun_id': 1, 'pool': "unit_test_pool", 
'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready", 'status': 'OK(0x0)', 'operation': 'None' } self.configuration.storage_vnx_pool_name = \ self.testData.test_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) cli_helper = self.driver.cli._client cli_helper.command_execute = fake_cli cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache") self.driver.update_volume_stats() self.driver.create_volume(self.testData.test_volume_with_type) self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True') expect_cmd = [ mock.call('storagepool', '-list', '-name', 'Pool_02_SASFLASH', '-userCap', '-availableCap'), mock.call('-np', 'storagepool', '-list', '-name', 'Pool_02_SASFLASH', '-fastcache'), mock.call('lun', '-create', '-capacity', 1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH', '-name', 'vol_with_type', '-type', 'NonThin') ] fake_cli.assert_has_calls(expect_cmd) def test_get_lun_id_provider_location_exists(self): '''test function get_lun_id.''' self.driverSetup() volume_01 = { 'name': 'vol_01', 'size': 1, 'volume_name': 'vol_01', 'id': '1', 'name_id': '1', 'provider_location': 'system^FNM11111|type^lun|lun_id^1', 'project_id': 'project', 'display_name': 'vol_01', 'display_description': 'test volume', 'volume_type_id': None, 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]} self.assertEqual(self.driver.cli.get_lun_id(volume_01), 1) @mock.patch( "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." + "get_lun_by_name", mock.Mock(return_value={'lun_id': 2})) def test_get_lun_id_provider_location_has_no_lun_id(self): '''test function get_lun_id.''' self.driverSetup() volume_02 = { 'name': 'vol_02', 'size': 1, 'volume_name': 'vol_02', 'id': '2', 'provider_location': 'system^FNM11111|type^lun|', 'project_id': 'project', 'display_name': 'vol_02', 'display_description': 'test volume', 'volume_type_id': None, 'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]} self.assertEqual(self.driver.cli.get_lun_id(volume_02), 2) def test_create_consistency_group(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name)] results = [SUCCEED] fake_cli = self.driverSetup(commands, results) model_update = self.driver.create_consistencygroup( None, self.testData.test_cg) self.assertDictMatch({'status': 'available'}, model_update) expect_cmd = [ mock.call( *self.testData.CREATE_CONSISTENCYGROUP_CMD( cg_name))] fake_cli.assert_has_calls(expect_cmd) def test_delete_consistency_group(self): cg_name = self.testData.test_cg['id'] commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name), self.testData.LUN_DELETE_CMD('vol1')] results = [SUCCEED, SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.db = mock.MagicMock() self.driver.db.volume_get_all_by_group.return_value =\ self.testData.CONSISTENCY_GROUP_VOLUMES() self.driver.delete_consistencygroup(None, self.testData.test_cg) expect_cmd = [ mock.call( *self.testData.DELETE_CONSISTENCYGROUP_CMD( cg_name)), mock.call( *self.testData.LUN_DELETE_CMD('vol1')), mock.call( *self.testData.LUN_DELETE_CMD('vol1'))] fake_cli.assert_has_calls(expect_cmd) def test_create_cgsnapshot(self): cgsnapshot = self.testData.test_cgsnapshot['id'] cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot)] results = [SUCCEED] 
fake_cli = self.driverSetup(commands, results) self.driver.db = mock.MagicMock() self.driver.db.volume_get_all_by_group.return_value =\ self.testData.SNAPS_IN_SNAP_GROUP() self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot) expect_cmd = [ mock.call( *self.testData.CREATE_CG_SNAPSHOT( cg_name, cgsnapshot))] fake_cli.assert_has_calls(expect_cmd) def test_delete_cgsnapshot(self): snap_name = self.testData.test_cgsnapshot['id'] commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)] results = [SUCCEED] fake_cli = self.driverSetup(commands, results) self.driver.db = mock.MagicMock() self.driver.db.snapshot_get_all_for_cgsnapshot.return_value =\ self.testData.SNAPS_IN_SNAP_GROUP() self.driver.delete_cgsnapshot(None, self.testData.test_cgsnapshot) expect_cmd = [ mock.call( *self.testData.DELETE_CG_SNAPSHOT( snap_name))] fake_cli.assert_has_calls(expect_cmd) @mock.patch( "eventlet.event.Event.wait", mock.Mock(return_value=None)) def test_add_volume_to_cg(self): commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1'), self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1), self.testData.GET_CG_BY_NAME_CMD('cg_id') ] results = [self.testData.LUN_PROPERTY('vol1', True), SUCCEED, self.testData.CG_PROPERTY('cg_id')] fake_cli = self.driverSetup(commands, results) self.driver.create_volume(self.testData.test_volume_cg) expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol1', 1, 'unit_test_pool', None, None)), mock.call('lun', '-list', '-name', 'vol1', '-state', '-status', '-opDetails', '-userCap', '-owner', '-attachedSnapshot'), mock.call(*self.testData.ADD_LUN_TO_CG_CMD( 'cg_id', 1))] fake_cli.assert_has_calls(expect_cmd) def test_create_cloned_volume_from_consistnecy_group(self): cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot') output_smp = ("""LOGICAL UNIT NUMBER 1 Name: vol1 Attached Snapshot: N/A""", 0) cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest") output_dest = self.testData.LUN_PROPERTY("vol1_dest") cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) cg_name = self.testData.test_cgsnapshot['consistencygroup_id'] commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify] results = [output_smp, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) self.driver.create_cloned_volume(self.testData.test_volume_clone_cg, self.testData.test_clone_cg) tmp_cgsnapshot = 'tmp-cgsnapshot-' + self.testData.test_volume['id'] expect_cmd = [ mock.call( *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)), mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1', source='clone1')), mock.call( *self.testData.SNAP_ATTACH_CMD( name='vol1', snapName=tmp_cgsnapshot)), mock.call(*self.testData.LUN_CREATION_CMD( 'vol1_dest', 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))] fake_cli.assert_has_calls(expect_cmd) def 
test_create_volume_from_cgsnapshot(self): cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot') output_smp = ("""LOGICAL UNIT NUMBER 1 Name: vol2 Attached Snapshot: N/A""", 0) cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest") output_dest = self.testData.LUN_PROPERTY("vol2_dest") cmd_migrate = self.testData.MIGRATION_CMD(1, 1) output_migrate = ("", 0) cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1) output_migrate_verify = (r'The specified source LUN ' 'is not currently migrating', 23) commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify] results = [output_smp, output_dest, output_migrate, output_migrate_verify] fake_cli = self.driverSetup(commands, results) self.driver.create_volume_from_snapshot( self.testData.volume_in_cg, self.testData.test_member_cgsnapshot) expect_cmd = [ mock.call( *self.testData.SNAP_MP_CREATE_CMD( name='vol2', source='vol1')), mock.call( *self.testData.SNAP_ATTACH_CMD( name='vol2', snapName='cgsnapshot_id')), mock.call(*self.testData.LUN_CREATION_CMD( 'vol2_dest', 1, 'unit_test_pool', None, None)), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')), mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True), mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)), mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'))] fake_cli.assert_has_calls(expect_cmd) def succeed_fake_command_execute(self, *command, **kwargv): return SUCCEED def fake_get_pool_properties(self, filter_option, properties=None): pool_info = {'pool_name': "unit_test_pool0", 'total_capacity_gb': 1000.0, 'free_capacity_gb': 1000.0 } return pool_info def fake_get_lun_properties(self, filter_option, properties=None): lun_info = {'lun_name': "vol1", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} return lun_info def fake_safe_get(self, value): if value == "storage_vnx_pool_name": return "unit_test_pool" elif 'volume_backend_name' == value: return "namedbackend" else: return None class EMCVNXCLIDriverFCTestCase(test.TestCase): def setUp(self): super(EMCVNXCLIDriverFCTestCase, self).setUp() self.stubs.Set(CommandLineHelper, 'command_execute', self.succeed_fake_command_execute) self.stubs.Set(CommandLineHelper, 'get_array_serial', mock.Mock(return_value={'array_serial': "fakeSerial"})) self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1)) self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01) self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01) self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01) self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli' self.configuration.san_ip = '10.0.0.1' self.configuration.storage_vnx_pool_name = 'unit_test_pool' self.configuration.san_login = 'sysadmin' self.configuration.san_password = 'sysadmin' #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms self.configuration.default_timeout = 0.0002 self.configuration.initiator_auto_registration = True self.configuration.zoning_mode = None self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get) self.testData = EMCVNXCLIDriverTestData() self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \ '-address 10.0.0.1 -user sysadmin 
-password sysadmin -scope 0 ' def tearDown(self): super(EMCVNXCLIDriverFCTestCase, self).tearDown() def driverSetup(self, commands=tuple(), results=tuple()): self.driver = EMCCLIFCDriver(configuration=self.configuration) fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.Mock(side_effect=fake_command_execute) self.driver.cli._client.command_execute = fake_cli return fake_cli def get_command_execute_simulator(self, commands=tuple(), results=tuple()): assert(len(commands) == len(results)) def fake_command_execute(*args, **kwargv): for i in range(len(commands)): if args == commands[i]: if isinstance(results[i], list): if len(results[i]) > 0: ret = results[i][0] del results[i][0] return ret else: return results[i] return self.standard_fake_command_execute(*args, **kwargv) return fake_command_execute def standard_fake_command_execute(self, *args, **kwargv): standard_commands = [ self.testData.LUN_PROPERTY_ALL_CMD('vol1'), self.testData.LUN_PROPERTY_ALL_CMD('vol2'), self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'), self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'), self.testData.POOL_PROPERTY_CMD] standard_results = [ self.testData.LUN_PROPERTY('vol1'), self.testData.LUN_PROPERTY('vol2'), self.testData.LUN_PROPERTY('vol-vol1'), self.testData.LUN_PROPERTY('snapshot1'), self.testData.POOL_PROPERTY] standard_default = SUCCEED for i in range(len(standard_commands)): if args == standard_commands[i]: return standard_results[i] return standard_default def succeed_fake_command_execute(self, *command, **kwargv): return SUCCEED def fake_get_pool_properties(self, filter_option, properties=None): pool_info = {'pool_name': "unit_test_pool0", 'total_capacity_gb': 1000.0, 'free_capacity_gb': 1000.0 } return pool_info def fake_get_lun_properties(self, filter_option, properties=None): lun_info = {'lun_name': "vol1", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} return lun_info def fake_safe_get(self, value): if value == "storage_vnx_pool_name": return "unit_test_pool" elif 'volume_backend_name' == value: return "namedbackend" else: return None @mock.patch( "oslo_concurrency.processutils.execute", mock.Mock( return_value=( "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0))) @mock.patch("random.shuffle", mock.Mock()) def test_initialize_connection_fc_auto_reg(self): # Test for auto registration self.configuration.initiator_auto_registration = True commands = [('storagegroup', '-list', '-gname', 'fakehost'), ('storagegroup', '-list'), self.testData.GETFCPORT_CMD(), ('port', '-list', '-gname', 'fakehost')] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], self.testData.STORAGE_GROUP_HAS_MAP('fakehost'), self.testData.FC_PORTS, self.testData.FAKEHOST_PORTS] fake_cli = self.driverSetup(commands, results) data = self.driver.initialize_connection( self.testData.test_volume, self.testData.connector) self.assertEqual(data['data']['access_mode'], 'ro') expected = [ mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-list'), mock.call('port', '-list', '-sp'), mock.call('storagegroup', '-gname', 'fakehost', '-setpath', '-hbauid', '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56', '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2', '-host', 'fakehost', '-o'), mock.call('port', '-list', '-sp'), mock.call('storagegroup', '-gname', 
'fakehost', '-setpath', '-hbauid', '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16', '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2', '-host', 'fakehost', '-o'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost'), mock.call('port', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('port', '-list', '-sp')] fake_cli.assert_has_calls(expected) # Test for manaul registration self.configuration.initiator_auto_registration = False commands = [('storagegroup', '-list', '-gname', 'fakehost'), ('storagegroup', '-list'), self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'), self.testData.GETFCPORT_CMD(), ('port', '-list', '-gname', 'fakehost')] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], self.testData.STORAGE_GROUP_HAS_MAP('fakehost'), ('', 0), self.testData.FC_PORTS, self.testData.FAKEHOST_PORTS] fake_cli = self.driverSetup(commands, results) data = self.driver.initialize_connection( self.testData.test_volume_rw, self.testData.connector) self.assertEqual(data['data']['access_mode'], 'rw') expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-connecthost', '-host', 'fakehost', '-gname', 'fakehost', '-o'), mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost'), mock.call('port', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('port', '-list', '-sp')] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." 
+ "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) @mock.patch("random.shuffle", mock.Mock()) def test_initialize_connection_fc_auto_zoning(self): # Test for auto zoning self.configuration.zoning_mode = 'fabric' self.configuration.initiator_auto_registration = False commands = [('storagegroup', '-list', '-gname', 'fakehost'), ('storagegroup', '-list'), self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'), self.testData.GETFCPORT_CMD(), ('port', '-list', '-gname', 'fakehost')] results = [[("No group", 83), self.testData.STORAGE_GROUP_NO_MAP('fakehost'), self.testData.STORAGE_GROUP_HAS_MAP('fakehost')], self.testData.STORAGE_GROUP_HAS_MAP('fakehost'), ('', 0), self.testData.FC_PORTS, self.testData.FAKEHOST_PORTS] fake_cli = self.driverSetup(commands, results) self.driver.cli.zonemanager_lookup_service = FCSanLookupService( configuration=self.configuration) conn_info = self.driver.initialize_connection( self.testData.test_volume, self.testData.connector) self.assertEqual(conn_info['data']['initiator_target_map'], EMCVNXCLIDriverTestData.i_t_map) self.assertEqual(conn_info['data']['target_wwn'], ['1122334455667777']) expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-create', '-gname', 'fakehost'), mock.call('storagegroup', '-connecthost', '-host', 'fakehost', '-gname', 'fakehost', '-o'), mock.call('lun', '-list', '-name', 'vol1', '-state', '-status', '-opDetails', '-userCap', '-owner', '-attachedSnapshot'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1, '-gname', 'fakehost'), mock.call('port', '-list', '-gname', 'fakehost'), mock.call('storagegroup', '-list', '-gname', 'fakehost'), mock.call('port', '-list', '-sp')] fake_cli.assert_has_calls(expected) @mock.patch( "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." + "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) def test_terminate_connection_remove_zone_false(self): self.driver = EMCCLIFCDriver(configuration=self.configuration) cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {1: 16, 2: 88, 3: 47}} cli_helper.get_storage_group = mock.Mock( return_value=data) lun_info = {'lun_name': "unit_test_lun", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.remove_hlu_from_storagegroup = mock.Mock() self.driver.cli.zonemanager_lookup_service = FCSanLookupService( configuration=self.configuration) connection_info = self.driver.terminate_connection( self.testData.test_volume, self.testData.connector) self.assertFalse('initiator_target_map' in connection_info['data'], 'initiator_target_map should not appear.') cli_helper.remove_hlu_from_storagegroup.assert_called_once_with( 16, self.testData.connector["host"]) @mock.patch( "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." 
+ "get_device_mapping_from_network", mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map)) def test_terminate_connection_remove_zone_true(self): self.driver = EMCCLIFCDriver(configuration=self.configuration) cli_helper = self.driver.cli._client data = {'storage_group_name': "fakehost", 'storage_group_uid': "2F:D4:00:00:00:00:00:" "00:00:00:FF:E5:3A:03:FD:6D", 'lunmap': {}} cli_helper.get_storage_group = mock.Mock( return_value=data) lun_info = {'lun_name': "unit_test_lun", 'lun_id': 1, 'pool': "unit_test_pool", 'attached_snapshot': "N/A", 'owner': "A", 'total_capacity_gb': 1.0, 'state': "Ready"} cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info) cli_helper.remove_hlu_from_storagegroup = mock.Mock() self.driver.cli.zonemanager_lookup_service = FCSanLookupService( configuration=self.configuration) connection_info = self.driver.terminate_connection( self.testData.test_volume, self.testData.connector) self.assertTrue('initiator_target_map' in connection_info['data'], 'initiator_target_map should be populated.') self.assertEqual(connection_info['data']['initiator_target_map'], EMCVNXCLIDriverTestData.i_t_map) def test_get_volume_stats(self): #expect_result = [POOL_PROPERTY] self.driverSetup() stats = self.driver.get_volume_stats(True) self.assertTrue(stats['driver_version'] is not None, "dirver_version is not returned") self.assertTrue( stats['free_capacity_gb'] == 1000.6, "free_capacity_gb is not correct") self.assertTrue( stats['reserved_percentage'] == 0, "reserved_percentage is not correct") self.assertTrue( stats['storage_protocol'] == 'FC', "storage_protocol is not correct") self.assertTrue( stats['total_capacity_gb'] == 10000.5, "total_capacity_gb is not correct") self.assertTrue( stats['vendor_name'] == "EMC", "vender name is not correct") self.assertTrue( stats['volume_backend_name'] == "namedbackend", "volume backend name is not correct") self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial") self.assertTrue( stats['driver_version'] == "04.01.00", "driver version is incorrect.") class EMCVNXCLIToggleSPTestData(): def FAKE_COMMAND_PREFIX(self, sp_address): return ('/opt/Navisphere/bin/naviseccli', '-address', sp_address, '-user', 'sysadmin', '-password', 'sysadmin', '-scope', 'global') class EMCVNXCLIToggleSPTestCase(test.TestCase): def setUp(self): super(EMCVNXCLIToggleSPTestCase, self).setUp() self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1)) self.configuration = mock.Mock(conf.Configuration) self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli' self.configuration.san_ip = '10.10.10.10' self.configuration.san_secondary_ip = "10.10.10.11" self.configuration.storage_vnx_pool_name = 'unit_test_pool' self.configuration.san_login = 'sysadmin' self.configuration.san_password = 'sysadmin' self.configuration.default_timeout = 1 self.configuration.max_luns_per_storage_group = 10 self.configuration.destroy_empty_storage_group = 10 self.configuration.storage_vnx_authentication_type = "global" self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}' self.configuration.zoning_mode = None self.configuration.storage_vnx_security_file_dir = "" self.cli_client = emc_vnx_cli.CommandLineHelper( configuration=self.configuration) self.test_data = EMCVNXCLIToggleSPTestData() def tearDown(self): super(EMCVNXCLIToggleSPTestCase, self).tearDown() def test_no_sp_toggle(self): self.cli_client.active_storage_ip = '10.10.10.10' FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, 
FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10") expected = [mock.call(*('ping', '-c', 1, '10.10.10.10'), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected) def test_toggle_sp_with_server_unavailabe(self): self.cli_client.active_storage_ip = '10.10.10.10' FAKE_ERROR_MSG = """\ Error occurred during HTTP request/response from the target: '10.244.213.142'. Message : HTTP/1.1 503 Service Unavailable""" FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, processutils.ProcessExecutionError( exit_code=255, stdout=FAKE_ERROR_MSG), FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11") expected = [ mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected) def test_toggle_sp_with_end_of_data(self): self.cli_client.active_storage_ip = '10.10.10.10' FAKE_ERROR_MSG = """\ Error occurred during HTTP request/response from the target: '10.244.213.142'. Message : End of data stream""" FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, processutils.ProcessExecutionError( exit_code=255, stdout=FAKE_ERROR_MSG), FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11") expected = [ mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected) def test_toggle_sp_with_connection_refused(self): self.cli_client.active_storage_ip = '10.10.10.10' FAKE_ERROR_MSG = """\ A network error occurred while trying to connect: '10.244.213.142'. Message : Error occurred because connection refused. \ Unable to establish a secure connection to the Management Server. """ FAKE_SUCCESS_RETURN = ('success', 0) FAKE_COMMAND = ('list', 'pool') SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, processutils.ProcessExecutionError( exit_code=255, stdout=FAKE_ERROR_MSG), FAKE_SUCCESS_RETURN] with mock.patch('cinder.utils.execute') as mock_utils: mock_utils.side_effect = SIDE_EFFECTS self.cli_client.command_execute(*FAKE_COMMAND) self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11") expected = [ mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10') + FAKE_COMMAND), check_exit_code=True), mock.call( *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11') + FAKE_COMMAND), check_exit_code=True)] mock_utils.assert_has_calls(expected)
hguemar/cinder
cinder/tests/test_emc_vnxdirect.py
Python
apache-2.0
125,902
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file system implementation using pyfsapfs."""

import unittest

from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import apfs_file_system

from tests import test_lib as shared_test_lib


class APFSFileSystemTest(shared_test_lib.BaseTestCase):
  """Tests the APFS file system."""

  _IDENTIFIER_PASSWORDS_TXT = 20

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['apfs.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    self._apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/apfs1',
        parent=test_raw_path_spec)
    self._apfs_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=self._apfs_container_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenAndClose(self):
    """Test the open and close functionality."""
    file_system = apfs_file_system.APFSFileSystem(
        self._resolver_context, self._apfs_path_spec)
    self.assertIsNotNone(file_system)

    file_system.Open()

  def testFileEntryExistsByPathSpec(self):
    """Test the file entry exists by path specification functionality."""
    file_system = apfs_file_system.APFSFileSystem(
        self._resolver_context, self._apfs_path_spec)
    self.assertIsNotNone(file_system)

    file_system.Open()

    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
        identifier=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._apfs_container_path_spec)
    self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))

    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/bogus.txt',
        parent=self._apfs_container_path_spec)
    self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))

  def testGetFileEntryByPathSpec(self):
    """Tests the GetFileEntryByPathSpec function."""
    file_system = apfs_file_system.APFSFileSystem(
        self._resolver_context, self._apfs_path_spec)
    self.assertIsNotNone(file_system)

    file_system.Open()

    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._apfs_container_path_spec)
    file_entry = file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)
    # There is no way to determine the file_entry.name without a location
    # string in the path_spec or retrieving the file_entry from its parent.

    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
        identifier=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._apfs_container_path_spec)
    file_entry = file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.name, 'passwords.txt')

    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/bogus.txt',
        parent=self._apfs_container_path_spec)
    file_entry = file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNone(file_entry)

  # TODO: add tests for GetAPFSFileEntryByPathSpec function.

  def testGetRootFileEntry(self):
    """Test the get root file entry functionality."""
    file_system = apfs_file_system.APFSFileSystem(
        self._resolver_context, self._apfs_path_spec)
    self.assertIsNotNone(file_system)

    file_system.Open()

    file_entry = file_system.GetRootFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.name, '')


if __name__ == '__main__':
  unittest.main()
joachimmetz/dfvfs
tests/vfs/apfs_file_system.py
Python
apache-2.0
4,316
# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ ZoneManager is responsible to manage access control using FC zoning when zoning mode is set as 'fabric'. ZoneManager provides interfaces to add connection and remove connection for given initiator and target list associated with a FC volume attach and detach operation. **Related Flags** :zone_driver: Used by:class:`ZoneManager`. Defaults to `cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver` :zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none' """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import six from cinder import exception from cinder.i18n import _, _LI from cinder.volume import configuration as config from cinder.zonemanager import fc_common import cinder.zonemanager.fczm_constants as zone_constant LOG = logging.getLogger(__name__) zone_manager_opts = [ cfg.StrOpt('zone_driver', default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' '.BrcdFCZoneDriver', help='FC Zone Driver responsible for zone management'), cfg.StrOpt('zoning_policy', default='initiator-target', help='Zoning policy configured by user; valid values include ' '"initiator-target" or "initiator"'), cfg.StrOpt('fc_fabric_names', help='Comma separated list of Fibre Channel fabric names.' ' This list of names is used to retrieve other SAN credentials' ' for connecting to each SAN fabric'), cfg.StrOpt('fc_san_lookup_service', default='cinder.zonemanager.drivers.brocade' '.brcd_fc_san_lookup_service.BrcdFCSanLookupService', help='FC SAN Lookup Service') ] CONF = cfg.CONF CONF.register_opts(zone_manager_opts, group='fc-zone-manager') class ZoneManager(fc_common.FCCommon): """Manages Connection control during attach/detach. Version History: 1.0 - Initial version 1.0.1 - Added __new__ for singleton 1.0.2 - Added friendly zone name """ VERSION = "1.0.2" driver = None fabric_names = [] def __new__(class_, *args, **kwargs): if not hasattr(class_, "_instance"): class_._instance = object.__new__(class_) return class_._instance def __init__(self, **kwargs): """Load the driver from the one specified in args, or from flags.""" super(ZoneManager, self).__init__(**kwargs) self.configuration = config.Configuration(zone_manager_opts, 'fc-zone-manager') self._build_driver() def _build_driver(self): zone_driver = self.configuration.zone_driver LOG.debug("Zone driver from config: %(driver)s", {'driver': zone_driver}) zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager') # Initialize vendor specific implementation of FCZoneDriver self.driver = importutils.import_object( zone_driver, configuration=zm_config) def get_zoning_state_ref_count(self, initiator_wwn, target_wwn): """Zone management state check. Performs state check for given I-T pair to return the current count of active attach for the pair. 
""" # TODO(sk): ref count state management count = 0 # check the state for I-T pair return count def add_connection(self, conn_info): """Add connection control. Adds connection control for the given initiator target map. initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: eg: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[ zone_constant.DATA][ zone_constant.HOST].replace(" ", "_") if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.debug("Target list : %(targets)s", {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Fabric map after context lookup: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, True) LOG.info(_LI("Final filtered map for fabric: %(i_t_map)s"), {'i_t_map': valid_i_t_map}) # Call driver to add connection control self.driver.add_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.info(_LI("Add connection: finished iterating " "over all target list")) except Exception as e: msg = _("Failed adding connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': six.text_type(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def delete_connection(self, conn_info): """Delete connection. Updates/deletes connection control for the given initiator target map. 
initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: eg: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[zone_constant.DATA][zone_constant.HOST] if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.info(_LI("Delete connection target list: %(targets)s"), {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Delete connection fabric map from SAN " "context: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, False) LOG.info(_LI("Final filtered map for delete connection: " "%(i_t_map)s"), {'i_t_map': valid_i_t_map}) # Call driver to delete connection control if len(valid_i_t_map) > 0: self.driver.delete_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.debug("Delete connection - finished iterating over all" " target list") except Exception as e: msg = _("Failed removing connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': six.text_type(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def get_san_context(self, target_wwn_list): """SAN lookup for end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ fabric_map = self.driver.get_san_context(target_wwn_list) LOG.debug("Got SAN context: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map def get_valid_initiator_target_map(self, initiator_target_map, add_control): """Reference count check for end devices. Looks up the reference count for each initiator-target pair from the map and returns a filtered list based on the operation type add_control - operation type can be true for add connection control and false for remove connection control """ filtered_i_t_map = {} for initiator in initiator_target_map.keys(): t_list = initiator_target_map[initiator] for target in t_list: count = self.get_zoning_state_ref_count(initiator, target) if add_control: if count > 0: t_list.remove(target) # update count = count + 1 else: if count > 1: t_list.remove(target) # update count = count - 1 if t_list: filtered_i_t_map[initiator] = t_list else: LOG.info(_LI("No targets to add or remove connection for " "initiator: %(init_wwn)s"), {'init_wwn': initiator}) return filtered_i_t_map
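# Illustrative sketch appended by the editor (not part of the original cinder
# module): the conn_info mapping that add_connection()/delete_connection()
# above expect, keyed by the zone_constant names used in this file. The WWNs
# repeat the docstring example; the host and storage system names are
# hypothetical placeholders.
_EXAMPLE_CONN_INFO = {
    zone_constant.DATA: {
        zone_constant.IT_MAP: {
            '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'],
        },
        zone_constant.HOST: 'compute-host-1',
        zone_constant.STORAGE: 'storage-array-1',
    },
}
# Passing such a structure to ZoneManager.add_connection() looks up the SAN
# context for each target list and zones every initiator-target pair that the
# reference-count filter leaves in place.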
dims/cinder
cinder/zonemanager/fc_zone_manager.py
Python
apache-2.0
11,517
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016-2018, Eric Jacob <erjac77@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: f5bigip_sys_crypto_cert short_description: BIG-IP sys crypto cert module description: - Manage cryptographic certificates on the BIG-IP system. version_added: "2.4" author: - "Eric Jacob (@erjac77)" options: city: description: - Specifies the x509 city field to be used in creation of the certificate associated with the given key. command: description: - Specifies the command to execute. choices: ['install'] common_name: description: - Specifies the x509 common-name to be used in creation of the certificate associated with the given key. consumer: description: - Specifies the system component by which a key and/or associated cryptographic file will be consumed. default: ltm choices: ['enterprise-manager', 'iquery', 'iquery-big3d', 'ltm', 'webserver'] country: description: - Specifies the x509 country to be used in creation of the certificate associated with the given key. email_address: description: - Specifies the x509 email-address to be used in creation of the certificate associated with the given key. from_editor: description: - Specifies that the key should be obtained from a text editor session. from_local_file: description: - Specifies a local file path from which a key is to be copied. from_url: description: - Specifies a URI which is to be used to obtain a key for import into the configuration of the system. key: description: - Specifies a key from which a certificate should be generated when using the create command. required: true lifetime: description: - Specifies the certificate life time to be used in creation of the certificate associated with the given key. default: 365 organization: description: - Specifies the x509 organization to be used in creation of the certificate associated with the given key. ou: description: - Specifies the x509 organizational unit to be used in creation of the certificate associated with the given key. name: description: - Specifies unique name for the component. required: true no_overwrite: description: - Specifies option of not overwriting a key if it is in the scope. default: true type: bool partition: description: - Displays the administrative partition in which the component object resides. default: Common state: description: - Specifies the state of the component on the BIG-IP system. default: present choices: ['absent', 'present'] state_province: description: - Specifies the x509 state or province of the certificate associated with the given key. subject_alternative_name: description: - Specifies standard X.509 extensions as shown in RFC 2459. 
requirements: - BIG-IP >= 12.0 - ansible-common-f5 - f5-sdk ''' EXAMPLES = ''' - name: Install SYS Crypto Cert from local file f5bigip_sys_crypto_cert: f5_hostname: 172.16.227.35 f5_username: admin f5_password: admin f5_port: 443 name: exemple.localhost.crt partition: Common from_local_file: /tmp/exemple.localhost.crt state: present delegate_to: localhost - name: Create SYS Crypto Cert f5bigip_sys_crypto_cert: f5_hostname: 172.16.227.35 f5_username: admin f5_password: admin f5_port: 443 name: exemple.localhost.crt partition: Common key: exemple.localhost.key common_name: exemple.localhost city: city state_province: state country: US email_address: 'admin@localhost' organization: My Org ou: My Div state: present delegate_to: localhost ''' RETURN = ''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible_common_f5.base import AnsibleF5Error from ansible_common_f5.base import F5_NAMED_OBJ_ARGS from ansible_common_f5.base import F5_PROVIDER_ARGS from ansible_common_f5.bigip import F5BigIpNamedObject class ModuleParams(object): @property def argument_spec(self): argument_spec = dict( consumer=dict(type='str', choices=['enterprise-manager', 'iquery', 'iquery-big3d', 'ltm', 'webserver']), # create city=dict(type='str'), common_name=dict(type='str'), country=dict(type='str'), email_address=dict(type='str'), key=dict(type='str'), lifetime=dict(type='int'), organization=dict(type='str'), ou=dict(type='str'), state_province=dict(type='str'), subject_alternative_name=dict(type='str'), # install command=dict(type='str', choices=['install']), from_editor=dict(type='str'), from_local_file=dict(type='str'), from_url=dict(type='str'), no_overwrite=dict(type='bool') ) argument_spec.update(F5_PROVIDER_ARGS) argument_spec.update(F5_NAMED_OBJ_ARGS) return argument_spec @property def supports_check_mode(self): return True @property def mutually_exclusive(self): return [ ['from_editor', 'from_local_file', 'from_url'] ] @property def tr(self): # Translation dict for conflictual params return {'state_province': 'state'} class F5BigIpSysCryptoCert(F5BigIpNamedObject): def _set_crud_methods(self): self._methods = { 'create': self._api.tm.sys.crypto.certs.cert.create, 'read': self._api.tm.sys.crypto.certs.cert.load, 'update': self._api.tm.sys.crypto.certs.cert.update, 'delete': self._api.tm.sys.crypto.certs.cert.delete, 'exists': self._api.tm.sys.crypto.certs.cert.exists, 'exec_cmd': self._api.tm.sys.crypto.certs.exec_cmd } def _install(self): """Upload the key on the BIG-IP system.""" name = self._params['name'] param_set = {} if self._params['fromEditor']: param_set = {'from-editor': self._params['fromEditor']} if self._params['fromLocalFile']: param_set = {'from-local-file': self._params['fromLocalFile']} if self._params['fromUrl']: param_set = {'from-url': self._params['fromUrl']} if param_set: param_set.update({'name': name}) if self._params['consumer']: param_set.update({'consumer': self._params['consumer']}) if self._params['noOverwrite']: param_set.update({'no-overwrite': self._params['noOverwrite']}) # Install the key self._methods['exec_cmd']('install', **param_set) else: raise AnsibleF5Error("Missing required parameter 'from-*' to install the cert.") # Make sure it is installed if not self._exists(): raise AnsibleF5Error("Failed to create the object.") return True def _present(self): has_changed = False if self._params['command'] == 'install': if not self._exists() or (self._params['noOverwrite'] is not None and self._params['noOverwrite'] is False): has_changed = self._install() else: 
if not self._exists(): has_changed = self._create() return has_changed def main(): params = ModuleParams() module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode, mutually_exclusive=params.mutually_exclusive) try: obj = F5BigIpSysCryptoCert(check_mode=module.check_mode, tr=params.tr, **module.params) result = obj.flush() module.exit_json(**result) except Exception as exc: module.fail_json(msg=str(exc)) if __name__ == '__main__': main()
erjac77/ansible-module-f5bigip
library/f5bigip_sys_crypto_cert.py
Python
apache-2.0
8,937
# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from nova.api.openstack.compute.contrib import agents from nova import context from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import test fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'id': 4}, ] def fake_agent_build_get_all(context, hypervisor): agent_build_all = [] for agent in fake_agents_list: if hypervisor and hypervisor != agent['hypervisor']: continue agent_build_ref = models.AgentBuild() agent_build_ref.update(agent) agent_build_all.append(agent_build_ref) return agent_build_all def fake_agent_build_update(context, agent_build_id, values): pass def fake_agent_build_destroy(context, agent_update_id): pass def fake_agent_build_create(context, values): values['id'] = 1 agent_build_ref = models.AgentBuild() agent_build_ref.update(values) return agent_build_ref class FakeRequest(object): environ = {"nova.context": context.get_admin_context()} GET = {} class FakeRequestWithHypervisor(object): environ = {"nova.context": context.get_admin_context()} GET = {'hypervisor': 'kvm'} class AgentsTest(test.NoDBTestCase): def setUp(self): super(AgentsTest, self).setUp() self.stubs.Set(db, "agent_build_get_all", fake_agent_build_get_all) self.stubs.Set(db, "agent_build_update", fake_agent_build_update) self.stubs.Set(db, "agent_build_destroy", fake_agent_build_destroy) self.stubs.Set(db, "agent_build_create", fake_agent_build_create) self.context = context.get_admin_context() self.controller = agents.AgentController() def test_agents_create(self): req = FakeRequest() body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}} res_dict = self.controller.create(req, body) self.assertEqual(res_dict, response) def test_agents_create_key_error(self): req = FakeRequest() body = {'agent': {'hypervisordummy': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_agents_create_with_existed_agent(self): def 
fake_agent_build_create_with_exited_agent(context, values): raise exception.AgentBuildExists(**values) self.stubs.Set(db, 'agent_build_create', fake_agent_build_create_with_exited_agent) req = FakeRequest() body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body=body) def _test_agents_create_with_invalid_length(self, key): req = FakeRequest() body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['agent'][key] = 'x' * 256 self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_agents_create_with_invalid_length_hypervisor(self): self._test_agents_create_with_invalid_length('hypervisor') def test_agents_create_with_invalid_length_os(self): self._test_agents_create_with_invalid_length('os') def test_agents_create_with_invalid_length_architecture(self): self._test_agents_create_with_invalid_length('architecture') def test_agents_create_with_invalid_length_version(self): self._test_agents_create_with_invalid_length('version') def test_agents_create_with_invalid_length_url(self): self._test_agents_create_with_invalid_length('url') def test_agents_create_with_invalid_length_md5hash(self): self._test_agents_create_with_invalid_length('md5hash') def test_agents_delete(self): req = FakeRequest() self.controller.delete(req, 1) def test_agents_list(self): req = FakeRequest() res_dict = self.controller.index(req) agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'agent_id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'agent_id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'agent_id': 4}, ] self.assertEqual(res_dict, {'agents': agents_list}) def test_agents_list_with_hypervisor(self): req = FakeRequestWithHypervisor() res_dict = self.controller.index(req) response = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', 'url': 'xxx://xxxx/xxx/xxx1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'agent_id': 2}, ] self.assertEqual(res_dict, {'agents': response}) def test_agents_update(self): req = FakeRequest() body = {'para': {'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'agent_id': 1, 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} res_dict = self.controller.update(req, 1, body) self.assertEqual(res_dict, response) def test_agents_update_key_error(self): req = FakeRequest() body = {'para': {'versiondummy': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller.update, req, 1, body) def test_agents_update_value_error(self): req = FakeRequest() body = {'para': {'version': '7.0', 'url': 1111, 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, body) def _test_agents_update_with_invalid_length(self, key): req = FakeRequest() body = {'para': {'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['para'][key] = 'x' * 256 self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, body) def test_agents_update_with_invalid_length_version(self): self._test_agents_update_with_invalid_length('version') def test_agents_update_with_invalid_length_url(self): self._test_agents_update_with_invalid_length('url') def test_agents_update_with_invalid_length_md5hash(self): self._test_agents_update_with_invalid_length('md5hash')
viggates/nova
nova/tests/api/openstack/compute/contrib/test_agents.py
Python
apache-2.0
10,887
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-07 17:27
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('climate_data', '0029_auto_20170628_1527'),
    ]

    operations = [
        migrations.AlterField(
            model_name='datatype',
            name='short_name',
            field=models.CharField(db_index=True, max_length=20, unique=True),
        ),
    ]
qubs/climate-data-api
climate_data/migrations/0030_auto_20170707_1727.py
Python
apache-2.0
490
__author__ = 'alberto'

import time
from functools import wraps

from config import logger


def measure_time(func):
    """ Decorator that reports the execution time. """
    @wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("Running %s", func.__name__)
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        logger.info("Execution time: %s", end - start)
        return result
    return wrapper
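# Illustrative usage sketch appended by the editor (not part of the original
# module): any function decorated with measure_time logs its name before the
# call and the elapsed wall-clock time afterwards, while functools.wraps keeps
# the wrapped function's metadata intact. The function below is hypothetical.
@measure_time
def _example_sum(n):
    return sum(range(n))

# Calling _example_sum(1000000) logs "Running _example_sum" followed by the
# execution time, and _example_sum.__name__ is still '_example_sum'.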
jresendiz27/EvolutionaryComputing
escom/pepo/utils.py
Python
apache-2.0
469
# Copyright 2018-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Execute Transactions Spec tests.""" import os import sys from io import BytesIO sys.path[0:0] = [""] from test import client_context, unittest from test.utils import ( OvertCommandListener, TestCreator, rs_client, single_client, wait_until, ) from test.utils_spec_runner import SpecRunner from gridfs import GridFS, GridFSBucket from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions from pymongo.errors import ( CollectionInvalid, ConfigurationError, ConnectionFailure, InvalidOperation, OperationFailure, ) from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") _TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. With 2 mongoses, # 50 attempts yields a one in a quadrillion chance of a false positive # (1/(0.5^50)). UNPIN_TEST_MAX_ATTEMPTS = 50 class TransactionsBase(SpecRunner): @classmethod def setUpClass(cls): super(TransactionsBase, cls).setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: cls.mongos_clients.append(single_client("%s:%s" % address)) @classmethod def tearDownClass(cls): for client in cls.mongos_clients: client.close() super(TransactionsBase, cls).tearDownClass() def maybe_skip_scenario(self, test): super(TransactionsBase, self).maybe_skip_scenario(test) if ( "secondary" in self.id() and not client_context.is_mongos and not client_context.has_secondaries ): raise unittest.SkipTest("No secondaries") class TestTransactions(TransactionsBase): RUN_ON_SERVERLESS = True @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() self.assertIsNone(default_options.read_concern) self.assertIsNone(default_options.write_concern) self.assertIsNone(default_options.read_preference) self.assertIsNone(default_options.max_commit_time_ms) # No error when valid options are provided. 
TransactionOptions( read_concern=ReadConcern(), write_concern=WriteConcern(), read_preference=ReadPreference.PRIMARY, max_commit_time_ms=10000, ) with self.assertRaisesRegex(TypeError, "read_concern must be "): TransactionOptions(read_concern={}) # type: ignore with self.assertRaisesRegex(TypeError, "write_concern must be "): TransactionOptions(write_concern={}) # type: ignore with self.assertRaisesRegex( ConfigurationError, "transactions do not support unacknowledged write concern" ): TransactionOptions(write_concern=WriteConcern(w=0)) with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): TransactionOptions(read_preference={}) # type: ignore with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): TransactionOptions(max_commit_time_ms="10000") # type: ignore @client_context.require_transactions def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" client = rs_client(w=0) self.addCleanup(client.close) db = client.test coll = db.test coll.insert_one({}) with client.start_session() as s: with s.start_transaction(write_concern=WriteConcern(w=1)): self.assertTrue(coll.insert_one({}, session=s).acknowledged) self.assertTrue(coll.insert_many([{}, {}], session=s).acknowledged) self.assertTrue(coll.bulk_write([InsertOne({})], session=s).acknowledged) self.assertTrue(coll.replace_one({}, {}, session=s).acknowledged) self.assertTrue(coll.update_one({}, {"$set": {"a": 1}}, session=s).acknowledged) self.assertTrue(coll.update_many({}, {"$set": {"a": 1}}, session=s).acknowledged) self.assertTrue(coll.delete_one({}, session=s).acknowledged) self.assertTrue(coll.delete_many({}, session=s).acknowledged) coll.find_one_and_delete({}, session=s) coll.find_one_and_replace({}, {}, session=s) coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) unsupported_txn_writes: list = [ (client.drop_database, [db.name], {}), (db.drop_collection, ["collection"], {}), (coll.drop, [], {}), (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. (coll.database.drop_collection, ["collection2"], {}), (coll.create_indexes, [[IndexModel("a")]], {}), (coll.create_index, ["a"], {}), (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] # Creating a collection in a transaction requires MongoDB 4.4+. if client_context.version < (4, 3, 4): unsupported_txn_writes.extend( [ (db.create_collection, ["collection"], {}), ] ) for op in unsupported_txn_writes: op, args, kwargs = op with client.start_session() as s: kwargs["session"] = s s.start_transaction(write_concern=WriteConcern(w=1)) with self.assertRaises(OperationFailure): op(*args, **kwargs) s.abort_transaction() @client_context.require_transactions @client_context.require_multiple_mongoses def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. 
with s.start_transaction(): coll.insert_one({}, session=s) addresses = set() for _ in range(UNPIN_TEST_MAX_ATTEMPTS): with s.start_transaction(): cursor = coll.find({}, session=s) self.assertTrue(next(cursor)) addresses.add(cursor.address) # Break early if we can. if len(addresses) > 1: break self.assertGreater(len(addresses), 1) @client_context.require_transactions @client_context.require_multiple_mongoses def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. with s.start_transaction(): coll.insert_one({}, session=s) addresses = set() for _ in range(UNPIN_TEST_MAX_ATTEMPTS): cursor = coll.find({}, session=s) self.assertTrue(next(cursor)) addresses.add(cursor.address) # Break early if we can. if len(addresses) > 1: break self.assertGreater(len(addresses), 1) @client_context.require_transactions @client_context.require_version_min(4, 3, 4) def test_create_collection(self): client = client_context.client db = client.pymongo_test coll = db.test_create_collection self.addCleanup(coll.drop) # Use with_transaction to avoid StaleConfig errors on sharded clusters. def create_and_insert(session): coll2 = db.create_collection(coll.name, session=session) self.assertEqual(coll, coll2) coll.insert_one({}, session=session) with client.start_session() as s: s.with_transaction(create_and_insert) # Outside a transaction we raise CollectionInvalid on existing colls. with self.assertRaises(CollectionInvalid): db.create_collection(coll.name) # Inside a transaction we raise the OperationFailure from create. with client.start_session() as s: s.start_transaction() with self.assertRaises(OperationFailure) as ctx: db.create_collection(coll.name, session=s) self.assertEqual(ctx.exception.code, 48) # NamespaceExists @client_context.require_transactions def test_gridfs_does_not_support_transactions(self): client = client_context.client db = client.pymongo_test gfs = GridFS(db) bucket = GridFSBucket(db) def gridfs_find(*args, **kwargs): return gfs.find(*args, **kwargs).next() def gridfs_open_upload_stream(*args, **kwargs): bucket.open_upload_stream(*args, **kwargs).write(b"1") gridfs_ops = [ (gfs.put, (b"123",)), (gfs.get, (1,)), (gfs.get_version, ("name",)), (gfs.get_last_version, ("name",)), (gfs.delete, (1,)), (gfs.list, ()), (gfs.find_one, ()), (gridfs_find, ()), (gfs.exists, ()), (gridfs_open_upload_stream, ("name",)), ( bucket.upload_from_stream, ( "name", b"data", ), ), ( bucket.download_to_stream, ( 1, BytesIO(), ), ), ( bucket.download_to_stream_by_name, ( "name", BytesIO(), ), ), (bucket.delete, (1,)), (bucket.find, ()), (bucket.open_download_stream, (1,)), (bucket.open_download_stream_by_name, ("name",)), ( bucket.rename, ( 1, "new-name", ), ), ] with client.start_session() as s, s.start_transaction(): for op, args in gridfs_ops: with self.assertRaisesRegex( InvalidOperation, "GridFS does not support multi-document transactions", ): op(*args, session=s) # type: ignore # Require 4.2+ for large (16MB+) transactions. 
@client_context.require_version_min(4, 2) @client_context.require_transactions @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") def test_transaction_starts_with_batched_write(self): if "PyPy" in sys.version and client_context.tls: self.skipTest( "PYTHON-2937 PyPy is so slow sending large " "messages over TLS that this test fails" ) # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) coll = client[self.db.name].test coll.delete_many({}) listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) large_str = "\0" * (10 * 1024 * 1024) ops = [InsertOne({"a": large_str}) for _ in range(10)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) # Assert commands were constructed properly. self.assertEqual( ["insert", "insert", "insert", "commitTransaction"], listener.started_command_names() ) first_cmd = listener.results["started"][0].command self.assertTrue(first_cmd["startTransaction"]) lsid = first_cmd["lsid"] txn_number = first_cmd["txnNumber"] for event in listener.results["started"][1:]: self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) self.assertEqual(10, coll.count_documents({})) class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" def __init__(self, mock_timeout): self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT self.mock_timeout = mock_timeout def __enter__(self): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout return self def __exit__(self, exc_type, exc_val, exc_tb): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout class TestTransactionsConvenientAPI(TransactionsBase): TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" ) @client_context.require_transactions def test_callback_raises_custom_error(self): class _MyException(Exception): pass def raise_error(_): raise _MyException() with self.client.start_session() as s: with self.assertRaises(_MyException): s.with_transaction(raise_error) @client_context.require_transactions def test_callback_returns_value(self): def callback(_): return "Foo" with self.client.start_session() as s: self.assertEqual(s.with_transaction(callback), "Foo") self.db.test.insert_one({}) def callback2(session): self.db.test.insert_one({}, session=session) return "Foo" with self.client.start_session() as s: self.assertEqual(s.with_transaction(callback2), "Foo") @client_context.require_transactions def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): coll.insert_one({}, session=session) err: dict = { "ok": 0, "errmsg": "Transaction 7819 has been aborted.", "code": 251, "codeName": "NoSuchTransaction", "errorLabels": ["TransientTransactionError"], } raise OperationFailure(err["errmsg"], err["code"], err) # Create the collection. 
coll.insert_one({}) listener.results.clear() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): s.with_transaction(callback) self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) @client_context.require_test_commands @client_context.require_transactions def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): coll.insert_one({}, session=session) # Create the collection. coll.insert_one({}) self.set_fail_point( { "configureFailPoint": "failCommand", "mode": {"times": 1}, "data": { "failCommands": ["commitTransaction"], "errorCode": 251, # NoSuchTransaction }, } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): s.with_transaction(callback) self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) @client_context.require_test_commands @client_context.require_transactions def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): coll.insert_one({}, session=session) # Create the collection. coll.insert_one({}) self.set_fail_point( { "configureFailPoint": "failCommand", "mode": {"times": 2}, "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(ConnectionFailure): s.with_transaction(callback) # One insert for the callback and two commits (includes the automatic # retry). self.assertEqual( listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] ) # Tested here because this supports Motor's convenient transactions API. 
@client_context.require_transactions def test_in_transaction_property(self): client = client_context.client coll = client.test.testcollection coll.insert_one({}) self.addCleanup(coll.drop) with client.start_session() as s: self.assertFalse(s.in_transaction) s.start_transaction() self.assertTrue(s.in_transaction) coll.insert_one({}, session=s) self.assertTrue(s.in_transaction) s.commit_transaction() self.assertFalse(s.in_transaction) with client.start_session() as s: s.start_transaction() # commit empty transaction s.commit_transaction() self.assertFalse(s.in_transaction) with client.start_session() as s: s.start_transaction() s.abort_transaction() self.assertFalse(s.in_transaction) # Using a callback def callback(session): self.assertTrue(session.in_transaction) with client.start_session() as s: self.assertFalse(s.in_transaction) s.with_transaction(callback) self.assertFalse(s.in_transaction) def create_test(scenario_def, test, name): @client_context.require_test_commands @client_context.require_transactions def run_scenario(self): self.run_scenario(scenario_def, test) return run_scenario test_creator = TestCreator(create_test, TestTransactions, TEST_PATH) test_creator.create_tests() TestCreator( create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH ).create_tests() if __name__ == "__main__": unittest.main()
mongodb/mongo-python-driver
test/test_transactions.py
Python
apache-2.0
20,801
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import fixture as config_fixture

from keystone.identity.backends import ldap
from keystone.tests.unit import core
from keystone.tests.unit.identity.backends import test_base
from keystone.tests.unit.ksfixtures import ldapdb


class TestIdentityDriver(core.BaseTestCase, test_base.IdentityDriverTests):

    allows_name_update = False
    allows_self_service_change_password = False
    expected_is_domain_aware = False
    expected_default_assignment_driver = 'sql'
    expected_is_sql = False
    expected_generates_uuids = False

    def setUp(self):
        super(TestIdentityDriver, self).setUp()

        config_fixture_ = self.useFixture(config_fixture.Config())
        config_fixture_.config(
            group='ldap',
            url='fake://memory',
            user='cn=Admin',
            password='password',
            suffix='cn=example,cn=com')

        self.useFixture(ldapdb.LDAPDatabase())

        self.driver = ldap.Identity()
ilay09/keystone
keystone/tests/unit/identity/backends/test_ldap.py
Python
apache-2.0
1,538
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Exceptions coverage.py can raise."""


class BaseCoverageException(Exception):
    """The base of all Coverage exceptions."""
    pass


class CoverageException(BaseCoverageException):
    """An exception raised by a coverage.py function."""
    pass


class NoSource(CoverageException):
    """We couldn't find the source for a module."""
    pass


class NoCode(NoSource):
    """We couldn't find any code at all."""
    pass


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""
    pass


class ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.

    """
    pass


class StopEverything(BaseCoverageException):
    """An exception that means everything should stop.

    The CoverageTest class converts these to SkipTest, so that when running
    tests, raising this exception will automatically skip the test.

    """
    pass


class CoverageWarning(Warning):
    """A warning from Coverage.py."""
    pass
hugovk/coveragepy
coverage/exceptions.py
Python
apache-2.0
1,237
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common configs for compatibility_lib and compatibility_server. Note that a unit test exists for checking that the configs.py file in compatibility_lib is the same as the configs.py file in compatibility_server. The reason for this set up is that these modules need to be isolated from each other, but there also needs to be consistency in the objects and data in this file since they exist in the same workflow. Steps for updating the package list / white list: 1. Make sure to update both lists when appropriate (the package has been release to PyPI and the github repo exists) 2. Skip the dashboard tests and build when adding any new packages to either list 3. Release a new version of compatibility lib 4. Redeploy the badge and compatibility servers 5. Unskip the dashboard tests and build """ def _format_url(repo_name, setuppy_path=''): url = 'git+git://github.com/{}.git'.format(repo_name) if setuppy_path != '': url = '{}#subdirectory={}'.format(url, setuppy_path) return url # IGNORED_DEPENDENCIES are not direct dependencies for many packages and are # not installed via pip, resulting in unresolvable high priority warnings. IGNORED_DEPENDENCIES = [ 'pip', 'setuptools', 'wheel', 'virtualenv', ] # If updating this list, make sure to update the whitelist as well with the # appropiate github repo if one exists. PKG_LIST = [ 'google-api-core', 'google-api-python-client', 'google-auth', 'google-cloud-asset', 'google-cloud-automl', 'google-cloud-bigquery', 'google-cloud-bigquery-datatransfer', 'google-cloud-bigquery-storage', 'google-cloud-bigtable', 'google-cloud-container', 'google-cloud-core', 'google-cloud-datacatalog', 'google-cloud-datalabeling', 'google-cloud-dataproc', 'google-cloud-datastore', 'google-cloud-dlp', 'google-cloud-dns', 'google-cloud-error-reporting', 'google-cloud-firestore', 'google-cloud-iam', 'google-cloud-iot', # 'google-cloud-irm', # unreleased 'google-cloud-kms', 'google-cloud-language', 'google-cloud-logging', 'google-cloud-monitoring', 'google-cloud-os-login', # 'google-cloud-phishing-protection', # unreleased 'google-cloud-pubsub', 'google-cloud-redis', 'google-cloud-resource-manager', 'google-cloud-runtimeconfig', 'google-cloud-scheduler', 'google-cloud-securitycenter', 'google-cloud-spanner', 'google-cloud-speech', 'google-cloud-storage', 'google-cloud-talent', 'google-cloud-tasks', 'google-cloud-texttospeech', 'google-cloud-trace', 'google-cloud-translate', 'google-cloud-videointelligence', 'google-cloud-vision', 'google-cloud-webrisk', 'google-cloud-websecurityscanner', 'google-resumable-media', 'apache-beam[gcp]', 'google-apitools', 'googleapis-common-protos', 'grpc-google-iam-v1', 'grpcio', 'opencensus', 'protobuf', 'protorpc', 'tensorboard', 'tensorflow', 'gcloud', 'compatibility-lib', ] WHITELIST_PKGS = PKG_LIST # WHITELIST_URLS maps a github url to its associated pypi package name. This is # used for sanitizing input packages and making sure we don't run random pypi # or github packages. 
# If updating this list, make sure to update the `PKG_LIST` with the # appropriate pypi package if one has been released. WHITELIST_URLS = { _format_url('googleapis/google-cloud-python', 'asset'): 'google-cloud-asset', _format_url('googleapis/google-cloud-python', 'automl'): 'google-cloud-automl', _format_url('googleapis/google-cloud-python', 'datacatalog'): 'google-cloud-datacatalog', _format_url('googleapis/google-cloud-python', 'datalabeling'): 'google-cloud-datalabeling', _format_url('googleapis/google-cloud-python', 'dataproc'): 'google-cloud-dataproc', _format_url('googleapis/google-cloud-python', 'dlp'): 'google-cloud-dlp', _format_url('googleapis/google-cloud-python', 'iam'): 'google-cloud-iam', _format_url('googleapis/google-cloud-python', 'iot'): 'google-cloud-iot', # unreleased _format_url('googleapis/google-cloud-python', 'irm'): 'google-cloud-irm', _format_url('googleapis/google-cloud-python', 'kms'): 'google-cloud-kms', _format_url('googleapis/python-ndb', ''): 'google-cloud-ndb', _format_url('googleapis/google-cloud-python', 'oslogin'): 'google-cloud-os-login', _format_url('googleapis/google-cloud-python', 'redis'): 'google-cloud-redis', _format_url('googleapis/google-cloud-python', 'scheduler'): 'google-cloud-scheduler', _format_url('googleapis/google-cloud-python', 'securitycenter'): 'google-cloud-securitycenter', _format_url('googleapis/google-cloud-python', 'tasks'): 'google-cloud-tasks', _format_url('googleapis/google-cloud-python', 'texttospeech'): 'google-cloud-texttospeech', _format_url('googleapis/google-cloud-python', 'webrisk'): 'google-cloud-webrisk', _format_url('googleapis/google-cloud-python', 'websecurityscanner'): 'google-cloud-websecurityscanner', _format_url('googleapis/google-cloud-python', 'api_core'): 'google-api-core', _format_url('googleapis/google-cloud-python', 'bigquery'): 'google-cloud-bigquery', _format_url('googleapis/google-cloud-python', 'bigquery_datatransfer'): 'google-cloud-bigquery-datatransfer', _format_url('googleapis/google-cloud-python', 'bigquery_storage'): 'google-cloud-bigquery-storage', _format_url('googleapis/google-cloud-python', 'bigtable'): 'google-cloud-bigtable', _format_url('googleapis/google-cloud-python', 'container'): 'google-cloud-container', _format_url('googleapis/google-cloud-python', 'core'): 'google-cloud-core', _format_url('googleapis/google-cloud-python', 'datastore'): 'google-cloud-datastore', _format_url('googleapis/google-cloud-python', 'dns'): 'google-cloud-dns', _format_url('googleapis/google-cloud-python', 'error_reporting'): 'google-cloud-error-reporting', _format_url('googleapis/google-cloud-python', 'firestore'): 'google-cloud-firestore', _format_url('googleapis/google-cloud-python', 'language'): 'google-cloud-language', _format_url('googleapis/google-cloud-python', 'logging'): 'google-cloud-logging', _format_url('googleapis/google-cloud-python', 'monitoring'): 'google-cloud-monitoring', # unreleased _format_url('googleapis/google-cloud-python', 'phishingprotection'): 'google-cloud-phishing-protection', _format_url('googleapis/google-cloud-python', 'pubsub'): 'google-cloud-pubsub', _format_url('googleapis/google-cloud-python', 'resource_manager'): 'google-cloud-resource-manager', _format_url('googleapis/google-cloud-python', 'runtimeconfig'): 'google-cloud-runtimeconfig', _format_url('googleapis/google-cloud-python', 'spanner'): 'google-cloud-spanner', _format_url('googleapis/google-cloud-python', 'speech'): 'google-cloud-speech', _format_url('googleapis/google-cloud-python', 'storage'): 
'google-cloud-storage', _format_url('googleapis/google-cloud-python', 'talent'): 'google-cloud-talent', _format_url('googleapis/google-cloud-python', 'trace'): 'google-cloud-trace', _format_url('googleapis/google-cloud-python', 'translate'): 'google-cloud-translate', _format_url('googleapis/google-cloud-python', 'videointelligence'): 'google-cloud-videointelligence', _format_url('googleapis/google-cloud-python', 'vision'): 'google-cloud-vision', _format_url('googleapis/google-api-python-client'): 'google-api-python-client', _format_url('googleapis/google-auth-library-python'): 'google-auth', _format_url('GoogleCloudPlatform/google-resumable-media-python'): 'google-resumable-media', _format_url('apache/beam', 'sdks/python'): 'apache-beam[gcp]', _format_url('google/apitools'): 'google-apitools', _format_url('census-instrumentation/opencensus-python'): 'opencensus', _format_url('google/protorpc'): 'protorpc', _format_url('tensorflow/tensorflow', 'tensorflow/tools/pip_package'): 'tensorflow', _format_url('GoogleCloudPlatform/cloud-opensource-python', 'compatibility_lib'): 'compatibility-lib', # TODO: The following projects do not use setup.py # googleapis-common-protos # grpc-google-iam-v1 # grpcio # protobuf # tensorboard - not sure what the build process is # _format_url('tensorflow/tensorboard', 'tensorboard/pip_package'): # 'tensorboard', } # TODO: Find top 30 packages by download count in BigQuery table. THIRD_PARTY_PACKAGE_LIST = [ 'requests', 'flask', 'django', ] PKG_PY_VERSION_NOT_SUPPORTED = { 2: ['tensorflow', ], 3: ['apache-beam[gcp]', 'gsutil', ], }
GoogleCloudPlatform/cloud-opensource-python
compatibility_server/configs.py
Python
apache-2.0
9,653
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script is used to synthesize generated parts of this library."""

import synthtool as s
import synthtool.gcp as gcp
import logging

logging.basicConfig(level=logging.DEBUG)

gapic = gcp.GAPICGenerator()

v1_library = gapic.ruby_library(
    'datastore', 'v1',
    config_path='/google/datastore/artman_datastore.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-datastore'
)
s.copy(v1_library / 'lib/google/cloud/datastore/v1')
s.copy(v1_library / 'lib/google/datastore/v1')

# Omitting lib/google/cloud/datastore/v1.rb for now because we are not exposing
# the low-level API.

# Support for service_address
s.replace(
    'lib/google/cloud/datastore/v*/*_client.rb',
    '\n(\\s+)#(\\s+)@param exception_transformer',
    '\n\\1#\\2@param service_address [String]\n' +
    '\\1#\\2  Override for the service hostname, or `nil` to leave as the default.\n' +
    '\\1#\\2@param service_port [Integer]\n' +
    '\\1#\\2  Override for the service port, or `nil` to leave as the default.\n' +
    '\\1#\\2@param exception_transformer'
)
s.replace(
    'lib/google/cloud/datastore/v*/*_client.rb',
    '\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
    '\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
    'lib/google/cloud/datastore/v*/*_client.rb',
    'service_path = self\\.class::SERVICE_ADDRESS',
    'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
    'lib/google/cloud/datastore/v*/*_client.rb',
    'port = self\\.class::DEFAULT_SERVICE_PORT',
    'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)

# https://github.com/googleapis/gapic-generator/issues/2124
s.replace(
    'lib/google/cloud/datastore/v1/credentials.rb',
    'SCOPE = \[[^\]]+\]\.freeze',
    'SCOPE = ["https://www.googleapis.com/auth/datastore"].freeze')

# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
    'lib/google/cloud/datastore/v1/*_client.rb',
    '(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
    '\\1\\2# @private\n\\2\\3')

# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
    'lib/**/*.rb',
    '\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
    '\\1\n\\6')

# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
    'lib/google/cloud/datastore/v1/*_client.rb',
    '(require \".*credentials\"\n)\n',
    '\\1require "google/cloud/datastore/version"\n\n'
)
s.replace(
    'lib/google/cloud/datastore/v1/*_client.rb',
    'Gem.loaded_specs\[.*\]\.version\.version',
    'Google::Cloud::Datastore::VERSION'
)
quartzmo/google-cloud-ruby
google-cloud-datastore/synth.py
Python
apache-2.0
3,270
from proboscis import test


@test(groups=['benchmark.discovery'])
class BenchmarkDiscoveryTests(object):

    def __init__(self):
        pass
jlongever/RackHD
test/benchmark/api_v2_0/discovery_tests.py
Python
apache-2.0
142
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.apps import AppConfig


class ScheduleConfig(AppConfig):
    name = 'schedule'
CongBao/mrsys.online
sub_mrsys/schedule/apps.py
Python
apache-2.0
156
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) def _make_entity_pb(project, kind, integer_id, name=None, str_val=None): from google.cloud.datastore_v1.proto import entity_pb2 from google.cloud.datastore.helpers import _new_value_pb entity_pb = entity_pb2.Entity() entity_pb.key.partition_id.project_id = project path_element = entity_pb.key.path.add() path_element.kind = kind path_element.id = integer_id if name is not None and str_val is not None: value_pb = _new_value_pb(entity_pb, name) value_pb.string_value = str_val return entity_pb class Test__get_gcd_project(unittest.TestCase): def _call_fut(self): from google.cloud.datastore.client import _get_gcd_project return _get_gcd_project() def test_no_value(self): environ = {} with mock.patch("os.getenv", new=environ.get): project = self._call_fut() self.assertIsNone(project) def test_value_set(self): from google.cloud.datastore.client import GCD_DATASET MOCK_PROJECT = object() environ = {GCD_DATASET: MOCK_PROJECT} with mock.patch("os.getenv", new=environ.get): project = self._call_fut() self.assertEqual(project, MOCK_PROJECT) class Test__determine_default_project(unittest.TestCase): def _call_fut(self, project=None): from google.cloud.datastore.client import _determine_default_project return _determine_default_project(project=project) def _determine_default_helper(self, gcd=None, fallback=None, project_called=None): _callers = [] def gcd_mock(): _callers.append("gcd_mock") return gcd def fallback_mock(project=None): _callers.append(("fallback_mock", project)) return fallback patch = mock.patch.multiple( "google.cloud.datastore.client", _get_gcd_project=gcd_mock, _base_default_project=fallback_mock, ) with patch: returned_project = self._call_fut(project_called) return returned_project, _callers def test_no_value(self): project, callers = self._determine_default_helper() self.assertIsNone(project) self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)]) def test_explicit(self): PROJECT = object() project, callers = self._determine_default_helper(project_called=PROJECT) self.assertEqual(project, PROJECT) self.assertEqual(callers, []) def test_gcd(self): PROJECT = object() project, callers = self._determine_default_helper(gcd=PROJECT) self.assertEqual(project, PROJECT) self.assertEqual(callers, ["gcd_mock"]) def test_fallback(self): PROJECT = object() project, callers = self._determine_default_helper(fallback=PROJECT) self.assertEqual(project, PROJECT) self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)]) class TestClient(unittest.TestCase): PROJECT = "PROJECT" @staticmethod def _get_target_class(): from google.cloud.datastore.client import Client return Client def _make_one( self, project=PROJECT, namespace=None, credentials=None, client_info=None, client_options=None, _http=None, _use_grpc=None, ): return self._get_target_class()( project=project, namespace=namespace, 
credentials=credentials, client_info=client_info, client_options=client_options, _http=_http, _use_grpc=_use_grpc, ) def test_constructor_w_project_no_environ(self): # Some environments (e.g. AppVeyor CI) run in GCE, so # this test would fail artificially. patch = mock.patch( "google.cloud.datastore.client._base_default_project", return_value=None ) with patch: self.assertRaises(EnvironmentError, self._make_one, None) def test_constructor_w_implicit_inputs(self): from google.cloud.datastore.client import _CLIENT_INFO from google.cloud.datastore.client import _DATASTORE_BASE_URL other = "other" creds = _make_credentials() klass = self._get_target_class() patch1 = mock.patch( "google.cloud.datastore.client._determine_default_project", return_value=other, ) patch2 = mock.patch("google.auth.default", return_value=(creds, None)) with patch1 as _determine_default_project: with patch2 as default: client = klass() self.assertEqual(client.project, other) self.assertIsNone(client.namespace) self.assertIs(client._credentials, creds) self.assertIs(client._client_info, _CLIENT_INFO) self.assertIsNone(client._http_internal) self.assertIsNone(client._client_options) self.assertEqual(client.base_url, _DATASTORE_BASE_URL) self.assertIsNone(client.current_batch) self.assertIsNone(client.current_transaction) default.assert_called_once_with() _determine_default_project.assert_called_once_with(None) def test_constructor_w_explicit_inputs(self): from google.api_core.client_options import ClientOptions other = "other" namespace = "namespace" creds = _make_credentials() client_info = mock.Mock() client_options = ClientOptions("endpoint") http = object() client = self._make_one( project=other, namespace=namespace, credentials=creds, client_info=client_info, client_options=client_options, _http=http, ) self.assertEqual(client.project, other) self.assertEqual(client.namespace, namespace) self.assertIs(client._credentials, creds) self.assertIs(client._client_info, client_info) self.assertIs(client._http_internal, http) self.assertIsNone(client.current_batch) self.assertIs(client._base_url, "endpoint") self.assertEqual(list(client._batch_stack), []) def test_constructor_use_grpc_default(self): import google.cloud.datastore.client as MUT project = "PROJECT" creds = _make_credentials() http = object() with mock.patch.object(MUT, "_USE_GRPC", new=True): client1 = self._make_one(project=project, credentials=creds, _http=http) self.assertTrue(client1._use_grpc) # Explicitly over-ride the environment. client2 = self._make_one( project=project, credentials=creds, _http=http, _use_grpc=False ) self.assertFalse(client2._use_grpc) with mock.patch.object(MUT, "_USE_GRPC", new=False): client3 = self._make_one(project=project, credentials=creds, _http=http) self.assertFalse(client3._use_grpc) # Explicitly over-ride the environment. 
client4 = self._make_one( project=project, credentials=creds, _http=http, _use_grpc=True ) self.assertTrue(client4._use_grpc) def test_constructor_gcd_host(self): from google.cloud.environment_vars import GCD_HOST host = "localhost:1234" fake_environ = {GCD_HOST: host} project = "PROJECT" creds = _make_credentials() http = object() with mock.patch("os.environ", new=fake_environ): client = self._make_one(project=project, credentials=creds, _http=http) self.assertEqual(client.base_url, "http://" + host) def test_base_url_property(self): from google.cloud.datastore.client import _DATASTORE_BASE_URL from google.api_core.client_options import ClientOptions alternate_url = "https://alias.example.com/" project = "PROJECT" creds = _make_credentials() http = object() client_options = ClientOptions() client = self._make_one( project=project, credentials=creds, _http=http, client_options=client_options, ) self.assertEqual(client.base_url, _DATASTORE_BASE_URL) client.base_url = alternate_url self.assertEqual(client.base_url, alternate_url) def test_base_url_property_w_client_options(self): alternate_url = "https://alias.example.com/" project = "PROJECT" creds = _make_credentials() http = object() client_options = {"api_endpoint": "endpoint"} client = self._make_one( project=project, credentials=creds, _http=http, client_options=client_options, ) self.assertEqual(client.base_url, "endpoint") client.base_url = alternate_url self.assertEqual(client.base_url, alternate_url) def test__datastore_api_property_already_set(self): client = self._make_one( project="prahj-ekt", credentials=_make_credentials(), _use_grpc=True ) already = client._datastore_api_internal = object() self.assertIs(client._datastore_api, already) def test__datastore_api_property_gapic(self): client_info = mock.Mock() client = self._make_one( project="prahj-ekt", credentials=_make_credentials(), client_info=client_info, _http=object(), _use_grpc=True, ) self.assertIsNone(client._datastore_api_internal) patch = mock.patch( "google.cloud.datastore.client.make_datastore_api", return_value=mock.sentinel.ds_api, ) with patch as make_api: ds_api = client._datastore_api self.assertIs(ds_api, mock.sentinel.ds_api) self.assertIs(client._datastore_api_internal, mock.sentinel.ds_api) make_api.assert_called_once_with(client) def test__datastore_api_property_http(self): client_info = mock.Mock() client = self._make_one( project="prahj-ekt", credentials=_make_credentials(), client_info=client_info, _http=object(), _use_grpc=False, ) self.assertIsNone(client._datastore_api_internal) patch = mock.patch( "google.cloud.datastore.client.HTTPDatastoreAPI", return_value=mock.sentinel.ds_api, ) with patch as make_api: ds_api = client._datastore_api self.assertIs(ds_api, mock.sentinel.ds_api) self.assertIs(client._datastore_api_internal, mock.sentinel.ds_api) make_api.assert_called_once_with(client) def test__push_batch_and__pop_batch(self): creds = _make_credentials() client = self._make_one(credentials=creds) batch = client.batch() xact = client.transaction() client._push_batch(batch) self.assertEqual(list(client._batch_stack), [batch]) self.assertIs(client.current_batch, batch) self.assertIsNone(client.current_transaction) client._push_batch(xact) self.assertIs(client.current_batch, xact) self.assertIs(client.current_transaction, xact) # list(_LocalStack) returns in reverse order. 
self.assertEqual(list(client._batch_stack), [xact, batch]) self.assertIs(client._pop_batch(), xact) self.assertEqual(list(client._batch_stack), [batch]) self.assertIs(client._pop_batch(), batch) self.assertEqual(list(client._batch_stack), []) def test_get_miss(self): _called_with = [] def _get_multi(*args, **kw): _called_with.append((args, kw)) return [] creds = _make_credentials() client = self._make_one(credentials=creds) client.get_multi = _get_multi key = object() self.assertIsNone(client.get(key)) self.assertEqual(_called_with[0][0], ()) self.assertEqual(_called_with[0][1]["keys"], [key]) self.assertIsNone(_called_with[0][1]["missing"]) self.assertIsNone(_called_with[0][1]["deferred"]) self.assertIsNone(_called_with[0][1]["transaction"]) def test_get_hit(self): TXN_ID = "123" _called_with = [] _entity = object() def _get_multi(*args, **kw): _called_with.append((args, kw)) return [_entity] creds = _make_credentials() client = self._make_one(credentials=creds) client.get_multi = _get_multi key, missing, deferred = object(), [], [] self.assertIs(client.get(key, missing, deferred, TXN_ID), _entity) self.assertEqual(_called_with[0][0], ()) self.assertEqual(_called_with[0][1]["keys"], [key]) self.assertIs(_called_with[0][1]["missing"], missing) self.assertIs(_called_with[0][1]["deferred"], deferred) self.assertEqual(_called_with[0][1]["transaction"], TXN_ID) def test_get_multi_no_keys(self): creds = _make_credentials() client = self._make_one(credentials=creds) results = client.get_multi([]) self.assertEqual(results, []) def test_get_multi_miss(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key creds = _make_credentials() client = self._make_one(credentials=creds) ds_api = _make_datastore_api() client._datastore_api_internal = ds_api key = Key("Kind", 1234, project=self.PROJECT) results = client.get_multi([key]) self.assertEqual(results, []) read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_called_once_with( self.PROJECT, [key.to_protobuf()], read_options=read_options ) def test_get_multi_miss_w_missing(self): from google.cloud.datastore_v1.proto import entity_pb2 from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key KIND = "Kind" ID = 1234 # Make a missing entity pb to be returned from mock backend. missed = entity_pb2.Entity() missed.key.partition_id.project_id = self.PROJECT path_element = missed.key.path.add() path_element.kind = KIND path_element.id = ID creds = _make_credentials() client = self._make_one(credentials=creds) # Set missing entity on mock connection. 
lookup_response = _make_lookup_response(missing=[missed]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api key = Key(KIND, ID, project=self.PROJECT) missing = [] entities = client.get_multi([key], missing=missing) self.assertEqual(entities, []) key_pb = key.to_protobuf() self.assertEqual([missed.key.to_protobuf() for missed in missing], [key_pb]) read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_called_once_with( self.PROJECT, [key_pb], read_options=read_options ) def test_get_multi_w_missing_non_empty(self): from google.cloud.datastore.key import Key creds = _make_credentials() client = self._make_one(credentials=creds) key = Key("Kind", 1234, project=self.PROJECT) missing = ["this", "list", "is", "not", "empty"] self.assertRaises(ValueError, client.get_multi, [key], missing=missing) def test_get_multi_w_deferred_non_empty(self): from google.cloud.datastore.key import Key creds = _make_credentials() client = self._make_one(credentials=creds) key = Key("Kind", 1234, project=self.PROJECT) deferred = ["this", "list", "is", "not", "empty"] self.assertRaises(ValueError, client.get_multi, [key], deferred=deferred) def test_get_multi_miss_w_deferred(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key key = Key("Kind", 1234, project=self.PROJECT) key_pb = key.to_protobuf() # Set deferred entity on mock connection. creds = _make_credentials() client = self._make_one(credentials=creds) lookup_response = _make_lookup_response(deferred=[key_pb]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api deferred = [] entities = client.get_multi([key], deferred=deferred) self.assertEqual(entities, []) self.assertEqual([def_key.to_protobuf() for def_key in deferred], [key_pb]) read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_called_once_with( self.PROJECT, [key_pb], read_options=read_options ) def test_get_multi_w_deferred_from_backend_but_not_passed(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore_v1.proto import entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.key import Key key1 = Key("Kind", project=self.PROJECT) key1_pb = key1.to_protobuf() key2 = Key("Kind", 2345, project=self.PROJECT) key2_pb = key2.to_protobuf() entity1_pb = entity_pb2.Entity() entity1_pb.key.CopyFrom(key1_pb) entity2_pb = entity_pb2.Entity() entity2_pb.key.CopyFrom(key2_pb) creds = _make_credentials() client = self._make_one(credentials=creds) # Mock up two separate requests. Using an iterable as side_effect # allows multiple return values. lookup_response1 = _make_lookup_response( results=[entity1_pb], deferred=[key2_pb] ) lookup_response2 = _make_lookup_response(results=[entity2_pb]) ds_api = _make_datastore_api() ds_api.lookup = mock.Mock( side_effect=[lookup_response1, lookup_response2], spec=[] ) client._datastore_api_internal = ds_api missing = [] found = client.get_multi([key1, key2], missing=missing) self.assertEqual(len(found), 2) self.assertEqual(len(missing), 0) # Check the actual contents on the response. 
self.assertIsInstance(found[0], Entity) self.assertEqual(found[0].key.path, key1.path) self.assertEqual(found[0].key.project, key1.project) self.assertIsInstance(found[1], Entity) self.assertEqual(found[1].key.path, key2.path) self.assertEqual(found[1].key.project, key2.project) self.assertEqual(ds_api.lookup.call_count, 2) read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_any_call( self.PROJECT, [key2_pb], read_options=read_options ) ds_api.lookup.assert_any_call( self.PROJECT, [key1_pb, key2_pb], read_options=read_options ) def test_get_multi_hit(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key kind = "Kind" id_ = 1234 path = [{"kind": kind, "id": id_}] # Make a found entity pb to be returned from mock backend. entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo") # Make a connection to return the entity pb. creds = _make_credentials() client = self._make_one(credentials=creds) lookup_response = _make_lookup_response(results=[entity_pb]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api key = Key(kind, id_, project=self.PROJECT) result, = client.get_multi([key]) new_key = result.key # Check the returned value is as expected. self.assertIsNot(new_key, key) self.assertEqual(new_key.project, self.PROJECT) self.assertEqual(new_key.path, path) self.assertEqual(list(result), ["foo"]) self.assertEqual(result["foo"], "Foo") read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_called_once_with( self.PROJECT, [key.to_protobuf()], read_options=read_options ) def test_get_multi_hit_w_transaction(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key txn_id = b"123" kind = "Kind" id_ = 1234 path = [{"kind": kind, "id": id_}] # Make a found entity pb to be returned from mock backend. entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo") # Make a connection to return the entity pb. creds = _make_credentials() client = self._make_one(credentials=creds) lookup_response = _make_lookup_response(results=[entity_pb]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api key = Key(kind, id_, project=self.PROJECT) txn = client.transaction() txn._id = txn_id result, = client.get_multi([key], transaction=txn) new_key = result.key # Check the returned value is as expected. self.assertIsNot(new_key, key) self.assertEqual(new_key.project, self.PROJECT) self.assertEqual(new_key.path, path) self.assertEqual(list(result), ["foo"]) self.assertEqual(result["foo"], "Foo") read_options = datastore_pb2.ReadOptions(transaction=txn_id) ds_api.lookup.assert_called_once_with( self.PROJECT, [key.to_protobuf()], read_options=read_options ) def test_get_multi_hit_multiple_keys_same_project(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.key import Key kind = "Kind" id1 = 1234 id2 = 2345 # Make a found entity pb to be returned from mock backend. entity_pb1 = _make_entity_pb(self.PROJECT, kind, id1) entity_pb2 = _make_entity_pb(self.PROJECT, kind, id2) # Make a connection to return the entity pbs. 
creds = _make_credentials() client = self._make_one(credentials=creds) lookup_response = _make_lookup_response(results=[entity_pb1, entity_pb2]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api key1 = Key(kind, id1, project=self.PROJECT) key2 = Key(kind, id2, project=self.PROJECT) retrieved1, retrieved2 = client.get_multi([key1, key2]) # Check values match. self.assertEqual(retrieved1.key.path, key1.path) self.assertEqual(dict(retrieved1), {}) self.assertEqual(retrieved2.key.path, key2.path) self.assertEqual(dict(retrieved2), {}) read_options = datastore_pb2.ReadOptions() ds_api.lookup.assert_called_once_with( self.PROJECT, [key1.to_protobuf(), key2.to_protobuf()], read_options=read_options, ) def test_get_multi_hit_multiple_keys_different_project(self): from google.cloud.datastore.key import Key PROJECT1 = "PROJECT" PROJECT2 = "PROJECT-ALT" # Make sure our IDs are actually different. self.assertNotEqual(PROJECT1, PROJECT2) key1 = Key("KIND", 1234, project=PROJECT1) key2 = Key("KIND", 1234, project=PROJECT2) creds = _make_credentials() client = self._make_one(credentials=creds) with self.assertRaises(ValueError): client.get_multi([key1, key2]) def test_get_multi_max_loops(self): from google.cloud.datastore.key import Key kind = "Kind" id_ = 1234 # Make a found entity pb to be returned from mock backend. entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo") # Make a connection to return the entity pb. creds = _make_credentials() client = self._make_one(credentials=creds) lookup_response = _make_lookup_response(results=[entity_pb]) ds_api = _make_datastore_api(lookup_response=lookup_response) client._datastore_api_internal = ds_api key = Key(kind, id_, project=self.PROJECT) deferred = [] missing = [] patch = mock.patch("google.cloud.datastore.client._MAX_LOOPS", new=-1) with patch: result = client.get_multi([key], missing=missing, deferred=deferred) # Make sure we have no results, even though the connection has been # set up as in `test_hit` to return a single result. 
self.assertEqual(result, []) self.assertEqual(missing, []) self.assertEqual(deferred, []) ds_api.lookup.assert_not_called() def test_put(self): _called_with = [] def _put_multi(*args, **kw): _called_with.append((args, kw)) creds = _make_credentials() client = self._make_one(credentials=creds) client.put_multi = _put_multi entity = object() client.put(entity) self.assertEqual(_called_with[0][0], ()) self.assertEqual(_called_with[0][1]["entities"], [entity]) def test_put_multi_no_entities(self): creds = _make_credentials() client = self._make_one(credentials=creds) self.assertIsNone(client.put_multi([])) def test_put_multi_w_single_empty_entity(self): # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649 from google.cloud.datastore.entity import Entity creds = _make_credentials() client = self._make_one(credentials=creds) self.assertRaises(ValueError, client.put_multi, Entity()) def test_put_multi_no_batch_w_partial_key(self): from google.cloud.datastore_v1.proto import datastore_pb2 from google.cloud.datastore.helpers import _property_tuples entity = _Entity(foo=u"bar") key = entity.key = _Key(self.PROJECT) key._id = None creds = _make_credentials() client = self._make_one(credentials=creds) key_pb = _make_key(234) ds_api = _make_datastore_api(key_pb) client._datastore_api_internal = ds_api result = client.put_multi([entity]) self.assertIsNone(result) self.assertEqual(ds_api.commit.call_count, 1) _, positional, keyword = ds_api.commit.mock_calls[0] self.assertEqual(keyword, {"transaction": None}) self.assertEqual(len(positional), 3) self.assertEqual(positional[0], self.PROJECT) self.assertEqual(positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL) mutations = positional[2] mutated_entity = _mutated_pb(self, mutations, "insert") self.assertEqual(mutated_entity.key, key.to_protobuf()) prop_list = list(_property_tuples(mutated_entity)) self.assertTrue(len(prop_list), 1) name, value_pb = prop_list[0] self.assertEqual(name, "foo") self.assertEqual(value_pb.string_value, u"bar") def test_put_multi_existing_batch_w_completed_key(self): from google.cloud.datastore.helpers import _property_tuples creds = _make_credentials() client = self._make_one(credentials=creds) entity = _Entity(foo=u"bar") key = entity.key = _Key(self.PROJECT) with _NoCommitBatch(client) as CURR_BATCH: result = client.put_multi([entity]) self.assertIsNone(result) mutated_entity = _mutated_pb(self, CURR_BATCH.mutations, "upsert") self.assertEqual(mutated_entity.key, key.to_protobuf()) prop_list = list(_property_tuples(mutated_entity)) self.assertTrue(len(prop_list), 1) name, value_pb = prop_list[0] self.assertEqual(name, "foo") self.assertEqual(value_pb.string_value, u"bar") def test_delete(self): _called_with = [] def _delete_multi(*args, **kw): _called_with.append((args, kw)) creds = _make_credentials() client = self._make_one(credentials=creds) client.delete_multi = _delete_multi key = object() client.delete(key) self.assertEqual(_called_with[0][0], ()) self.assertEqual(_called_with[0][1]["keys"], [key]) def test_delete_multi_no_keys(self): creds = _make_credentials() client = self._make_one(credentials=creds) client._datastore_api_internal = _make_datastore_api() result = client.delete_multi([]) self.assertIsNone(result) client._datastore_api_internal.commit.assert_not_called() def test_delete_multi_no_batch(self): from google.cloud.datastore_v1.proto import datastore_pb2 key = _Key(self.PROJECT) creds = _make_credentials() client = self._make_one(credentials=creds) ds_api = _make_datastore_api() 
client._datastore_api_internal = ds_api result = client.delete_multi([key]) self.assertIsNone(result) self.assertEqual(ds_api.commit.call_count, 1) _, positional, keyword = ds_api.commit.mock_calls[0] self.assertEqual(keyword, {"transaction": None}) self.assertEqual(len(positional), 3) self.assertEqual(positional[0], self.PROJECT) self.assertEqual(positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL) mutations = positional[2] mutated_key = _mutated_pb(self, mutations, "delete") self.assertEqual(mutated_key, key.to_protobuf()) def test_delete_multi_w_existing_batch(self): creds = _make_credentials() client = self._make_one(credentials=creds) client._datastore_api_internal = _make_datastore_api() key = _Key(self.PROJECT) with _NoCommitBatch(client) as CURR_BATCH: result = client.delete_multi([key]) self.assertIsNone(result) mutated_key = _mutated_pb(self, CURR_BATCH.mutations, "delete") self.assertEqual(mutated_key, key._key) client._datastore_api_internal.commit.assert_not_called() def test_delete_multi_w_existing_transaction(self): creds = _make_credentials() client = self._make_one(credentials=creds) client._datastore_api_internal = _make_datastore_api() key = _Key(self.PROJECT) with _NoCommitTransaction(client) as CURR_XACT: result = client.delete_multi([key]) self.assertIsNone(result) mutated_key = _mutated_pb(self, CURR_XACT.mutations, "delete") self.assertEqual(mutated_key, key._key) client._datastore_api_internal.commit.assert_not_called() def test_allocate_ids_w_partial_key(self): num_ids = 2 incomplete_key = _Key(self.PROJECT) incomplete_key._id = None creds = _make_credentials() client = self._make_one(credentials=creds, _use_grpc=False) allocated = mock.Mock(keys=[_KeyPB(i) for i in range(num_ids)], spec=["keys"]) alloc_ids = mock.Mock(return_value=allocated, spec=[]) ds_api = mock.Mock(allocate_ids=alloc_ids, spec=["allocate_ids"]) client._datastore_api_internal = ds_api result = client.allocate_ids(incomplete_key, num_ids) # Check the IDs returned. 
self.assertEqual([key._id for key in result], list(range(num_ids))) def test_allocate_ids_w_completed_key(self): creds = _make_credentials() client = self._make_one(credentials=creds) complete_key = _Key(self.PROJECT) self.assertRaises(ValueError, client.allocate_ids, complete_key, 2) def test_reserve_ids_w_completed_key(self): num_ids = 2 creds = _make_credentials() client = self._make_one(credentials=creds, _use_grpc=False) complete_key = _Key(self.PROJECT) reserve_ids = mock.Mock() ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"]) client._datastore_api_internal = ds_api self.assertTrue(not complete_key.is_partial) client.reserve_ids(complete_key, num_ids) expected_keys = [complete_key.to_protobuf()] * num_ids reserve_ids.assert_called_once_with(self.PROJECT, expected_keys) def test_reserve_ids_w_partial_key(self): num_ids = 2 incomplete_key = _Key(self.PROJECT) incomplete_key._id = None creds = _make_credentials() client = self._make_one(credentials=creds) with self.assertRaises(ValueError): client.reserve_ids(incomplete_key, num_ids) def test_reserve_ids_w_wrong_num_ids(self): num_ids = "2" complete_key = _Key(self.PROJECT) creds = _make_credentials() client = self._make_one(credentials=creds) with self.assertRaises(ValueError): client.reserve_ids(complete_key, num_ids) def test_key_w_project(self): KIND = "KIND" ID = 1234 creds = _make_credentials() client = self._make_one(credentials=creds) self.assertRaises(TypeError, client.key, KIND, ID, project=self.PROJECT) def test_key_wo_project(self): kind = "KIND" id_ = 1234 creds = _make_credentials() client = self._make_one(credentials=creds) patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"]) with patch as mock_klass: key = client.key(kind, id_) self.assertIs(key, mock_klass.return_value) mock_klass.assert_called_once_with( kind, id_, project=self.PROJECT, namespace=None ) def test_key_w_namespace(self): kind = "KIND" id_ = 1234 namespace = object() creds = _make_credentials() client = self._make_one(namespace=namespace, credentials=creds) patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"]) with patch as mock_klass: key = client.key(kind, id_) self.assertIs(key, mock_klass.return_value) mock_klass.assert_called_once_with( kind, id_, project=self.PROJECT, namespace=namespace ) def test_key_w_namespace_collision(self): kind = "KIND" id_ = 1234 namespace1 = object() namespace2 = object() creds = _make_credentials() client = self._make_one(namespace=namespace1, credentials=creds) patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"]) with patch as mock_klass: key = client.key(kind, id_, namespace=namespace2) self.assertIs(key, mock_klass.return_value) mock_klass.assert_called_once_with( kind, id_, project=self.PROJECT, namespace=namespace2 ) def test_batch(self): creds = _make_credentials() client = self._make_one(credentials=creds) patch = mock.patch("google.cloud.datastore.client.Batch", spec=["__call__"]) with patch as mock_klass: batch = client.batch() self.assertIs(batch, mock_klass.return_value) mock_klass.assert_called_once_with(client) def test_transaction_defaults(self): creds = _make_credentials() client = self._make_one(credentials=creds) patch = mock.patch( "google.cloud.datastore.client.Transaction", spec=["__call__"] ) with patch as mock_klass: xact = client.transaction() self.assertIs(xact, mock_klass.return_value) mock_klass.assert_called_once_with(client) def test_read_only_transaction_defaults(self): from google.cloud.datastore_v1.types import 
TransactionOptions creds = _make_credentials() client = self._make_one(credentials=creds) xact = client.transaction(read_only=True) self.assertEqual( xact._options, TransactionOptions(read_only=TransactionOptions.ReadOnly()) ) self.assertFalse(xact._options.HasField("read_write")) self.assertTrue(xact._options.HasField("read_only")) self.assertEqual(xact._options.read_only, TransactionOptions.ReadOnly()) def test_query_w_client(self): KIND = "KIND" creds = _make_credentials() client = self._make_one(credentials=creds) other = self._make_one(credentials=_make_credentials()) self.assertRaises(TypeError, client.query, kind=KIND, client=other) def test_query_w_project(self): KIND = "KIND" creds = _make_credentials() client = self._make_one(credentials=creds) self.assertRaises(TypeError, client.query, kind=KIND, project=self.PROJECT) def test_query_w_defaults(self): creds = _make_credentials() client = self._make_one(credentials=creds) patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"]) with patch as mock_klass: query = client.query() self.assertIs(query, mock_klass.return_value) mock_klass.assert_called_once_with( client, project=self.PROJECT, namespace=None ) def test_query_explicit(self): kind = "KIND" namespace = "NAMESPACE" ancestor = object() filters = [("PROPERTY", "==", "VALUE")] projection = ["__key__"] order = ["PROPERTY"] distinct_on = ["DISTINCT_ON"] creds = _make_credentials() client = self._make_one(credentials=creds) patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"]) with patch as mock_klass: query = client.query( kind=kind, namespace=namespace, ancestor=ancestor, filters=filters, projection=projection, order=order, distinct_on=distinct_on, ) self.assertIs(query, mock_klass.return_value) mock_klass.assert_called_once_with( client, project=self.PROJECT, kind=kind, namespace=namespace, ancestor=ancestor, filters=filters, projection=projection, order=order, distinct_on=distinct_on, ) def test_query_w_namespace(self): kind = "KIND" namespace = object() creds = _make_credentials() client = self._make_one(namespace=namespace, credentials=creds) patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"]) with patch as mock_klass: query = client.query(kind=kind) self.assertIs(query, mock_klass.return_value) mock_klass.assert_called_once_with( client, project=self.PROJECT, namespace=namespace, kind=kind ) def test_query_w_namespace_collision(self): kind = "KIND" namespace1 = object() namespace2 = object() creds = _make_credentials() client = self._make_one(namespace=namespace1, credentials=creds) patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"]) with patch as mock_klass: query = client.query(kind=kind, namespace=namespace2) self.assertIs(query, mock_klass.return_value) mock_klass.assert_called_once_with( client, project=self.PROJECT, namespace=namespace2, kind=kind ) class _NoCommitBatch(object): def __init__(self, client): from google.cloud.datastore.batch import Batch self._client = client self._batch = Batch(client) self._batch.begin() def __enter__(self): self._client._push_batch(self._batch) return self._batch def __exit__(self, *args): self._client._pop_batch() class _NoCommitTransaction(object): def __init__(self, client, transaction_id="TRANSACTION"): from google.cloud.datastore.batch import Batch from google.cloud.datastore.transaction import Transaction self._client = client xact = self._transaction = Transaction(client) xact._id = transaction_id Batch.begin(xact) def __enter__(self): 
self._client._push_batch(self._transaction) return self._transaction def __exit__(self, *args): self._client._pop_batch() class _Entity(dict): key = None exclude_from_indexes = () _meanings = {} class _Key(object): _MARKER = object() _kind = "KIND" _key = "KEY" _path = None _id = 1234 _stored = None def __init__(self, project): self.project = project @property def is_partial(self): return self._id is None def to_protobuf(self): from google.cloud.datastore_v1.proto import entity_pb2 key = self._key = entity_pb2.Key() # Don't assign it, because it will just get ripped out # key.partition_id.project_id = self.project element = key.path.add() element.kind = self._kind if self._id is not None: element.id = self._id return key def completed_key(self, new_id): assert self.is_partial new_key = self.__class__(self.project) new_key._id = new_id return new_key class _PathElementPB(object): def __init__(self, id_): self.id = id_ class _KeyPB(object): def __init__(self, id_): self.path = [_PathElementPB(id_)] def _assert_num_mutations(test_case, mutation_pb_list, num_mutations): test_case.assertEqual(len(mutation_pb_list), num_mutations) def _mutated_pb(test_case, mutation_pb_list, mutation_type): # Make sure there is only one mutation. _assert_num_mutations(test_case, mutation_pb_list, 1) # We grab the only mutation. mutated_pb = mutation_pb_list[0] # Then check if it is the correct type. test_case.assertEqual(mutated_pb.WhichOneof("operation"), mutation_type) return getattr(mutated_pb, mutation_type) def _make_key(id_): from google.cloud.datastore_v1.proto import entity_pb2 key = entity_pb2.Key() elem = key.path.add() elem.id = id_ return key def _make_commit_response(*keys): from google.cloud.datastore_v1.proto import datastore_pb2 mutation_results = [datastore_pb2.MutationResult(key=key) for key in keys] return datastore_pb2.CommitResponse(mutation_results=mutation_results) def _make_lookup_response(results=(), missing=(), deferred=()): entity_results_found = [ mock.Mock(entity=result, spec=["entity"]) for result in results ] entity_results_missing = [ mock.Mock(entity=missing_entity, spec=["entity"]) for missing_entity in missing ] return mock.Mock( found=entity_results_found, missing=entity_results_missing, deferred=deferred, spec=["found", "missing", "deferred"], ) def _make_datastore_api(*keys, **kwargs): commit_method = mock.Mock(return_value=_make_commit_response(*keys), spec=[]) lookup_response = kwargs.pop("lookup_response", _make_lookup_response()) lookup_method = mock.Mock(return_value=lookup_response, spec=[]) return mock.Mock( commit=commit_method, lookup=lookup_method, spec=["commit", "lookup"] )
tseaver/google-cloud-python
datastore/tests/unit/test_client.py
Python
apache-2.0
44,363
""" # Licensed to the Apache Software Foundation (ASF) under one * # or more contributor license agreements. See the NOTICE file * # distributed with this work for additional information * # regarding copyright ownership. The ASF licenses this file * # to you under the Apache License, Version 2.0 (the * # "License"); you may not use this file except in compliance * # with the License. You may obtain a copy of the License at * # * # http://www.apache.org/licenses/LICENSE-2.0 * # * # Unless required by applicable law or agreed to in writing, * # software distributed under the License is distributed on an * # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * # KIND, either express or implied. See the License for the * # specific language governing permissions and limitations * # under the License. """ from __future__ import absolute_import from ..msg.Field import * from ..msg.ImportExportHelper import * from ..msg.StructValue import * from ..msg.Type import * from ..msg.ValueFactory import * from ..support.Class2TypeMap import * from ..support.Validator_string import * from ...util.URL import * class URLSerializer(ImportExportHelper): """ An etch serializer for URL """ FIELD_NAME = "urlStr" @classmethod def init(cls, typ, class2type): """ Defines custom fields in the value factory so that the importer can find them. @param typ @param class2type """ field = typ.getField(cls.FIELD_NAME) class2type.put(URL, typ) typ.setComponentType(URL) typ.setImportExportHelper( URLSerializer(typ, field)) typ.putValidator( field, Validator_string.get(0)) typ.lock() def __init__(self, typ, field): self.__type = typ self.__field = field def importValue(self, struct): return URL(struct.get(field)) def exportValue(self, vf, value): struct = StructValue(self.__type, vf) struct.put(self.__field, repr(value)) return struct
OBIGOGIT/etch
binding-python/runtime/src/main/python/etch/binding/util/URLSerializer.py
Python
apache-2.0
2,249
# Inviwo Python script
import inviwopy

tf = inviwopy.app.network.VolumeRaycaster.transferFunction

tf.save(inviwopy.app.getPath(inviwopy.PathType.TransferFunctions)
        + "/transferfunction.itf")
Sparkier/inviwo
data/scripts/savetransferfunction.py
Python
bsd-2-clause
196
#!/usr/bin/env python

# test --create
command += oiiotool ("--create 320x240 3 -d uint8 -o black.tif")
command += oiiotool ("--stats black.tif")

# test --pattern constant
command += oiiotool ("--pattern constant:color=.1,.2,.3,1 320x240 4 -o constant.tif")
command += oiiotool ("--stats constant.tif")

# test --pattern noise
command += oiiotool ("--pattern noise:type=uniform:min=0.25:max=0.75 64x64 3 -d uint8 -o noise-uniform3.tif")
command += oiiotool ("--pattern noise:type=gaussian:mean=0.5:stddev=0.1 64x64 3 -d uint8 -o noise-gauss.tif")
command += oiiotool ("--pattern noise:type=salt:portion=0.01:value=1 64x64 3 -d uint8 -o noise-salt.tif")

# test --pattern fill
command += oiiotool ("--pattern fill:color=0,0,0.5 64x64 3 -d uint8 -o pattern-const.tif")
command += oiiotool ("--pattern fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 3 -d uint8 -o pattern-gradientv.tif")
command += oiiotool ("--pattern fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 3 -d uint8 -o pattern-gradienth.tif")
command += oiiotool ("--pattern fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 3 -d uint8 -o pattern-gradient4.tif")

# test --fill
command += oiiotool ("--create 256x256 3 --fill:color=1,.5,.5 256x256 --fill:color=0,1,0 80x80+100+100 -d uint8 -o filled.tif")
command += oiiotool ("--create 64x64 3 --fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 -d uint8 -o fillv.tif")
command += oiiotool ("--create 64x64 3 --fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 -d uint8 -o fillh.tif")
command += oiiotool ("--create 64x64 3 --fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 -d uint8 -o fill4.tif")

# test --line
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 "
            + "-line:color=0.25,0,0,0.25 10,60,250,20 "
            + "-line:color=0.5,0,0,0.5 10,62,250,100 "
            + "-line:color=1,0,0,1 10,64,250,400 "
            + "-line:color=0,1,0,1 250,100,10,184 "
            + "-line:color=0,0.5,0,0.5 250,200,10,182 "
            + "-line:color=0,0.25,0,0.25 100,400,10,180 "
            + "-line:color=.5,.5,0,0.5 100,100,120,100,120,100,120,120,120,120,100,120,100,120,100,100 "
            + "-box:color=0,0.5,0.5,0.5 150,100,240,180 "
            + "-d uint8 -o lines.tif")

# test --box
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 "
            + "--box:color=0,1,1,1 150,100,240,180 "
            + "--box:color=0.5,0.5,0,0.5:fill=1 100,50,180,140 "
            + "-d uint8 -o box.tif")

# test --point
command += oiiotool ("--create 64x64 3 "
            + "--point:color=0,1,1,1 50,10 "
            + "--point:color=1,0,1,1 20,20,30,30,40,40 "
            + "-d uint8 -o points.tif")

# To add more tests, just append more lines like the above and also add
# the new 'feature.tif' (or whatever you call it) to the outputs list,
# below.

# Outputs to check against references
outputs = [ "pattern-const.tif", "pattern-gradienth.tif",
            "pattern-gradientv.tif", "pattern-gradient4.tif",
            "noise-uniform3.tif", "noise-gauss.tif", "noise-salt.tif",
            "filled.tif", "fillv.tif", "fillh.tif", "fill4.tif",
            "lines.tif", "box.tif", "points.tif",
            "out.txt" ]

#print "Running this command:\n" + command + "\n"
OpenImageIO/oiio
testsuite/oiiotool-pattern/run.py
Python
bsd-3-clause
3,464
import foauth.providers


class Cheddar(foauth.providers.OAuth2):
    # General info about the provider
    provider_url = 'https://cheddarapp.com/'
    docs_url = 'https://cheddarapp.com/developer/'
    category = 'Tasks'

    # URLs to interact with the API
    authorize_url = 'https://api.cheddarapp.com/oauth/authorize'
    access_token_url = 'https://api.cheddarapp.com/oauth/token'
    api_domain = 'api.cheddarapp.com'

    available_permissions = [
        (None, 'read and write to your tasks'),
    ]

    def get_user_id(self, key):
        r = self.api(key, self.api_domain, u'/v1/me')
        return unicode(r.json()[u'id'])
foauth/foauth.org
services/cheddar.py
Python
bsd-3-clause
639
"""tuple sub-class which holds weak references to objects""" import weakref class WeakTuple( tuple ): """tuple sub-class holding weakrefs to items The weak reference tuple is intended to allow you to store references to a list of objects without needing to manage weak references directly. For the most part, the WeakTuple operates just like a tuple object, in that it allows for all of the standard tuple operations. The difference is that the WeakTuple class only stores weak references to its items. As a result, adding an object to the tuple does not necessarily mean that it will still be there later on during execution (if the referent has been garbage collected). Because WeakTuple's are static (their membership doesn't change), they will raise ReferenceError when a sub-item is missing rather than skipping missing items as does the WeakList. This can occur for basically _any_ use of the tuple. """ def __init__( self, sequence=() ): """Initialize the tuple The WeakTuple will store weak references to objects within the sequence. """ super( WeakTuple, self).__init__( map( self.wrap, sequence)) def valid( self ): """Explicit validity check for the tuple Checks whether all references can be resolved, basically just sees whether calling list(self) raises a ReferenceError """ try: list( self ) return 1 except weakref.ReferenceError: return 0 def wrap( self, item ): """Wrap an individual item in a weak-reference If the item is already a weak reference, we store a reference to the original item. We use approximately the same weak reference callback mechanism as the standard weakref.WeakKeyDictionary object. """ if isinstance( item, weakref.ReferenceType ): item = item() return weakref.ref( item ) def unwrap( self, item ): """Unwrap an individual item This is a fairly trivial operation at the moment, it merely calls the item with no arguments and returns the result. 
""" ref = item() if ref is None: raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item)) return ref def __iter__( self ): """Iterate over the tuple, yielding strong references""" index = 0 while index < len(self): yield self[index] index += 1 def __getitem__( self, index ): """Get the item at the given index""" return self.unwrap(super (WeakTuple,self).__getitem__( index )) def __getslice__( self, start, stop ): """Get the items in the range start to stop""" return map( self.unwrap, super (WeakTuple,self).__getslice__( start, stop) ) def __contains__( self, item ): """Return boolean indicating whether the item is in the tuple""" for node in self: if item is node: return 1 return 0 def count( self, item ): """Return integer count of instances of item in tuple""" count = 0 for node in self: if item is node: count += 1 return count def index( self, item ): """Return integer index of item in tuple""" count = 0 for node in self: if item is node: return count count += 1 return -1 def __add__(self, other): """Return a new path with other as tail""" return tuple(self) + other def __eq__( self, sequence ): """Compare the tuple to another (==)""" return list(self) == sequence def __ge__( self, sequence ): """Compare the tuple to another (>=)""" return list(self) >= sequence def __gt__( self, sequence ): """Compare the tuple to another (>)""" return list(self) > sequence def __le__( self, sequence ): """Compare the tuple to another (<=)""" return list(self) <= sequence def __lt__( self, sequence ): """Compare the tuple to another (<)""" return list(self) < sequence def __ne__( self, sequence ): """Compare the tuple to another (!=)""" return list(self) != sequence def __repr__( self ): """Return a code-like representation of the weak tuple""" return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
menpo/vrml97
vrml/weaktuple.py
Python
bsd-3-clause
4,708
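WeakTuple above is built directly on the standard library's weakref.ref callables (see its wrap and unwrap methods). A short, self-contained illustration of that underlying mechanism, independent of the class itself:

import weakref

class Thing(object):
    pass

obj = Thing()
ref = weakref.ref(obj)    # what wrap() stores for each item
assert ref() is obj       # what unwrap() does to recover the item

del obj                   # CPython's refcounting collects it immediately...
assert ref() is None      # ...so the reference now resolves to None, which
                          # WeakTuple.unwrap converts into a ReferenceError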
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).

import re

from rest_framework.serializers import ValidationError


def name(value):
    '''Matches names of people, countries and other things.'''
    if re.match(r'^[A-Za-z\s\.\-\'àèéìòóôù]+$', value) is None:
        raise ValidationError('This field contains invalid characters.')


def address(value):
    '''Matches street addresses.'''
    if re.match(r'^[\w\s\.\-\'àèéìòóôù]+$', value) is None:
        raise ValidationError('This field contains invalid characters.')


def numeric(value):
    '''Matches numbers and spaces.'''
    if re.match(r'^[\d\s]+$', value) is None:
        raise ValidationError('This field can only contain numbers and spaces.')


def email(value):
    '''Loosely matches email addresses.'''
    if re.match(r'^[\w_.+-]+@[\w-]+\.[\w\-.]+$', value) is None:
        raise ValidationError('This is an invalid email address.')


def phone_international(value):
    '''Loosely matches phone numbers.'''
    if re.match(r'^[\d\-x\s\+\(\)]+$', value) is None:
        raise ValidationError('This is an invalid phone number.')


def phone_domestic(value):
    '''Matches domestic phone numbers.'''
    if re.match(r'^\(?(\d{3})\)?\s(\d{3})-(\d{4})(\sx\d{1,5})?$', value) is None:
        raise ValidationError('This is an invalid phone number.')


def nonempty(value):
    '''Requires that a field be non-empty.'''
    if not value:
        raise ValidationError('This field is required.')
ctmunwebmaster/huxley
huxley/api/validators.py
Python
bsd-3-clause
1,610
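These validators follow Django REST Framework's plain-callable convention: each takes a value and raises ValidationError on failure. A brief usage sketch; the serializer and its fields are hypothetical, only the validators module comes from the file above.

from rest_framework import serializers

from huxley.api import validators


class ContactSerializer(serializers.Serializer):
    # DRF calls each validator with the deserialized value
    name = serializers.CharField(validators=[validators.name])
    phone = serializers.CharField(validators=[validators.phone_international])
    email = serializers.CharField(validators=[validators.email])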
## Automatically adapted for scipy Oct 21, 2005 by

"""
Integration routines
====================

 Methods for Integrating Functions given function object.

   quad          -- General purpose integration.
   dblquad       -- General purpose double integration.
   tplquad       -- General purpose triple integration.
   fixed_quad    -- Integrate func(x) using Gaussian quadrature of order n.
   quadrature    -- Integrate with given tolerance using Gaussian quadrature.
   romberg       -- Integrate func using Romberg integration.

 Methods for Integrating Functions given fixed samples.

   trapz         -- Use trapezoidal rule to compute integral from samples.
   cumtrapz      -- Use trapezoidal rule to cumulatively compute integral.
   simps         -- Use Simpson's rule to compute integral from samples.
   romb          -- Use Romberg Integration to compute integral from
                    (2**k + 1) evenly-spaced samples.

   See the special module's orthogonal polynomials (special) for Gaussian
   quadrature roots and weights for other weighting factors and regions.

 Interface to numerical integrators of ODE systems.

   odeint        -- General integration of ordinary differential equations.
   ode           -- Integrate ODE using VODE and ZVODE routines.
"""

postpone_import = 1
stefanv/scipy3
scipy/integrate/info.py
Python
bsd-3-clause
1,311
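Since info.py is only the package docstring, here is a one-line example of the first routine it lists, scipy.integrate.quad; the integrand is arbitrary and chosen so the exact answer (1/3) is easy to check.

from scipy.integrate import quad

# integrate x**2 over [0, 1]; quad returns (value, estimated_error)
value, err = quad(lambda x: x ** 2, 0.0, 1.0)
assert abs(value - 1.0 / 3.0) < 1e-10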
DEPS = [
  'recipe_engine/platform',
]
shishkander/recipes-py
recipe_modules/path/__init__.py
Python
bsd-3-clause
39
""" 1.0 version of the API. """ from sharrock.descriptors import Descriptor version = '1.0' class MultiversionExample(Descriptor): """ This is the first version of this particular function. """
Axilent/sharrock
sharrock_multiversion_example/descriptors/one.py
Python
bsd-3-clause
208
#!/usr/bin/env python

number = 0
for a in range(999,99,-1):
    for b in range(999,99,-1):
        pal=a*b
        if (str(pal) == str(pal)[::-1]):
            if (pal > number):
                number = pal
            break

print(number)
GT-IDEaS/SkillsWorkshop2017
Week01/Problem04/cruiz_04.py
Python
bsd-3-clause
242
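The heart of the brute-force search above is the string-reversal palindrome test; pulled out on its own for clarity (the helper name is illustrative):

def is_palindrome(n):
    # a palindromic number reads the same forwards and backwards
    s = str(n)
    return s == s[::-1]

assert is_palindrome(9009)       # 91 * 99, the two-digit analogue
assert not is_palindrome(9019)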
from __future__ import unicode_literals
from unittest import TestCase
from shutil import rmtree
from tempfile import mkdtemp
from os import makedirs
from os.path import join, exists, dirname

from awsfabrictasks.s3.api import dirlist_absfilenames
from awsfabrictasks.s3.api import localpath_to_s3path
from awsfabrictasks.s3.api import s3path_to_localpath


def makefile(tempdir, path, contents):
    path = join(tempdir, *path.split('/'))
    if not exists(dirname(path)):
        makedirs(dirname(path))
    open(path, 'wb').write(contents.encode('utf-8'))
    return path


class TestDirlistAbsfilenames(TestCase):
    def setUp(self):
        self.tempdir = mkdtemp()
        files = (('hello/world.txt', 'Hello world'),
                 ('test.py', 'print "test"'),
                 ('hello/cruel/world.txt', 'Cruel?'))
        self.paths = set()
        for path, contents in files:
            realpath = makefile(self.tempdir, path, contents)
            self.paths.add(realpath)

    def tearDown(self):
        rmtree(self.tempdir)

    def test_dirlist_absfilenames(self):
        result = dirlist_absfilenames(self.tempdir)
        self.assertEquals(result, self.paths)


class TestLocalpathToS3path(TestCase):
    def setUp(self):
        self.tempdir = mkdtemp()
        makefile(self.tempdir, 'hello/world.txt', '')

    def tearDown(self):
        rmtree(self.tempdir)

    def test_localpath_to_s3path(self):
        s3path = localpath_to_s3path(self.tempdir, join(self.tempdir, 'hello/world.txt'), 'my/test')
        self.assertEquals(s3path, 'my/test/hello/world.txt')

    def test_s3path_to_localpath(self):
        localpath = s3path_to_localpath('mydir/', 'mydir/hello/world.txt', join(self.tempdir, 'my', 'test'))
        self.assertEquals(localpath, join(self.tempdir, 'my', 'test', 'hello', 'world.txt'))
espenak/awsfabrictasks
awsfabrictasks/tests/s3/test_api.py
Python
bsd-3-clause
1,830
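The assertions in TestLocalpathToS3path pin down localpath_to_s3path as a prefix swap between a local directory and an S3 key prefix. Below is a hedged reference sketch of that mapping, written only to make the expected behaviour concrete; it is not the library's actual implementation.

from os.path import relpath

def localpath_to_s3path_sketch(localdir, localpath, s3prefix):
    # strip the local directory, normalize separators, prepend the S3 prefix
    relative = relpath(localpath, localdir).replace('\\', '/')
    return '{0}/{1}'.format(s3prefix.rstrip('/'), relative)

# mirrors the expectation in test_localpath_to_s3path
assert localpath_to_s3path_sketch('/tmp/x', '/tmp/x/hello/world.txt',
                                  'my/test') == 'my/test/hello/world.txt'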
""" Classes for using robotic or other hardware using Topographica. This module contains several classes for constructing robotics interfaces to Topographica simulations. It includes modules that read input from or send output to robot devices, and a (quasi) real-time simulation object that attempts to maintain a correspondence between simulation time and real time. This module requires the PlayerStage robot interface system (from playerstage.sourceforge.net), and the playerrobot module for high-level communications with Player robots. """ import Image import ImageOps from math import pi,cos,sin import param from topo.base.simulation import EventProcessor from imagen.image import GenericImage from playerrobot import CameraDevice, PTZDevice class CameraImage(GenericImage): """ An image pattern generator that gets its image from a Player camera device. """ camera = param.ClassSelector(CameraDevice,default=None,doc=""" An instance of playerrobot.CameraDevice to be used to generate images.""") def __init__(self,**params): super(CameraImage,self).__init__(**params) self._image = None def _get_image(self,params): self._decode_image(*self.camera.image) return True def _decode_image(self,fmt,w,h,bpp,fdiv,data): if fmt==1: self._image = Image.new('L',(w,h)) self._image.fromstring(data,'raw') else: # JPALERT: if not grayscale, then assume color. This # should be expanded for other modes. rgb_im = Image.new('RGB',(w,h)) rgb_im.fromstring(data,'raw') self._image = ImageOps.grayscale(rgb_im) class CameraImageQueued(CameraImage): """ A version of CameraImage that gets the image from the camera's image queue, rather than directly from the camera object. Using queues is necessary when running the playerrobot in a separate process without shared memory. When getting an image, this pattern generator will fetch every image in the image queue and use the most recent as the current pattern. """ def _get_image(self,params): im_spec = None if self._image is None: # if we don't have an image then block until we get one im_spec = self.camera.image_queue.get() self.camera.image_queue.task_done() # Make sure we clear the image queue and get the most recent image. while not self.camera.image_queue.empty(): im_spec = self.camera.image_queue.get_nowait() self.camera.image_queue.task_done() if im_spec: # If we got a new image from the queue, then # construct a PIL image from it. self._decode_image(*im_spec) return True else: return False class PTZ(EventProcessor): """ Pan/Tilt/Zoom control. This event processor takes input events on its 'Saccade' input port in the form of (amplitude,direction) saccade commands (as produced by the topo.sheet.saccade.SaccadeController class) and appropriately servoes the attached PTZ object. There is not currently any dynamic zoom control, though the static zoom level can be set as a parameter. 
""" ptz = param.ClassSelector(PTZDevice,default=None,doc=""" An instance of playerrobot.PTZDevice to be controlled.""") zoom = param.Number(default=120,bounds=(0,None),doc=""" Desired FOV width in degrees.""") speed = param.Number(default=200,bounds=(0,None),doc=""" Desired max pan/tilt speed in deg/sec.""") invert_amplitude = param.Boolean(default=False,doc=""" Invert the sense of the amplitude signal, in order to get the appropriate ipsi-/contralateral sense of saccades.""") dest_ports = ["Saccade"] src_ports = ["State"] def start(self): pass def input_event(self,conn,data): if conn.dest_port == "Saccade": # the data should be (amplitude,direction) amplitude,direction = data self.shift(amplitude,direction) def shift(self,amplitude,direction): self.debug("Executing shift, amplitude=%.2f, direction=%.2f"%(amplitude,direction)) if self.invert_amplitude: amplitude *= -1 # if the amplitude is negative, invert the direction, so up is still up. if amplitude < 0: direction *= -1 angle = direction * pi/180 pan,tilt,zoom = self.ptz.state_deg pan += amplitude * cos(angle) tilt += amplitude * sin(angle) self.ptz.set_ws_deg(pan,tilt,self.zoom,self.speed,self.speed) ## self.ptz.cmd_queue.put_nowait(('set_ws_deg', ## (pan,tilt,self.zoom,self.speed,self.speed)))
ioam/topographica
topo/hardware/robotics.py
Python
bsd-3-clause
4,844
# -*- coding: utf-8 -*- import os from os import path as op import shutil import glob import warnings from nose.tools import assert_true, assert_raises from numpy.testing import assert_equal, assert_allclose from mne import concatenate_raws, read_bem_surfaces from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg, mne_compute_proj_ecg, mne_compute_proj_eog, mne_coreg, mne_kit2fiff, mne_make_scalp_surfaces, mne_maxfilter, mne_report, mne_surf2bem, mne_watershed_bem, mne_compare_fiff, mne_flash_bem, mne_show_fiff, mne_show_info) from mne.datasets import testing, sample from mne.io import read_raw_fif from mne.utils import (run_tests_if_main, _TempDir, requires_mne, requires_PIL, requires_mayavi, requires_tvtk, requires_freesurfer, ArgvSetter, slow_test, ultra_slow_test) base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(base_dir, 'test_raw.fif') subjects_dir = op.join(testing.data_path(download=False), 'subjects') warnings.simplefilter('always') def check_usage(module, force_help=False): """Helper to ensure we print usage""" args = ('--help',) if force_help else () with ArgvSetter(args) as out: try: module.run() except SystemExit: pass assert_true('Usage: ' in out.stdout.getvalue()) @slow_test def test_browse_raw(): """Test mne browse_raw.""" check_usage(mne_browse_raw) def test_bti2fiff(): """Test mne bti2fiff.""" check_usage(mne_bti2fiff) def test_compare_fiff(): """Test mne compare_fiff.""" check_usage(mne_compare_fiff) def test_show_fiff(): """Test mne compare_fiff.""" check_usage(mne_show_fiff) with ArgvSetter((raw_fname,)): mne_show_fiff.run() @requires_mne def test_clean_eog_ecg(): """Test mne clean_eog_ecg.""" check_usage(mne_clean_eog_ecg) tempdir = _TempDir() raw = concatenate_raws([read_raw_fif(f) for f in [raw_fname, raw_fname, raw_fname]]) raw.info['bads'] = ['MEG 2443'] use_fname = op.join(tempdir, op.basename(raw_fname)) raw.save(use_fname) with ArgvSetter(('-i', use_fname, '--quiet')): mne_clean_eog_ecg.run() fnames = glob.glob(op.join(tempdir, '*proj.fif')) assert_true(len(fnames) == 2) # two projs fnames = glob.glob(op.join(tempdir, '*-eve.fif')) assert_true(len(fnames) == 3) # raw plus two projs @slow_test def test_compute_proj_ecg_eog(): """Test mne compute_proj_ecg/eog.""" for fun in (mne_compute_proj_ecg, mne_compute_proj_eog): check_usage(fun) tempdir = _TempDir() use_fname = op.join(tempdir, op.basename(raw_fname)) bad_fname = op.join(tempdir, 'bads.txt') with open(bad_fname, 'w') as fid: fid.write('MEG 2443\n') shutil.copyfile(raw_fname, use_fname) with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname, '--rej-eeg', '150')): fun.run() fnames = glob.glob(op.join(tempdir, '*proj.fif')) assert_true(len(fnames) == 1) fnames = glob.glob(op.join(tempdir, '*-eve.fif')) assert_true(len(fnames) == 1) def test_coreg(): """Test mne coreg.""" assert_true(hasattr(mne_coreg, 'run')) def test_kit2fiff(): """Test mne kit2fiff.""" # Can't check check_usage(mne_kit2fiff, force_help=True) @requires_tvtk @testing.requires_testing_data def test_make_scalp_surfaces(): """Test mne make_scalp_surfaces.""" check_usage(mne_make_scalp_surfaces) # Copy necessary files to avoid FreeSurfer call tempdir = _TempDir() surf_path = op.join(subjects_dir, 'sample', 'surf') surf_path_new = op.join(tempdir, 'sample', 'surf') os.mkdir(op.join(tempdir, 'sample')) os.mkdir(surf_path_new) subj_dir = op.join(tempdir, 'sample', 'bem') os.mkdir(subj_dir) shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new) orig_fs = 
os.getenv('FREESURFER_HOME', None) if orig_fs is not None: del os.environ['FREESURFER_HOME'] cmd = ('-s', 'sample', '--subjects-dir', tempdir) os.environ['_MNE_TESTING_SCALP'] = 'true' dense_fname = op.join(subj_dir, 'sample-head-dense.fif') medium_fname = op.join(subj_dir, 'sample-head-medium.fif') try: with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False): assert_raises(RuntimeError, mne_make_scalp_surfaces.run) os.environ['FREESURFER_HOME'] = tempdir # don't actually use it mne_make_scalp_surfaces.run() assert_true(op.isfile(dense_fname)) assert_true(op.isfile(medium_fname)) assert_raises(IOError, mne_make_scalp_surfaces.run) # no overwrite finally: if orig_fs is not None: os.environ['FREESURFER_HOME'] = orig_fs else: del os.environ['FREESURFER_HOME'] del os.environ['_MNE_TESTING_SCALP'] # actually check the outputs head_py = read_bem_surfaces(dense_fname) assert_equal(len(head_py), 1) head_py = head_py[0] head_c = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem', 'sample-head-dense.fif'))[0] assert_allclose(head_py['rr'], head_c['rr']) def test_maxfilter(): """Test mne maxfilter.""" check_usage(mne_maxfilter) with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60', '--trans', raw_fname)) as out: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') os.environ['_MNE_MAXFILTER_TEST'] = 'true' try: mne_maxfilter.run() finally: del os.environ['_MNE_MAXFILTER_TEST'] assert_true(len(w) == 1) for check in ('maxfilter', '-trans', '-movecomp'): assert_true(check in out.stdout.getvalue(), check) @slow_test @requires_mayavi @requires_PIL @testing.requires_testing_data def test_report(): """Test mne report.""" check_usage(mne_report) tempdir = _TempDir() use_fname = op.join(tempdir, op.basename(raw_fname)) shutil.copyfile(raw_fname, use_fname) with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir, '-s', 'sample', '--no-browser', '-m', '30')): mne_report.run() fnames = glob.glob(op.join(tempdir, '*.html')) assert_true(len(fnames) == 1) def test_surf2bem(): """Test mne surf2bem.""" check_usage(mne_surf2bem) @ultra_slow_test @requires_freesurfer @testing.requires_testing_data def test_watershed_bem(): """Test mne watershed bem.""" check_usage(mne_watershed_bem) # Copy necessary files to tempdir tempdir = _TempDir() mridata_path = op.join(subjects_dir, 'sample', 'mri') mridata_path_new = op.join(tempdir, 'sample', 'mri') os.mkdir(op.join(tempdir, 'sample')) os.mkdir(mridata_path_new) if op.exists(op.join(mridata_path, 'T1')): shutil.copytree(op.join(mridata_path, 'T1'), op.join(mridata_path_new, 'T1')) if op.exists(op.join(mridata_path, 'T1.mgz')): shutil.copyfile(op.join(mridata_path, 'T1.mgz'), op.join(mridata_path_new, 'T1.mgz')) with ArgvSetter(('-d', tempdir, '-s', 'sample', '-o'), disable_stdout=False, disable_stderr=False): mne_watershed_bem.run() @ultra_slow_test @requires_freesurfer @sample.requires_sample_data def test_flash_bem(): """Test mne flash_bem.""" check_usage(mne_flash_bem, force_help=True) # Using the sample dataset subjects_dir = op.join(sample.data_path(download=False), 'subjects') # Copy necessary files to tempdir tempdir = _TempDir() mridata_path = op.join(subjects_dir, 'sample', 'mri') mridata_path_new = op.join(tempdir, 'sample', 'mri') os.makedirs(op.join(mridata_path_new, 'flash')) os.makedirs(op.join(tempdir, 'sample', 'bem')) shutil.copyfile(op.join(mridata_path, 'T1.mgz'), op.join(mridata_path_new, 'T1.mgz')) shutil.copyfile(op.join(mridata_path, 'brain.mgz'), op.join(mridata_path_new, 
'brain.mgz')) # Copy the available mri/flash/mef*.mgz files from the dataset files = glob.glob(op.join(mridata_path, 'flash', 'mef*.mgz')) for infile in files: shutil.copyfile(infile, op.join(mridata_path_new, 'flash', op.basename(infile))) # Test mne flash_bem with --noconvert option # (since there are no DICOM Flash images in dataset) currdir = os.getcwd() with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'), disable_stdout=False, disable_stderr=False): mne_flash_bem.run() os.chdir(currdir) def test_show_info(): """Test mne show_info.""" check_usage(mne_show_info) with ArgvSetter((raw_fname,)): mne_show_info.run() run_tests_if_main()
nicproulx/mne-python
mne/commands/tests/test_commands.py
Python
bsd-3-clause
9,165
from sqlagg.columns import SimpleColumn from sqlagg.filters import BETWEEN, IN, EQ from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn from corehq.apps.reports.sqlreport import SqlData, DataFormatter, TableDataFormat, DatabaseColumn from custom.tdh.reports import UNNECESSARY_FIELDS, CHILD_HEADERS_MAP, INFANT_HEADERS_MAP, NEWBORN_HEADERS_MAP def merge_rows(classification_sql_data, enroll_sql_data, treatment_sql_data): result = [] classification_case_id_index = [id for id, column in enumerate(classification_sql_data.columns) if column.slug == 'case_id'][0] enroll_case_id_index = [id for id, column in enumerate(enroll_sql_data.columns) if column.slug == 'case_id'][0] treatment_case_id_index = [id for id, column in enumerate(treatment_sql_data.columns) if column.slug == 'case_id'][0] enroll_map = {row[enroll_case_id_index]: row for row in enroll_sql_data.rows} treatment_map = {row[treatment_case_id_index]: row[:treatment_case_id_index] + row[treatment_case_id_index + 1:] for row in treatment_sql_data.rows} for classification_row in classification_sql_data.rows: row = classification_row[:classification_case_id_index] + classification_row[ classification_case_id_index + 1:] classification_case_id = classification_row[classification_case_id_index] if classification_case_id in enroll_map: row = enroll_map[classification_case_id] + row else: row = [classification_case_id] + ['' for i in range(len(enroll_sql_data.headers) - 1)] + row if classification_case_id in treatment_map: row.extend(treatment_map[classification_case_id]) else: row.extend(['' for i in range(len(treatment_sql_data.headers))]) result.append(row) return result class BaseSqlData(SqlData): datatables = True no_value = {'sort_key': 0, 'html': 0} def header(self, header): if self.__class__.__name__[0] == 'N': return NEWBORN_HEADERS_MAP[header] if header in NEWBORN_HEADERS_MAP else header elif self.__class__.__name__[0] == 'I': return INFANT_HEADERS_MAP[header] if header in INFANT_HEADERS_MAP else header else: return CHILD_HEADERS_MAP[header] if header in CHILD_HEADERS_MAP else header @property def filters(self): filters = [BETWEEN("date", "startdate", "enddate"), EQ('domain', 'domain')] if self.config['emw']: filters.append(IN('user_id', 'emw')) return filters @property def group_by(self): return [] @property def columns(self): columns = [] for k in self.group_by: if k in ['zscore_hfa', 'zscore_wfa', 'zscore_wfh', 'mean_hfa', 'mean_wfa', 'mean_wfh']: columns.append(DatabaseColumn(k, SimpleColumn(k), format_fn=lambda x: "%.2f" % float(x if x else 0))) else: columns.append(DatabaseColumn(k, SimpleColumn(k))) return columns @property def headers(self): return [DataTablesColumn(self.header(k)) for k in self.group_by[1:]] @property def rows(self): formatter = DataFormatter(TableDataFormat(self.columns, no_value=self.no_value)) return list(formatter.format(self.data, keys=self.keys, group_by=self.group_by)) class InfantConsultationHistory(BaseSqlData): table_name = "fluff_TDHInfantClassificationFluff" slug = 'infant_consultation_history' title = 'Infant Consultation History' @property def columns(self): return EnrollChild().columns + InfantClassification(config=self.config).columns + InfantTreatment().columns @property def headers(self): return DataTablesHeader( *EnrollChild().headers + InfantClassification(config=self.config).headers + InfantTreatment().headers) @property def group_by(self): return EnrollChild().group_by + InfantClassification( config=self.config).group_by + InfantTreatment().group_by @property def 
rows(self): return merge_rows(InfantClassification(config=self.config), EnrollChild(), InfantTreatment()) class InfantConsultationHistoryComplete(BaseSqlData): table_name = "fluff_TDHInfantClassificationFluff" slug = 'infant_consultation_history' title = 'Infant Consultation History' @property def columns(self): return EnrollChild().columns + InfantClassificationExtended( config=self.config).columns + InfantTreatmentExtended().columns @property def headers(self): return DataTablesHeader(*EnrollChild().headers + InfantClassificationExtended( config=self.config).headers + InfantTreatmentExtended().headers) @property def group_by(self): return EnrollChild().group_by + InfantClassificationExtended( config=self.config).group_by + InfantTreatmentExtended().group_by @property def rows(self): return merge_rows(InfantClassificationExtended(config=self.config), EnrollChild(), InfantTreatmentExtended()) class NewbornConsultationHistory(BaseSqlData): table_name = "fluff_TDHNewbornClassificationFluff" slug = 'newborn_consultation_history' title = 'Newborn Consultation History' @property def columns(self): return EnrollChild().columns + NewbornClassification( config=self.config).columns + NewbornTreatment().columns @property def headers(self): return DataTablesHeader(*EnrollChild().headers + NewbornClassification( config=self.config).headers + NewbornTreatment().headers) @property def group_by(self): return EnrollChild().group_by + NewbornClassification( config=self.config).group_by + NewbornTreatment().group_by @property def rows(self): return merge_rows(NewbornClassification(config=self.config), EnrollChild(), NewbornTreatment()) class NewbornConsultationHistoryComplete(BaseSqlData): table_name = "fluff_TDHNewbornClassificationFluff" slug = 'newborn_consultation_history' title = 'Newborn Consultation History' @property def columns(self): return EnrollChild().columns + NewbornClassificationExtended( config=self.config).columns + NewbornTreatmentExtended().columns @property def headers(self): return DataTablesHeader(*EnrollChild().headers + NewbornClassificationExtended( config=self.config).headers + NewbornTreatmentExtended().headers) @property def group_by(self): return EnrollChild().group_by + NewbornClassificationExtended( config=self.config).group_by + NewbornTreatmentExtended().group_by @property def rows(self): return merge_rows(NewbornClassificationExtended(config=self.config), EnrollChild(), NewbornTreatmentExtended()) class ChildConsultationHistory(BaseSqlData): table_name = "fluff_TDHChildClassificationFluff" slug = 'newborn_consultation_history' title = 'Newborn Consultation History' @property def columns(self): return EnrollChild().columns + ChildClassification(config=self.config).columns + ChildTreatment().columns @property def headers(self): return DataTablesHeader( *EnrollChild().headers + ChildClassification(config=self.config).headers + ChildTreatment().headers) @property def group_by(self): return EnrollChild().group_by + ChildClassification( config=self.config).group_by + ChildTreatment().group_by @property def rows(self): return merge_rows(ChildClassification(config=self.config), EnrollChild(), ChildTreatment()) class ChildConsultationHistoryComplete(BaseSqlData): table_name = "fluff_TDHChildClassificationFluff" slug = 'newborn_consultation_history' title = 'Newborn Consultation History' @property def columns(self): return EnrollChild().columns + ChildClassificationExtended( config=self.config).columns + ChildTreatmentExtended().columns @property def headers(self): return 
DataTablesHeader( *EnrollChild().headers + ChildClassificationExtended( config=self.config).headers + ChildTreatmentExtended().headers) @property def group_by(self): return EnrollChild().group_by + ChildClassificationExtended( config=self.config).group_by + ChildTreatmentExtended().group_by @property def rows(self): return merge_rows(ChildClassificationExtended(config=self.config), EnrollChild(), ChildTreatmentExtended()) class InfantClassification(BaseSqlData): table_name = "fluff_TDHInfantClassificationFluff" slug = 'infant_classification' title = 'Infant Classification' @property def group_by(self): return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type', 'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa', 'zscore_wfh', 'mean_wfh', 'classification_deshydratation', 'classification_diahree', 'classification_infection', 'classification_malnutrition', 'classification_vih', 'inf_bac_qa', 'inf_bac_freq_resp', 'inf_bac_qc', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg', 'inf_bac_qh', 'inf_bac_qj', 'inf_bac_qk', 'inf_bac_ql', 'inf_bac_qm', 'diarrhee_qa', 'alimentation_qa', 'alimentation_qb', 'alimentation_qc', 'alimentation_qd', 'alimentation_qf', 'alimentation_qg', 'alimentation_qh', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd', 'vih_qe', 'vih_qf', 'vih_qg', 'vih_qh', 'vih_qi', 'vih_qj', 'vih_qk', 'vih_ql', 'other_comments'] class InfantClassificationExtended(BaseSqlData): table_name = "fluff_TDHInfantClassificationFluff" slug = 'infant_classification' title = 'Infant Classification' @property def columns(self): from custom.tdh.models import TDHInfantClassificationFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHInfantClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHInfantClassificationFluff return [DataTablesColumn(self.header(k)) for k in TDHInfantClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHInfantClassificationFluff return [k for k in TDHInfantClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class NewbornClassification(BaseSqlData): table_name = "fluff_TDHNewbornClassificationFluff" slug = 'newborn_classification' title = 'Newborn Classification' @property def group_by(self): return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type', 'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa', 'zscore_wfh', 'mean_wfh', 'classification_infection', 'classification_malnutrition', 'classification_occular', 'classification_poids', 'classification_vih', 'inf_bac_qa', 'inf_bac_qb', 'inf_bac_freq_resp', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg', 'inf_bac_qh', 'inf_bac_qi', 'inf_bac_qj', 'poids_qa', 'inf_occ_qa', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd', 'vih_qe', 'vih_qf', 'vih_qg', 'alimentation_qa', 'alimentation_qb', 'alimentation_qd', 'alimentation_qf', 'alimentation_qg', 'other_comments'] class NewbornClassificationExtended(BaseSqlData): table_name = "fluff_TDHNewbornClassificationFluff" slug = 'newborn_classification' title = 'Newborn Classification' @property def columns(self): from custom.tdh.models import TDHNewbornClassificationFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys() if k not in 
UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHNewbornClassificationFluff return [DataTablesColumn(self.header(k)) for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHNewbornClassificationFluff return [k for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class ChildClassification(BaseSqlData): table_name = "fluff_TDHChildClassificationFluff" slug = 'child_consultation_history' title = 'Child Consultation History' @property def group_by(self): return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type', 'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa', 'zscore_wfh', 'mean_wfh', 'measles_1', 'measles_2', 'opv_0', 'opv_1', 'opv_2', 'opv_3', 'penta_1', 'penta_2', 'penta_3', 'pneumo_1', 'pneumo_2', 'pneumo_3', 'rotavirus_1', 'rotavirus_2', 'rotavirus_3', 'yf', 'classification_anemie', 'classification_deshydratation', 'classification_diahree', 'classification_dysenterie', 'classification_malnutrition', 'classification_oreille', 'classification_paludisme', 'classification_pneumonie', 'classification_rougeole', 'classification_vih', 'classifications_graves', 'boire', 'vomit', 'convulsions_passe', 'lethargie', 'convulsions_present', 'toux_presence', 'toux_presence_duree', 'freq_resp', 'tirage', 'stridor', 'diarrhee', 'diarrhee_presence', 'diarrhee_presence_duree', 'sang_selles', 'conscience_agitation', 'yeux_enfonces', 'soif', 'pli_cutane', 'fievre_presence', 'fievre_presence_duree', 'fievre_presence_longue', 'tdr', 'urines_foncees', 'saignements_anormaux', 'raideur_nuque', 'ictere', 'choc', 'eruption_cutanee', 'ecoulement_nasal', 'yeux_rouge', 'ecoulement_oculaire', 'ulcerations', 'cornee', 'oreille', 'oreille_probleme', 'oreille_douleur', 'oreille_ecoulement', 'oreille_ecoulement_duree', 'oreille_gonflement', 'paleur_palmaire', 'oedemes', 'test_appetit', 'serologie_enfant', 'test_enfant', 'pneumonie_recidivante', 'diarrhee_dernierement', 'candidose_buccale', 'hypertrophie_ganglions_lymphatiques', 'augmentation_glande_parotide', 'test_mere', 'serologie_mere', 'other_comments'] class ChildClassificationExtended(BaseSqlData): table_name = "fluff_TDHChildClassificationFluff" slug = 'child_classification' title = 'Child Classification' @property def columns(self): from custom.tdh.models import TDHChildClassificationFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHChildClassificationFluff return [DataTablesColumn(self.header(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHChildClassificationFluff return [k for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class EnrollChild(BaseSqlData): table_name = "fluff_TDHEnrollChildFluff" slug = 'enroll_child' title = 'Enroll Child' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'dob', 'sex', 'village'] @property def headers(self): return [DataTablesColumn(self.header(k)) for k in self.group_by] class EnrollChildExtended(BaseSqlData): table_name = "fluff_TDHEnrollChildFluff" slug = 'enroll_child' title = 
'Enroll Child' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHEnrollChildFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHEnrollChildFluff return [DataTablesColumn(self.header(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHEnrollChildFluff return [k for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class InfantTreatment(BaseSqlData): table_name = "fluff_TDHInfantTreatmentFluff" slug = 'infant_treatment' title = 'Infant Treatment' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_treat_2', 'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1', 'infection_grave_no_ref_treat_2', 'infection_grave_no_ref_treat_5', 'infection_locale_treat_0', 'infection_locale_treat_1', 'maladie_grave_treat_0', 'maladie_grave_treat_1'] class InfantTreatmentExtended(BaseSqlData): table_name = "fluff_TDHInfantTreatmentFluff" slug = 'infant_treatment' title = 'Infant Treatment' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHInfantTreatmentFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHInfantTreatmentFluff return [DataTablesColumn(self.header(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHInfantTreatmentFluff return [k for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class NewbornTreatment(BaseSqlData): table_name = "fluff_TDHNewbornTreatmentFluff" slug = 'newborn_treatment' title = 'Newborn Treatment' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1', 'infection_locale_treat_0', 'infection_locale_treat_1', 'incapable_nourrir_treat_0', 'incapable_nourrir_treat_1'] class NewbornTreatmentExtended(BaseSqlData): table_name = "fluff_TDHNewbornTreatmentFluff" slug = 'newborn_treatment' title = 'Newborn Treatment' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def headers(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [DataTablesColumn(self.header(k)) for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHNewbornTreatmentFluff return [k for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] class ChildTreatment(BaseSqlData): table_name = "fluff_TDHChildTreatmentFluff" slug = 'child_treatment' title = 'Child Treatment' @property def filters(self): return [] @property def group_by(self): return ['case_id', 'pneumonie_grave_treat_0', 
'pneumonie_grave_treat_1', 'pneumonie_grave_treat_4', 'pneumonie_grave_no_ref_treat_0', 'pneumonie_grave_no_ref_treat_1', 'pneumonie_grave_no_ref_treat_3', 'pneumonie_grave_no_ref_treat_5', 'pneumonie_grave_no_ref_treat_6', 'pneumonie_treat_0', 'pneumonie_treat_1', 'deshydratation_severe_pas_grave_perfusion_treat_3', 'deshydratation_severe_pas_grave_perfusion_treat_4', 'deshydratation_severe_pas_grave_perfusion_treat_5', 'deshydratation_severe_pas_grave_perfusion_treat_6', 'deshydratation_severe_pas_grave_perfusion_treat_8', 'deshydratation_severe_pas_grave_perfusion_treat_9', 'deshydratation_severe_pas_grave_perfusion_treat_10', 'deshydratation_severe_pas_grave_perfusion_treat_11', 'deshydratation_severe_pas_grave_perfusion_treat_15', 'deshydratation_severe_pas_grave_perfusion_treat_16', 'deshydratation_severe_pas_grave_sng_treat_2', 'deshydratation_severe_pas_grave_sng_treat_3', 'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_3', 'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_4', 'signes_deshydratation_treat_0', 'signes_deshydratation_treat_3', 'pas_deshydratation_treat_1', 'dysenterie_treat_1', 'dysenterie_treat_2', 'dysenterie_treat_3', 'diahree_persistante_treat_0', 'diahree_persistante_treat_1', 'paludisme_grave_treat_0', 'paludisme_grave_treat_1', 'paludisme_grave_treat_2', 'paludisme_grave_treat_4', 'paludisme_grave_treat_5', 'paludisme_grave_treat_7', 'paludisme_grave_no_ref_treat_0', 'paludisme_grave_no_ref_treat_1', 'paludisme_grave_no_ref_treat_2', 'paludisme_grave_no_ref_treat_3', 'paludisme_grave_no_ref_treat_5', 'paludisme_grave_no_ref_treat_6', 'paludisme_simple_treat_1', 'paludisme_simple_treat_2', 'paludisme_simple_treat_3', 'paludisme_simple_treat_4', 'paludisme_simple_treat_6', 'rougeole_compliquee_treat_0', 'rougeole_compliquee_treat_1', 'rougeole_compliquee_treat_2', 'rougeole_compliquee_treat_3', 'rougeole_complications_treat_0', 'rougeole_complications_treat_1', 'rougeole_treat_0', 'rougeole_treat_1', 'rougeole_treat_2', 'rougeole_treat_3', 'antecedent_rougeole_treat_0', 'antecedent_rougeole_treat_1', 'mastoidite_treat_0', 'mastoidite_treat_1', 'mastoidite_treat_2', 'infection_aigue_oreille_treat_0', 'infection_aigue_oreille_treat_1', 'anemie_grave_treat_0', 'anemie_treat_0', 'anemie_treat_1', 'anemie_treat_2', 'anemie_treat_3', 'anemie_treat_4', 'anemie_treat_5', 'anemie_treat_6', 'mass_treat_2', 'mass_treat_3', 'mass_treat_4', 'mass_treat_5', 'mass_treat_7', 'mass_treat_8', 'mam_treat_2', 'mam_treat_3', 'mam_treat_5', 'mam_treat_6', 'mam_treat_7', 'pas_malnutrition_treat_2', 'pas_malnutrition_treat_3', 'vih_symp_confirmee_treat_1', 'vih_symp_confirmee_treat_2', 'vih_symp_confirmee_treat_4', 'vih_confirmee_treat_1', 'vih_confirmee_treat_2', 'vih_confirmee_treat_4', 'vih_symp_probable_treat_1', 'vih_symp_probable_treat_2', 'vih_symp_probable_treat_3', 'vih_possible_treat_1', 'vih_possible_treat_2', 'vih_possible_treat_3', 'paludisme_grave_tdr_negatif_treat_0', 'paludisme_grave_tdr_negatif_treat_1', 'paludisme_grave_tdr_negatif_treat_3', 'paludisme_grave_tdr_negatif_treat_4', 'paludisme_grave_tdr_negatif_treat_6', 'vitamine_a'] class ChildTreatmentExtended(BaseSqlData): table_name = "fluff_TDHChildTreatmentFluff" slug = 'child_treatment' title = 'Child Treatment' @property def filters(self): return [] @property def columns(self): from custom.tdh.models import TDHChildTreatmentFluff return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS] @property def 
headers(self): from custom.tdh.models import TDHChildTreatmentFluff return [DataTablesColumn(self.header(k)) for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS + ['case_id']] @property def group_by(self): from custom.tdh.models import TDHChildTreatmentFluff return [k for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
puttarajubr/commcare-hq
custom/tdh/sqldata.py
Python
bsd-3-clause
25,506
#!/usr/bin/python -u # coding=utf-8 """ Generate certificates via Let's Encrypt """ import re from subprocess import check_output, check_call from os import path import click from colorama import Fore import pexpect # Extract the file/challenge from the LetsEncrypt output e.g. CREX = re.compile( ".well-known\/acme-challenge\/(\S+) before continuing:\s+(\S+)", re.MULTILINE ) MODULE_CONFIG = 'module.yaml' # The file in our project root APPENGINE_URL = ("https://console.cloud.google.com/" + "appengine/settings/certificates") def get_default_email(): """Get a default user email from the git config.""" return check_output(['git', 'config', 'user.email']).strip() @click.command() @click.option('--appid', '-A', prompt=True) @click.option('--test/--no-test', default=True) @click.option('--domains', '-d', multiple=True) @click.option('--app-path', default=path.abspath(path.dirname(__file__))) @click.option('--acme-path', required=True) @click.option('--email', default=get_default_email) def gen(test, appid, domains, acme_path, app_path, email): """Regenerate the keys. Run all the steps, being: 1. Call Let's Encrypt 2. Capture the challenges from the LE output 3. Deploy the AppEngine module 4. Print Cert. to terminal """ common_name = domains[0] # noqa sans = " ".join(domains) # noqa click.echo(""" APPID: {appid} Test: {test} Common Name: {common_name} Domain(s): {sans} App Path: {app_path} ACME path: {acme_path} User Email: {email} """.format(**{ k: Fore.YELLOW + str(v) + Fore.RESET for k, v in locals().items() })) CERT_PATH = acme_path KEY_PATH = acme_path CHAIN_PATH = acme_path FULLCHAIN_PATH = acme_path CONFIG_DIR = acme_path WORK_DIR = path.join(acme_path, 'tmp') LOG_DIR = path.join(acme_path, 'logs') cmd = [ 'letsencrypt', 'certonly', '--rsa-key-size', '2048', '--manual', '--agree-tos', '--manual-public-ip-logging-ok', '--text', '--cert-path', CERT_PATH, '--key-path', KEY_PATH, '--chain-path', CHAIN_PATH, '--fullchain-path', FULLCHAIN_PATH, '--config-dir', CONFIG_DIR, '--work-dir', WORK_DIR, '--logs-dir', LOG_DIR, '--email', email, '--domain', ",".join(domains), ] if test: cmd.append('--staging') print("$ " + Fore.MAGENTA + " ".join(cmd) + Fore.RESET) le = pexpect.spawn(" ".join(cmd)) out = '' idx = le.expect(["Press ENTER", "Select the appropriate number"]) if idx == 1: # 1: Keep the existing certificate for now # 2: Renew & replace the cert (limit ~5 per 7 days) print le.before + le.after le.interact("\r") print "..." le.sendline("") if le.expect(["Press ENTER", pexpect.EOF]) == 1: # EOF - User chose to not update certs. return out += le.before # Hit "Enter" for each domain; we extract all challenges at the end; # We stop just at the last "Enter to continue" so we can publish # our challenges on AppEngine. 
for i in xrange(len(domains) - 1): le.sendline("") le.expect("Press ENTER") out += le.before # The challenges will be in `out` in the form of CREX challenges = CREX.findall(out) if not challenges: raise Exception("Expected challenges from the output") for filename, challenge in challenges: filepath = path.join(app_path, "challenges", filename) print "[%s]\n\t%s\n\t=> %s" % ( Fore.BLUE + filepath + Fore.RESET, Fore.GREEN + filename + Fore.RESET, Fore.YELLOW + challenge + Fore.RESET ) with open(filepath, 'w') as f: f.write(challenge) # Deploy to AppEngine cmd = [ 'appcfg.py', 'update', '-A', appid, path.join(app_path, MODULE_CONFIG) ] print("$ " + Fore.MAGENTA + " ".join(cmd) + Fore.RESET) check_call(cmd) # After deployment, continue the Let's Encrypt (which has been waiting # on the last domain) le.sendline("") le.expect(pexpect.EOF) le.close() if le.exitstatus: print Fore.RED + "\nletsencrypt failure: " + Fore.RESET + le.before return print "\nletsencrypt complete.", le.before # Convert the key to a format AppEngine can use # LE seems to choose the domain at random, so we have to pluck it. CPATH_REX = ( "Your certificate and chain have been saved at (.+)fullchain\.pem\." ) outstr = le.before.replace("\n", "").replace('\r', '') results = re.search(CPATH_REX, outstr, re.MULTILINE) LIVE_PATH = "".join(results.group(1).split()) CHAIN_PATH = path.join(LIVE_PATH, "fullchain.pem") PRIVKEY_PATH = path.join(LIVE_PATH, "privkey.pem") cmd = [ 'openssl', 'rsa', '-in', PRIVKEY_PATH, '-outform', 'pem', '-inform', 'pem' ] print "$ " + Fore.MAGENTA + " ".join(cmd) + Fore.RESET priv_text = check_output(cmd) with open(CHAIN_PATH, 'r') as cp: pub_text = cp.read() print """ --- Private Key --- at {PRIVKEY_PATH} (the above file must be converted with {cmd} to a format usable by AppEngine, the result of which will be as follows) {priv_text} --- Public Key Chain --- at {CHAIN_PATH} {pub_text} ✄ Copy the above into the respective fields of AppEngine at https://console.cloud.google.com/appengine/settings/certificates """.format( PRIVKEY_PATH=PRIVKEY_PATH, priv_text=Fore.RED + priv_text + Fore.RESET, CHAIN_PATH=CHAIN_PATH, pub_text=Fore.BLUE + pub_text + Fore.RESET, cmd=Fore.MAGENTA + " ".join(cmd) + Fore.RESET, ) if __name__ == '__main__': gen()
SB-Technology-Holdings-International/WateringWebClient
regen.py
Python
bsd-3-clause
5,942
# Copyright (c) 2012, CyberPoint International, LLC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the CyberPoint International, LLC nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' This module provides tools to represent and handle dynamic Bayesian networks with discrete conditional probability distributions. Dynamic Bayesian networks represent systems that change over time. This means that each node in the BN has a finite number of outcomes, the distribution over which is dependent on the outcomes of the node's parents and on the outcomes of the Bayesian network at the previous time interval. In other words, the Bayesian network changes over time according to Bayesian conditional probability rules. ''' import random import sys from orderedskeleton import OrderedSkeleton class DynDiscBayesianNetwork(OrderedSkeleton): ''' This class represents a dynamic Bayesian network with discrete CPD tables. It contains the attributes *V*, *E*, *initial_Vdata*, and *twotbn_Vdata*, and the method *randomsample*. ''' def __init__(self, orderedskeleton=None, nodedata=None): ''' This class can be called either with or without arguments. If it is called without arguments, none of its attributes are instantiated and it is left to the user to instantiate them manually. If it is called with arguments, the attributes will be loaded directly from the inputs. The arguments must be (in order): 1. *orderedskeleton* -- An instance of the :doc:`OrderedSkeleton <orderedskeleton>` or :doc:`GraphSkeleton <graphskeleton>` (as long as it's ordered) class. 2. *nodedata* -- An instance of the :doc:`NodeData <nodedata>` class. If these arguments are present, all attributes of the class (*V*, *E*, and *Vdata*) will be automatically copied from the graph skeleton and node data inputs. This class requires that the *initial_Vdata* and *twotbn_Vdata* attributes get loaded with a dictionary with node data of the following fomat:: { "initial_Vdata": { "<vertex 1>": <dict containing vertex 1 data>, ... "<vertex n>": <dict containing vertex n data> } "twotbn_Vdata": { "<vertex 1>": <dict containing vertex 1 data>, ... 
"<vertex n>": <dict containing vertex n data> } } In particular, the ``"parents"`` attribute of ``"twotbn_Vdata"`` has the following format:: "twotbn_Vdata": { "vertex": { "parents": ["past_<vertex 1>",...,"past_<vertex n>", "vertex 1",..., "vertex m"] ... } } Where vertices 1 through *n* come from the previous time interval, and vertices 1 through *m* come from the current time interval. Note that additional keys besides the ones listed are possible in the dict of each vertex. For a full example see :doc:`unittestdyndict`. Upon loading, the class will also check that the keys of *Vdata* correspond to the vertices in *V*. ''' if (orderedskeleton != None and nodedata != None): try: self.V = orderedskeleton.V '''A list of the names of the vertices.''' self.E = orderedskeleton.E '''A list of [origin, destination] pairs of vertices that make edges.''' self.initial_Vdata = nodedata.initial_Vdata '''A dictionary containing CPD data for the Bayesian network at time interval 0.''' self.twotbn_Vdata = nodedata.twotbn_Vdata '''A dictionary containing CPD data for the Bayesian network for time intervals greater than 0.''' except: raise Exception, "Inputs were malformed; first arg must contain V and E attributes and second arg must contain initial_Vdata and twotbn_Vdata attributes." # check that inputs match up assert (sorted(self.V) == sorted(self.initial_Vdata.keys())), ("initial_Vdata vertices did not match vertex data:", self.V, self.Vdata.keys()) assert (sorted(self.V) == sorted(self.twotbn_Vdata.keys())), ("twotbn_Vdata vertices did not match vertex data:", self.V, self.Vdata.keys()) def randomsample(self, n): ''' This method produces a sequence of length *n* containing one dynamic Bayesian network sample over *n* time units. In other words, the first entry of the sequence is a sample from the initial Bayesian network, and each subsequent entry is sampled from the Bayesian network conditioned on the outcomes of its predecessor. This function requires a specific dictionary format in Vdata, as shown in :doc:`dynamic discrete bayesian network<unittestdyndict>`. This function takes the following arguments: 1. *n* -- The number of time units over which to sample (thus also the length of the sequence produced) And returns: A list of *n* random samples, each conditioned on its precedessor, each a dict containing (vertex: value) pairs. Usage example: this would produce a sequence of 10 samples, one per time step, each conditioned on its predecessor:: import json from graphskeleton import GraphSkeleton from dyndiscbayesiannetwork import DynDiscBayesianNetwork path = "../tests/unittestdyndict.txt" # an input file f = open(path, 'r') g = eval(f.read()) d = DynDiscBayesianNetwork() skel = GraphSkeleton() skel.V = g["V"] skel.E = g["E"] skel.toporder() d.V = skel.V d.E = skel.E d.initial_Vdata = g["initial_Vdata"] d.twotbn_Vdata = g["twotbn_Vdata"] seq = d.randomsample(10) print json.dumps(seq, indent=2) ''' assert (isinstance(n, int) and n > 0), "Argument must be a positive integer." 
random.seed() seq = [] for t in range(n): outcome = dict() for vertex in self.V: outcome[vertex] = "default" def assignnode(s): # find entry in dictionary and store if t == 0: Vdataentry = self.initial_Vdata[s] else: Vdataentry = self.twotbn_Vdata[s] p = Vdataentry["parents"] if (not p): distribution = Vdataentry["cprob"] else: # find parents from previous time step (if necessary) pvalues = [] for parent in p: if parent[:5] == "past_": pvalues.append(str(seq[t-1][parent[5:]])) else: pvalues.append(str(outcome[parent])) for pvalue in pvalues: assert pvalue != 'default', "Graph skeleton was not topologically ordered." distribution = Vdataentry["cprob"][str(pvalues)] # choose interval rand = random.random() lbound = 0 ubound = 0 for interval in range(int(Vdataentry["numoutcomes"])): ubound += distribution[interval] if (lbound <= rand and rand < ubound): rindex = interval break else: lbound = ubound return Vdataentry["vals"][rindex] for s in self.V: if (outcome[s] == "default"): outcome[s] = assignnode(s) seq.append(outcome) return seq
ririw/libpgm
libpgm/dyndiscbayesiannetwork.py
Python
bsd-3-clause
9,472
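Inside randomsample above, the nested assignnode helper picks each vertex's outcome by walking the cumulative distribution stored under "cprob". A small standalone sketch of that sampling step, with made-up outcome names and probabilities:

import random

def sample_categorical(vals, probs):
    # choose the interval of the cumulative distribution that a uniform
    # draw falls into, as assignnode does with the vertex's "cprob" row
    rand = random.random()
    lbound = ubound = 0.0
    for i, p in enumerate(probs):
        ubound += p
        if lbound <= rand < ubound:
            return vals[i]
        lbound = ubound
    return vals[-1]   # guard against floating-point round-off at 1.0

print(sample_categorical(['low', 'medium', 'high'], [0.2, 0.5, 0.3]))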
from django.conf.urls import patterns, url from django.views.generic import RedirectView from . import views APP_SLUGS = { 'chrono': 'Chrono', 'face_value': 'Face_Value', 'podcasts': 'Podcasts', 'roller': 'Roller', 'webfighter': 'Webfighter', 'generalnotes': 'General_Notes', 'rtcamera': 'rtcamera' } def redirect_doc(uri, request=None): view = RedirectView.as_view( url='https://developer.mozilla.org/docs%s' % uri) return view(request) if request else view redirect_patterns = patterns('', url('^docs/firefox_os_guideline$', redirect_doc('/Web/Apps/Design'), name='ecosystem.ffos_guideline'), url('^docs/responsive_design$', redirect_doc('/Web_Development/Mobile/Responsive_design'), name='ecosystem.responsive_design'), url('^docs/patterns$', redirect_doc('/Web/Apps/Design/Responsive_Navigation_Patterns'), name='ecosystem.design_patterns'), url('^docs/review$', redirect_doc('/Web/Apps/Publishing/Marketplace_review_criteria'), name='ecosystem.publish_review'), url('^docs/deploy$', redirect_doc('/Mozilla/Marketplace/Options'), name='ecosystem.publish_deploy'), url('^docs/hosted$', redirect_doc('/Mozilla/Marketplace/Publish_options#Hosted_apps'), name='ecosystem.publish_hosted'), url('^docs/submission$', redirect_doc('/Web/Apps/Publishing/Submitting_an_app'), name='ecosystem.publish_submit'), url('^docs/packaged$', redirect_doc('/Web/Apps/Developing/Packaged_apps'), name='ecosystem.publish_packaged'), url('^docs/intro_apps$', redirect_doc('/Web/Apps/Quickstart/Build/Intro_to_open_web_apps'), name='ecosystem.build_intro'), url('^docs/firefox_os$', redirect_doc('/Mozilla/Firefox_OS'), name='ecosystem.build_ffos'), url('^docs/manifests$', redirect_doc('/Web/Apps/FAQs/About_app_manifests'), name='ecosystem.build_manifests'), url('^docs/apps_offline$', redirect_doc('/Web/Apps/Offline_apps'), name='ecosystem.build_apps_offline'), url('^docs/game_apps$', redirect_doc('/Web/Apps/Developing/Games'), name='ecosystem.build_game_apps'), url('^docs/mobile_developers$', redirect_doc('/Web/Apps/Quickstart/Build/For_mobile_developers'), name='ecosystem.build_mobile_developers'), url('^docs/web_developers$', redirect_doc('/Web/Apps/Quickstart/Build/For_Web_developers'), name='ecosystem.build_web_developers'), url('^docs/firefox_os_simulator$', redirect_doc('/Tools/Firefox_OS_Simulator'), name='ecosystem.firefox_os_simulator'), url('^docs/payments$', redirect_doc('/Web/Apps/Quickstart/Build/Payments'), name='ecosystem.build_payments'), url('^docs/concept$', redirect_doc('/Web/Apps/Quickstart/Design/Concept_A_great_app'), name='ecosystem.design_concept'), url('^docs/fundamentals$', redirect_doc('/Web/Apps/Quickstart/Design/Design_Principles'), name='ecosystem.design_fundamentals'), url('^docs/ui_guidelines$', redirect_doc('/Apps/Design'), name='ecosystem.design_ui'), url('^docs/quick_start$', redirect_doc('/Web/Apps/Quickstart/Build/Your_first_app'), name='ecosystem.build_quick'), url('^docs/reference_apps$', redirect_doc('/Web/Apps/Reference_apps'), name='ecosystem.build_reference'), url('^docs/apps/(?P<page>\w+)?$', lambda req, page: redirect_doc('/Web/Apps/Reference_apps/' + APP_SLUGS.get(page, ''), req), name='ecosystem.apps_documentation'), url('^docs/payments/status$', redirect_doc('/Mozilla/Marketplace/Payments_Status'), name='ecosystem.publish_payments'), url('^docs/tools$', redirect_doc('/Web/Apps/Quickstart/Build/App_tools'), name='ecosystem.build_tools'), url('^docs/app_generator$', redirect_doc('/Web/Apps/Developing/App_templates'), name='ecosystem.build_app_generator'), url('^docs/app_manager$', 
redirect_doc('/Mozilla/Firefox_OS/Using_the_App_Manager'), name='ecosystem.app_manager'), url('^docs/dev_tools$', redirect_doc('/Tools'), name='ecosystem.build_dev_tools'), # Doesn't start with docs/, but still redirects to MDN. url('^dev_phone$', redirect_doc('/Mozilla/Firefox_OS/Developer_phone_guide/Flame'), name='ecosystem.dev_phone'), ) urlpatterns = redirect_patterns + patterns('', url('^$', views.landing, name='ecosystem.landing'), url('^partners$', views.partners, name='ecosystem.partners'), url('^support$', views.support, name='ecosystem.support'), url('^docs/badges$', views.publish_badges, name='ecosystem.publish_badges') )
andymckay/zamboni
mkt/ecosystem/urls.py
Python
bsd-3-clause
4,853
from __future__ import division import argparse import multiprocessing import numpy as np import PIL import chainer import chainer.functions as F import chainer.links as L from chainer.optimizer_hooks import WeightDecay from chainer import serializers from chainer import training from chainer.training import extensions import chainermn from chainercv.chainer_experimental.datasets.sliceable import TransformDataset from chainercv.chainer_experimental.training.extensions import make_shift from chainercv.links.model.fpn.misc import scale_img from chainercv import transforms from chainercv.datasets import coco_instance_segmentation_label_names from chainercv.datasets import COCOInstanceSegmentationDataset from chainercv.links import MaskRCNNFPNResNet101 from chainercv.links import MaskRCNNFPNResNet50 from chainercv.datasets import coco_bbox_label_names from chainercv.datasets import COCOBboxDataset from chainercv.links import FasterRCNNFPNResNet101 from chainercv.links import FasterRCNNFPNResNet50 from chainercv.links.model.fpn import bbox_head_loss_post from chainercv.links.model.fpn import bbox_head_loss_pre from chainercv.links.model.fpn import mask_head_loss_post from chainercv.links.model.fpn import mask_head_loss_pre from chainercv.links.model.fpn import rpn_loss # https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator try: import cv2 cv2.setNumThreads(0) except ImportError: pass class TrainChain(chainer.Chain): def __init__(self, model): super(TrainChain, self).__init__() with self.init_scope(): self.model = model def forward(self, imgs, bboxes, labels, masks=None): B = len(imgs) pad_size = np.array( [im.shape[1:] for im in imgs]).max(axis=0) pad_size = ( np.ceil( pad_size / self.model.stride) * self.model.stride).astype(int) x = np.zeros( (len(imgs), 3, pad_size[0], pad_size[1]), dtype=np.float32) for i, img in enumerate(imgs): _, H, W = img.shape x[i, :, :H, :W] = img x = self.xp.array(x) bboxes = [self.xp.array(bbox) for bbox in bboxes] labels = [self.xp.array(label) for label in labels] sizes = [img.shape[1:] for img in imgs] with chainer.using_config('train', False): hs = self.model.extractor(x) rpn_locs, rpn_confs = self.model.rpn(hs) anchors = self.model.rpn.anchors(h.shape[2:] for h in hs) rpn_loc_loss, rpn_conf_loss = rpn_loss( rpn_locs, rpn_confs, anchors, sizes, bboxes) rois, roi_indices = self.model.rpn.decode( rpn_locs, rpn_confs, anchors, x.shape) rois = self.xp.vstack([rois] + bboxes) roi_indices = self.xp.hstack( [roi_indices] + [self.xp.array((i,) * len(bbox)) for i, bbox in enumerate(bboxes)]) rois, roi_indices = self.model.bbox_head.distribute(rois, roi_indices) rois, roi_indices, head_gt_locs, head_gt_labels = bbox_head_loss_pre( rois, roi_indices, self.model.bbox_head.std, bboxes, labels) head_locs, head_confs = self.model.bbox_head(hs, rois, roi_indices) head_loc_loss, head_conf_loss = bbox_head_loss_post( head_locs, head_confs, roi_indices, head_gt_locs, head_gt_labels, B) mask_loss = 0 if masks is not None: # For reducing unnecessary CPU/GPU copy, `masks` is kept in CPU. 
pad_masks = [ np.zeros( (mask.shape[0], pad_size[0], pad_size[1]), dtype=np.bool) for mask in masks] for i, mask in enumerate(masks): _, H, W = mask.shape pad_masks[i][:, :H, :W] = mask masks = pad_masks mask_rois, mask_roi_indices, gt_segms, gt_mask_labels =\ mask_head_loss_pre( rois, roi_indices, masks, bboxes, head_gt_labels, self.model.mask_head.segm_size) n_roi = sum([len(roi) for roi in mask_rois]) if n_roi > 0: segms = self.model.mask_head(hs, mask_rois, mask_roi_indices) mask_loss = mask_head_loss_post( segms, mask_roi_indices, gt_segms, gt_mask_labels, B) else: # Compute dummy variables to complete the computational graph mask_rois[0] = self.xp.array([[0, 0, 1, 1]], dtype=np.float32) mask_roi_indices[0] = self.xp.array([0], dtype=np.int32) segms = self.model.mask_head(hs, mask_rois, mask_roi_indices) mask_loss = 0 * F.sum(segms) loss = (rpn_loc_loss + rpn_conf_loss + head_loc_loss + head_conf_loss + mask_loss) chainer.reporter.report({ 'loss': loss, 'loss/rpn/loc': rpn_loc_loss, 'loss/rpn/conf': rpn_conf_loss, 'loss/bbox_head/loc': head_loc_loss, 'loss/bbox_head/conf': head_conf_loss, 'loss/mask_head': mask_loss}, self) return loss class Transform(object): def __init__(self, min_size, max_size, mean): self.min_size = min_size self.max_size = max_size self.mean = mean def __call__(self, in_data): if len(in_data) == 4: img, mask, label, bbox = in_data else: img, bbox, label = in_data # Flipping img, params = transforms.random_flip( img, x_random=True, return_param=True) x_flip = params['x_flip'] bbox = transforms.flip_bbox( bbox, img.shape[1:], x_flip=x_flip) # Scaling and mean subtraction img, scale = scale_img( img, self.min_size, self.max_size) img -= self.mean bbox = bbox * scale if len(in_data) == 4: mask = transforms.flip(mask, x_flip=x_flip) mask = transforms.resize( mask.astype(np.float32), img.shape[1:], interpolation=PIL.Image.NEAREST).astype(np.bool) return img, bbox, label, mask else: return img, bbox, label def converter(batch, device=None): # do not send data to gpu (device is ignored) return tuple(list(v) for v in zip(*batch)) def main(): parser = argparse.ArgumentParser() parser.add_argument( '--model', choices=('mask_rcnn_fpn_resnet50', 'mask_rcnn_fpn_resnet101', 'faster_rcnn_fpn_resnet50', 'faster_rcnn_fpn_resnet101'), default='faster_rcnn_fpn_resnet50') parser.add_argument('--batchsize', type=int, default=16) parser.add_argument('--iteration', type=int, default=90000) parser.add_argument('--step', type=int, nargs='*', default=[60000, 80000]) parser.add_argument('--out', default='result') parser.add_argument('--resume') args = parser.parse_args() # https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator if hasattr(multiprocessing, 'set_start_method'): multiprocessing.set_start_method('forkserver') p = multiprocessing.Process() p.start() p.join() comm = chainermn.create_communicator('pure_nccl') device = comm.intra_rank if args.model == 'faster_rcnn_fpn_resnet50': mode = 'bbox' model = FasterRCNNFPNResNet50( n_fg_class=len(coco_bbox_label_names), pretrained_model='imagenet') elif args.model == 'faster_rcnn_fpn_resnet101': mode = 'bbox' model = FasterRCNNFPNResNet101( n_fg_class=len(coco_bbox_label_names), pretrained_model='imagenet') elif args.model == 'mask_rcnn_fpn_resnet50': mode = 'instance_segmentation' model = MaskRCNNFPNResNet50( n_fg_class=len(coco_instance_segmentation_label_names), pretrained_model='imagenet') elif args.model == 'mask_rcnn_fpn_resnet101': mode = 'instance_segmentation' model = MaskRCNNFPNResNet101( 
n_fg_class=len(coco_instance_segmentation_label_names), pretrained_model='imagenet') model.use_preset('evaluate') train_chain = TrainChain(model) chainer.cuda.get_device_from_id(device).use() train_chain.to_gpu() if mode == 'bbox': train = TransformDataset( COCOBboxDataset(year='2017', split='train'), ('img', 'bbox', 'label'), Transform(800, 1333, model.extractor.mean)) elif mode == 'instance_segmentation': train = TransformDataset( COCOInstanceSegmentationDataset(split='train', return_bbox=True), ('img', 'bbox', 'label', 'mask'), Transform(800, 1333, model.extractor.mean)) if comm.rank == 0: indices = np.arange(len(train)) else: indices = None indices = chainermn.scatter_dataset(indices, comm, shuffle=True) train = train.slice[indices] train_iter = chainer.iterators.MultiprocessIterator( train, args.batchsize // comm.size, n_processes=args.batchsize // comm.size, shared_mem=100 * 1000 * 1000 * 4) optimizer = chainermn.create_multi_node_optimizer( chainer.optimizers.MomentumSGD(), comm) optimizer.setup(train_chain) optimizer.add_hook(WeightDecay(0.0001)) model.extractor.base.conv1.disable_update() model.extractor.base.res2.disable_update() for link in model.links(): if isinstance(link, L.BatchNormalization): link.disable_update() n_iteration = args.iteration * 16 / args.batchsize updater = training.updaters.StandardUpdater( train_iter, optimizer, converter=converter, device=device) trainer = training.Trainer( updater, (n_iteration, 'iteration'), args.out) @make_shift('lr') def lr_schedule(trainer): base_lr = 0.02 * args.batchsize / 16 warm_up_duration = 500 warm_up_rate = 1 / 3 iteration = trainer.updater.iteration if iteration < warm_up_duration: rate = warm_up_rate \ + (1 - warm_up_rate) * iteration / warm_up_duration else: rate = 1 for step in args.step: if iteration >= step * 16 / args.batchsize: rate *= 0.1 return base_lr * rate trainer.extend(lr_schedule) if comm.rank == 0: log_interval = 10, 'iteration' trainer.extend(extensions.LogReport(trigger=log_interval)) trainer.extend(extensions.observe_lr(), trigger=log_interval) trainer.extend(extensions.PrintReport( ['epoch', 'iteration', 'lr', 'main/loss', 'main/loss/rpn/loc', 'main/loss/rpn/conf', 'main/loss/bbox_head/loc', 'main/loss/bbox_head/conf', 'main/loss/mask_head' ]), trigger=log_interval) trainer.extend(extensions.ProgressBar(update_interval=10)) trainer.extend(extensions.snapshot(), trigger=(10000, 'iteration')) trainer.extend( extensions.snapshot_object( model, 'model_iter_{.updater.iteration}'), trigger=(n_iteration, 'iteration')) if args.resume: serializers.load_npz(args.resume, trainer, strict=False) trainer.run() if __name__ == '__main__': main()
chainer/chainercv
examples/fpn/train_multi.py
Python
mit
11,453
__author__ = 'cgonzalez'
carlgonz/u-fit
src/python/u_fit/modules/__init__.py
Python
mit
25
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin from flexget import options, plugin from flexget.event import event from flexget.terminal import console from flexget.manager import Session try: from flexget.plugins.internal.api_t411 import (T411Proxy) except: raise plugin.DependencyError(issued_by='cli_series', missing='api_t411', message='Torrent411 commandline interface not loaded') def do_cli(manager, options): """ Dispach cli action :param manager: :param options: :return: """ if options.t411_action == 'list-cats': print_categories(parent_category_name=options.category) elif options.t411_action == 'add-auth': add_credential(username=options.username, password=options.password) elif options.t411_action == 'list-auth': pass elif options.t411_action == 'list-terms': print_terms(category_name=options.category, term_type_name=options.type) def add_credential(username, password): """ Add (or update) credential into database :param username: :param password: :return: """ proxy = T411Proxy() is_new = proxy.add_credential(username=username, password=password) if is_new: console('Credential successfully added') else: console('Credential successfully updated') def print_terms(category_name=None, term_type_name=None): proxy = T411Proxy() proxy.set_credential() formatting_main = '%-60s %-5s %-5s' formatting_sub = ' %-55s %-5s %-5s' console(formatting_main % ('Name', 'PID', 'ID')) if term_type_name: console("Not yet implemented !") else: with Session() as session: categories = proxy.find_categories(category_name=category_name, is_sub_category=True, session=session) for category in categories: console(formatting_main % (category.name, category.parent_id, category.id)) for term_type in category.term_types: console(formatting_main % (term_type.name, '', term_type.id)) for term in term_type.terms: console(formatting_sub % (term.name, term_type.id, term.id)) def print_categories(parent_category_name=None): """ Print category and its sub-categories :param parent_category_name: if None, all categories will be displayed :return: """ proxy = T411Proxy() proxy.set_credential() with Session() as session: if parent_category_name is None: categories = proxy.main_categories(session=session) else: categories = proxy.find_categories(parent_category_name, session=session) formatting_main = '%-30s %-5s %-5s' formatting_sub = ' %-25s %-5s %-5s' console(formatting_main % ('Category name', 'PID', 'ID')) for category in categories: console(formatting_main % (category.name, category.parent_id, category.id)) for sub_category in category.sub_categories: console(formatting_sub % (sub_category.name, sub_category.parent_id, sub_category.id)) @event('options.register') def register_parser_arguments(): # Register the command parser = options.register_command('t411', do_cli, help='view and manipulate the Torrent411 plugin database') # Set up our subparsers action_parsers = parser.add_subparsers(title='actions', metavar='<action>', dest='t411_action') auth_parser = action_parsers.add_parser('add-auth', help='authorize Flexget to access your Torrent411 account') auth_parser.add_argument('username', metavar='<username>', help='Your t411 username') auth_parser.add_argument('password', metavar='<password>', help='Your t411 password') list_categories_parser = action_parsers.add_parser('list-cats', help='list available categories on Torrent411') list_categories_parser.add_argument('category', nargs='?', metavar='<category>', help='limit list to 
all, main or sub categories (default: %(default)s)') list_terms = action_parsers.add_parser('list-terms', help='list available terms usable on Torrent411') list_terms.add_argument('--category', help='show terms only for this category') list_terms.add_argument('--type', help='show terms only for this term type')
oxc/Flexget
flexget/plugins/cli/t411.py
Python
mit
4,537
# -*- coding: utf-8 -*- """ .. module:: admin """ from django.contrib import admin from apps.volontulo.models import Offer from apps.volontulo.models import Organization from apps.volontulo.models import UserProfile admin.site.register(Offer) admin.site.register(Organization) admin.site.register(UserProfile)
magul/volontulo
backend/apps/volontulo/admin.py
Python
mit
315
"""SCons.Tool.packaging.msi The msi packager. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo" import os import SCons from SCons.Action import Action from SCons.Builder import Builder from xml.dom.minidom import * from xml.sax.saxutils import escape from SCons.Tool.packaging import stripinstallbuilder # # Utility functions # def convert_to_id(s, id_set): """ Some parts of .wxs need an Id attribute (for example: The File and Directory directives. The charset is limited to A-Z, a-z, digits, underscores, periods. Each Id must begin with a letter or with a underscore. Google for "CNDL0015" for information about this. Requirements: * the string created must only contain chars from the target charset. * the string created must have a minimal editing distance from the original string. * the string created must be unique for the whole .wxs file. Observation: * There are 62 chars in the charset. Idea: * filter out forbidden characters. Check for a collision with the help of the id_set. Add the number of the number of the collision at the end of the created string. Furthermore care for a correct start of the string. """ charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz0123456789_.' if s[0] in '0123456789.': s += '_'+s id = [c for c in s if c in charset] # did we already generate an id for this file? try: return id_set[id][s] except KeyError: # no we did not so initialize with the id if id not in id_set: id_set[id] = { s : id } # there is a collision, generate an id which is unique by appending # the collision number else: id_set[id][s] = id + str(len(id_set[id])) return id_set[id][s] def is_dos_short_file_name(file): """ examine if the given file is in the 8.3 form. """ fname, ext = os.path.splitext(file) proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot proper_fname = file.isupper() and len(fname) <= 8 return proper_ext and proper_fname def gen_dos_short_file_name(file, filename_set): """ see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982 These are no complete 8.3 dos short names. The ~ char is missing and replaced with one character from the filename. WiX warns about such filenames, since a collision might occur. Google for "CNDL1014" for more information. 
""" # guard this to not confuse the generation if is_dos_short_file_name(file): return file fname, ext = os.path.splitext(file) # ext contains the dot # first try if it suffices to convert to upper file = file.upper() if is_dos_short_file_name(file): return file # strip forbidden characters. forbidden = '."/[]:;=, ' fname = [c for c in fname if c not in forbidden] # check if we already generated a filename with the same number: # thisis1.txt, thisis2.txt etc. duplicate, num = not None, 1 while duplicate: shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\ str(num)) if len(ext) >= 2: shortname = "%s%s" % (shortname, ext[:4].upper()) duplicate, num = shortname in filename_set, num+1 assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file) filename_set.append(shortname) return shortname def create_feature_dict(files): """ X_MSI_FEATURE and doc FileTag's can be used to collect files in a hierarchy. This function collects the files into this hierarchy. """ dict = {} def add_to_dict( feature, file ): if not SCons.Util.is_List( feature ): feature = [ feature ] for f in feature: if f not in dict: dict[ f ] = [ file ] else: dict[ f ].append( file ) for file in files: if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ): add_to_dict(file.PACKAGING_X_MSI_FEATURE, file) elif hasattr( file, 'PACKAGING_DOC' ): add_to_dict( 'PACKAGING_DOC', file ) else: add_to_dict( 'default', file ) return dict def generate_guids(root): """ generates globally unique identifiers for parts of the xml which need them. Component tags have a special requirement. Their UUID is only allowed to change if the list of their contained resources has changed. This allows for clean removal and proper updates. To handle this requirement, the uuid is generated with an md5 hashing the whole subtree of a xml node. """ from hashlib import md5 # specify which tags need a guid and in which attribute this should be stored. needs_id = { 'Product' : 'Id', 'Package' : 'Id', 'Component' : 'Guid', } # find all XMl nodes matching the key, retrieve their attribute, hash their # subtree, convert hash to string and add as a attribute to the xml node. for (key,value) in needs_id.items(): node_list = root.getElementsByTagName(key) attribute = value for node in node_list: hash = md5(node.toxml()).hexdigest() hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] ) node.attributes[attribute] = hash_str def string_wxsfile(target, source, env): return "building WiX file %s"%( target[0].path ) def build_wxsfile(target, source, env): """ compiles a .wxs file from the keywords given in env['msi_spec'] and by analyzing the tree of source nodes and their tags. 
""" file = open(target[0].abspath, 'w') try: # Create a document with the Wix root tag doc = Document() root = doc.createElement( 'Wix' ) root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi' doc.appendChild( root ) filename_set = [] # this is to circumvent duplicates in the shortnames id_set = {} # this is to circumvent duplicates in the ids # Create the content build_wxsfile_header_section(root, env) build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set) generate_guids(root) build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set) build_wxsfile_default_gui(root) build_license_file(target[0].get_dir(), env) # write the xml to a file file.write( doc.toprettyxml() ) # call a user specified function if 'CHANGE_SPECFILE' in env: env['CHANGE_SPECFILE'](target, source) except KeyError, e: raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] ) # # setup function # def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set): """ Create the wix default target directory layout and return the innermost directory. We assume that the XML tree delivered in the root argument already contains the Product tag. Everything is put under the PFiles directory property defined by WiX. After that a directory with the 'VENDOR' tag is placed and then a directory with the name of the project and its VERSION. This leads to the following TARGET Directory Layout: C:\<PFiles>\<Vendor>\<Projectname-Version>\ Example: C:\Programme\Company\Product-1.2\ """ doc = Document() d1 = doc.createElement( 'Directory' ) d1.attributes['Id'] = 'TARGETDIR' d1.attributes['Name'] = 'SourceDir' d2 = doc.createElement( 'Directory' ) d2.attributes['Id'] = 'ProgramFilesFolder' d2.attributes['Name'] = 'PFiles' d3 = doc.createElement( 'Directory' ) d3.attributes['Id'] = 'VENDOR_folder' d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) ) d3.attributes['LongName'] = escape( VENDOR ) d4 = doc.createElement( 'Directory' ) project_folder = "%s-%s" % ( NAME, VERSION ) d4.attributes['Id'] = 'MY_DEFAULT_FOLDER' d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) ) d4.attributes['LongName'] = escape( project_folder ) d1.childNodes.append( d2 ) d2.childNodes.append( d3 ) d3.childNodes.append( d4 ) root.getElementsByTagName('Product')[0].childNodes.append( d1 ) return d4 # # mandatory and optional file tags # def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set): """ builds the Component sections of the wxs file with their included files. Files need to be specified in 8.3 format and in the long name format, long filenames will be converted automatically. Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag. """ root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set ) components = create_feature_dict( files ) factory = Document() def get_directory( node, dir ): """ returns the node under the given node representing the directory. Returns the component node if dir is None or empty. 
""" if dir == '' or not dir: return node Directory = node dir_parts = dir.split(os.path.sep) # to make sure that our directory ids are unique, the parent folders are # consecutively added to upper_dir upper_dir = '' # walk down the xml tree finding parts of the directory dir_parts = [d for d in dir_parts if d != ''] for d in dir_parts[:]: already_created = [c for c in Directory.childNodes if c.nodeName == 'Directory' and c.attributes['LongName'].value == escape(d)] if already_created != []: Directory = already_created[0] dir_parts.remove(d) upper_dir += d else: break for d in dir_parts: nDirectory = factory.createElement( 'Directory' ) nDirectory.attributes['LongName'] = escape( d ) nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) ) upper_dir += d nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set ) Directory.childNodes.append( nDirectory ) Directory = nDirectory return Directory for file in files: drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION ) filename = os.path.basename( path ) dirname = os.path.dirname( path ) h = { # tagname : default value 'PACKAGING_X_MSI_VITAL' : 'yes', 'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set), 'PACKAGING_X_MSI_LONGNAME' : filename, 'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set), 'PACKAGING_X_MSI_SOURCE' : file.get_path(), } # fill in the default tags given above. for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]: setattr( file, k, v ) File = factory.createElement( 'File' ) File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME ) File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME ) File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE ) File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID ) File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL ) # create the <Component> Tag under which this file should appear Component = factory.createElement('Component') Component.attributes['DiskId'] = '1' Component.attributes['Id'] = convert_to_id( filename, id_set ) # hang the component node under the root node and the file node # under the component node. Directory = get_directory( root, dirname ) Directory.childNodes.append( Component ) Component.childNodes.append( File ) # # additional functions # def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set): """ This function creates the <features> tag based on the supplied xml tree. This is achieved by finding all <component>s and adding them to a default target. It should be called after the tree has been built completly. We assume that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree. Furthermore a top-level with the name and VERSION of the software will be created. An PACKAGING_X_MSI_FEATURE can either be a string, where the feature DESCRIPTION will be the same as its title or a Tuple, where the first part will be its title and the second its DESCRIPTION. 
""" factory = Document() Feature = factory.createElement('Feature') Feature.attributes['Id'] = 'complete' Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER' Feature.attributes['Level'] = '1' Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) ) Feature.attributes['Description'] = escape( SUMMARY ) Feature.attributes['Display'] = 'expand' for (feature, files) in create_feature_dict(files).items(): SubFeature = factory.createElement('Feature') SubFeature.attributes['Level'] = '1' if SCons.Util.is_Tuple(feature): SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set ) SubFeature.attributes['Title'] = escape(feature[0]) SubFeature.attributes['Description'] = escape(feature[1]) else: SubFeature.attributes['Id'] = convert_to_id( feature, id_set ) if feature=='default': SubFeature.attributes['Description'] = 'Main Part' SubFeature.attributes['Title'] = 'Main Part' elif feature=='PACKAGING_DOC': SubFeature.attributes['Description'] = 'Documentation' SubFeature.attributes['Title'] = 'Documentation' else: SubFeature.attributes['Description'] = escape(feature) SubFeature.attributes['Title'] = escape(feature) # build the componentrefs. As one of the design decision is that every # file is also a component we walk the list of files and create a # reference. for f in files: ComponentRef = factory.createElement('ComponentRef') ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set ) SubFeature.childNodes.append(ComponentRef) Feature.childNodes.append(SubFeature) root.getElementsByTagName('Product')[0].childNodes.append(Feature) def build_wxsfile_default_gui(root): """ this function adds a default GUI to the wxs file """ factory = Document() Product = root.getElementsByTagName('Product')[0] UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_Mondo' Product.childNodes.append(UIRef) UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_ErrorProgressText' Product.childNodes.append(UIRef) def build_license_file(directory, spec): """ creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT" in the given directory """ name, text = '', '' try: name = spec['LICENSE'] text = spec['X_MSI_LICENSE_TEXT'] except KeyError: pass # ignore this as X_MSI_LICENSE_TEXT is optional if name!='' or text!='': file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' ) file.write('{\\rtf') if text!='': file.write(text.replace('\n', '\\par ')) else: file.write(name+'\\par\\par') file.write('}') file.close() # # mandatory and optional package tags # def build_wxsfile_header_section(root, spec): """ Adds the xml file node which define the package meta-data. """ # Create the needed DOM nodes and add them at the correct position in the tree. 
factory = Document() Product = factory.createElement( 'Product' ) Package = factory.createElement( 'Package' ) root.childNodes.append( Product ) Product.childNodes.append( Package ) # set "mandatory" default values if 'X_MSI_LANGUAGE' not in spec: spec['X_MSI_LANGUAGE'] = '1033' # select english # mandatory sections, will throw a KeyError if the tag is not available Product.attributes['Name'] = escape( spec['NAME'] ) Product.attributes['Version'] = escape( spec['VERSION'] ) Product.attributes['Manufacturer'] = escape( spec['VENDOR'] ) Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] ) Package.attributes['Description'] = escape( spec['SUMMARY'] ) # now the optional tags, for which we avoid the KeyErrror exception if 'DESCRIPTION' in spec: Package.attributes['Comments'] = escape( spec['DESCRIPTION'] ) if 'X_MSI_UPGRADE_CODE' in spec: Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] ) # We hardcode the media tag as our current model cannot handle it. Media = factory.createElement('Media') Media.attributes['Id'] = '1' Media.attributes['Cabinet'] = 'default.cab' Media.attributes['EmbedCab'] = 'yes' root.getElementsByTagName('Product')[0].childNodes.append(Media) # this builder is the entry-point for .wxs file compiler. wxs_builder = Builder( action = Action( build_wxsfile, string_wxsfile ), ensure_suffix = '.wxs' ) def package(env, target, source, PACKAGEROOT, NAME, VERSION, DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw): # make sure that the Wix Builder is in the environment SCons.Tool.Tool('wix').generate(env) # get put the keywords for the specfile compiler. These are the arguments # given to the package function and all optional ones stored in kw, minus # the the source, target and env one. loc = locals() del loc['kw'] kw.update(loc) del kw['source'], kw['target'], kw['env'] # strip the install builder from the source files target, source = stripinstallbuilder(target, source, env) # put the arguments into the env and call the specfile builder. env['msi_spec'] = kw specfile = wxs_builder(* [env, target, source], **kw) # now call the WiX Tool with the built specfile added as a source. msifile = env.WiX(target, specfile) # return the target and source tuple. return (msifile, source+[specfile]) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
dezelin/scons
scons-local/SCons/Tool/packaging/msi.py
Python
mit
20,208
import sys # Import renderdoc if not already imported (e.g. in the UI) if 'renderdoc' not in sys.modules and '_renderdoc' not in sys.modules: import renderdoc # Alias renderdoc for legibility rd = renderdoc # We'll need the struct data to read out of bytes objects import struct # We base our data on a MeshFormat, but we add some properties class MeshData(rd.MeshFormat): indexOffset = 0 name = '' # Recursively search for the drawcall with the most vertices def biggestDraw(prevBiggest, d): ret = prevBiggest if ret == None or d.numIndices > ret.numIndices: ret = d for c in d.children: biggest = biggestDraw(ret, c) if biggest.numIndices > ret.numIndices: ret = biggest return ret # Unpack a tuple of the given format, from the data def unpackData(fmt, data): # We don't handle 'special' formats - typically bit-packed such as 10:10:10:2 if fmt.Special(): raise RuntimeError("Packed formats are not supported!") formatChars = {} # 012345678 formatChars[rd.CompType.UInt] = "xBHxIxxxL" formatChars[rd.CompType.SInt] = "xbhxixxxl" formatChars[rd.CompType.Float] = "xxexfxxxd" # only 2, 4 and 8 are valid # These types have identical decodes, but we might post-process them formatChars[rd.CompType.UNorm] = formatChars[rd.CompType.UInt] formatChars[rd.CompType.UScaled] = formatChars[rd.CompType.UInt] formatChars[rd.CompType.SNorm] = formatChars[rd.CompType.SInt] formatChars[rd.CompType.SScaled] = formatChars[rd.CompType.SInt] # We need to fetch compCount components vertexFormat = str(fmt.compCount) + formatChars[fmt.compType][fmt.compByteWidth] # Unpack the data value = struct.unpack_from(vertexFormat, data, 0) # If the format needs post-processing such as normalisation, do that now if fmt.compType == rd.CompType.UNorm: divisor = float((2 ** (fmt.compByteWidth * 8)) - 1) value = tuple(float(i) / divisor for i in value) elif fmt.compType == rd.CompType.SNorm: maxNeg = -float(2 ** (fmt.compByteWidth * 8)) / 2 divisor = float(-(maxNeg-1)) value = tuple((float(i) if (i == maxNeg) else (float(i) / divisor)) for i in value) # If the format is BGRA, swap the two components if fmt.BGRAOrder(): value = tuple(value[i] for i in [2, 1, 0, 3]) return value # Get a list of MeshData objects describing the vertex inputs at this draw def getMeshInputs(controller, draw): state = controller.GetPipelineState() # Get the index & vertex buffers, and fixed vertex inputs ib = state.GetIBuffer() vbs = state.GetVBuffers() attrs = state.GetVertexInputs() meshInputs = [] for attr in attrs: # We don't handle instance attributes if attr.perInstance: raise RuntimeError("Instanced properties are not supported!") meshInput = MeshData() meshInput.indexResourceId = ib.resourceId meshInput.indexByteOffset = ib.byteOffset meshInput.indexByteStride = ib.byteStride meshInput.baseVertex = draw.baseVertex meshInput.indexOffset = draw.indexOffset meshInput.numIndices = draw.numIndices # If the draw doesn't use an index buffer, don't use it even if bound if not (draw.flags & rd.ActionFlags.Indexed): meshInput.indexResourceId = rd.ResourceId.Null() # The total offset is the attribute offset from the base of the vertex meshInput.vertexByteOffset = attr.byteOffset + vbs[attr.vertexBuffer].byteOffset + draw.vertexOffset * vbs[attr.vertexBuffer].byteStride meshInput.format = attr.format meshInput.vertexResourceId = vbs[attr.vertexBuffer].resourceId meshInput.vertexByteStride = vbs[attr.vertexBuffer].byteStride meshInput.name = attr.name meshInputs.append(meshInput) return meshInputs # Get a list of MeshData objects describing the vertex outputs at this 
draw def getMeshOutputs(controller, postvs): meshOutputs = [] posidx = 0 vs = controller.GetPipelineState().GetShaderReflection(rd.ShaderStage.Vertex) # Repeat the process, but this time sourcing the data from postvs. # Since these are outputs, we iterate over the list of outputs from the # vertex shader's reflection data for attr in vs.outputSignature: # Copy most properties from the postvs struct meshOutput = MeshData() meshOutput.indexResourceId = postvs.indexResourceId meshOutput.indexByteOffset = postvs.indexByteOffset meshOutput.indexByteStride = postvs.indexByteStride meshOutput.baseVertex = postvs.baseVertex meshOutput.indexOffset = 0 meshOutput.numIndices = postvs.numIndices # The total offset is the attribute offset from the base of the vertex, # as calculated by the stride per index meshOutput.vertexByteOffset = postvs.vertexByteOffset meshOutput.vertexResourceId = postvs.vertexResourceId meshOutput.vertexByteStride = postvs.vertexByteStride # Construct a resource format for this element meshOutput.format = rd.ResourceFormat() meshOutput.format.compByteWidth = rd.VarTypeByteSize(attr.varType) meshOutput.format.compCount = attr.compCount meshOutput.format.compType = rd.VarTypeCompType(attr.varType) meshOutput.format.type = rd.ResourceFormatType.Regular meshOutput.name = attr.semanticIdxName if attr.varName == '' else attr.varName if attr.systemValue == rd.ShaderBuiltin.Position: posidx = len(meshOutputs) meshOutputs.append(meshOutput) # Shuffle the position element to the front if posidx > 0: pos = meshOutputs[posidx] del meshOutputs[posidx] meshOutputs.insert(0, pos) accumOffset = 0 for i in range(0, len(meshOutputs)): meshOutputs[i].vertexByteOffset = accumOffset # Note that some APIs such as Vulkan will pad the size of the attribute here # while others will tightly pack fmt = meshOutputs[i].format accumOffset += (8 if fmt.compByteWidth > 4 else 4) * fmt.compCount return meshOutputs def getIndices(controller, mesh): # Get the character for the width of index indexFormat = 'B' if mesh.indexByteStride == 2: indexFormat = 'H' elif mesh.indexByteStride == 4: indexFormat = 'I' # Duplicate the format by the number of indices indexFormat = str(mesh.numIndices) + indexFormat # If we have an index buffer if mesh.indexResourceId != rd.ResourceId.Null(): # Fetch the data ibdata = controller.GetBufferData(mesh.indexResourceId, mesh.indexByteOffset, 0) # Unpack all the indices, starting from the first index to fetch offset = mesh.indexOffset * mesh.indexByteStride indices = struct.unpack_from(indexFormat, ibdata, offset) # Apply the baseVertex offset return [i + mesh.baseVertex for i in indices] else: # With no index buffer, just generate a range return tuple(range(mesh.numIndices)) def printMeshData(controller, meshData): indices = getIndices(controller, meshData[0]) print("Mesh configuration:") for attr in meshData: print("\t%s:" % attr.name) print("\t\t- vertex: %s / %d stride" % (attr.vertexResourceId, attr.vertexByteStride)) print("\t\t- format: %s x %s @ %d" % (attr.format.compType, attr.format.compCount, attr.vertexByteOffset)) # We'll decode the first three indices making up a triangle for i in range(0, 3): idx = indices[i] print("Vertex %d is index %d:" % (i, idx)) for attr in meshData: # This is the data we're reading from. 
This would be good to cache instead of # re-fetching for every attribute for every index offset = attr.vertexByteOffset + attr.vertexByteStride * idx data = controller.GetBufferData(attr.vertexResourceId, offset, 0) # Get the value from the data value = unpackData(attr.format, data) # We don't go into the details of semantic matching here, just print both print("\tAttribute '%s': %s" % (attr.name, value)) def sampleCode(controller): # Find the biggest drawcall in the whole capture draw = None for d in controller.GetRootActions(): draw = biggestDraw(draw, d) # Move to that draw controller.SetFrameEvent(draw.eventId, True) print("Decoding mesh inputs at %d: %s\n\n" % (draw.eventId, draw.GetName(controller.GetStructuredFile()))) # Calculate the mesh input configuration meshInputs = getMeshInputs(controller, draw) # Fetch and print the data from the mesh inputs printMeshData(controller, meshInputs) print("Decoding mesh outputs\n\n") # Fetch the postvs data postvs = controller.GetPostVSData(0, 0, rd.MeshDataStage.VSOut) # Calcualte the mesh configuration from that meshOutputs = getMeshOutputs(controller, postvs) # Print it printMeshData(controller, meshOutputs) def loadCapture(filename): # Open a capture file handle cap = rd.OpenCaptureFile() # Open a particular file - see also OpenBuffer to load from memory status = cap.OpenFile(filename, '', None) # Make sure the file opened successfully if status != rd.ReplayStatus.Succeeded: raise RuntimeError("Couldn't open file: " + str(status)) # Make sure we can replay if not cap.LocalReplaySupport(): raise RuntimeError("Capture cannot be replayed") # Initialise the replay status,controller = cap.OpenCapture(rd.ReplayOptions(), None) if status != rd.ReplayStatus.Succeeded: raise RuntimeError("Couldn't initialise replay: " + str(status)) return (cap, controller) if 'pyrenderdoc' in globals(): pyrenderdoc.Replay().BlockInvoke(sampleCode) else: rd.InitialiseReplay(rd.GlobalEnvironment(), []) if len(sys.argv) <= 1: print('Usage: python3 {} filename.rdc'.format(sys.argv[0])) sys.exit(0) cap,controller = loadCapture(sys.argv[1]) sampleCode(controller) controller.Shutdown() cap.Shutdown() rd.ShutdownReplay()
Zorro666/renderdoc
docs/python_api/examples/renderdoc/decode_mesh.py
Python
mit
9,433
""" Support for Tellstick lights. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.tellstick/ """ from homeassistant.components import tellstick from homeassistant.components.light import ATTR_BRIGHTNESS, Light from homeassistant.components.tellstick import (DEFAULT_SIGNAL_REPETITIONS, ATTR_DISCOVER_DEVICES, ATTR_DISCOVER_CONFIG) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Setup Tellstick lights.""" if (discovery_info is None or discovery_info[ATTR_DISCOVER_DEVICES] is None or tellstick.TELLCORE_REGISTRY is None): return signal_repetitions = discovery_info.get(ATTR_DISCOVER_CONFIG, DEFAULT_SIGNAL_REPETITIONS) add_devices(TellstickLight( tellstick.TELLCORE_REGISTRY.get_device(switch_id), signal_repetitions) for switch_id in discovery_info[ATTR_DISCOVER_DEVICES]) class TellstickLight(tellstick.TellstickDevice, Light): """Representation of a Tellstick light.""" def __init__(self, tellstick_device, signal_repetitions): """Initialize the light.""" self._brightness = 255 tellstick.TellstickDevice.__init__(self, tellstick_device, signal_repetitions) @property def is_on(self): """Return true if switch is on.""" return self._state @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness def set_tellstick_state(self, last_command_sent, last_data_sent): """Update the internal representation of the switch.""" from tellcore.constants import TELLSTICK_TURNON, TELLSTICK_DIM if last_command_sent == TELLSTICK_DIM: if last_data_sent is not None: self._brightness = int(last_data_sent) self._state = self._brightness > 0 else: self._state = last_command_sent == TELLSTICK_TURNON def _send_tellstick_command(self, command, data): """Handle the turn_on / turn_off commands.""" from tellcore.constants import (TELLSTICK_TURNOFF, TELLSTICK_DIM) if command == TELLSTICK_TURNOFF: self.tellstick_device.turn_off() elif command == TELLSTICK_DIM: self.tellstick_device.dim(self._brightness) else: raise NotImplementedError( "Command not implemented: {}".format(command)) def turn_on(self, **kwargs): """Turn the switch on.""" from tellcore.constants import TELLSTICK_DIM brightness = kwargs.get(ATTR_BRIGHTNESS) if brightness is not None: self._brightness = brightness self.call_tellstick(TELLSTICK_DIM, self._brightness) def turn_off(self, **kwargs): """Turn the switch off.""" from tellcore.constants import TELLSTICK_TURNOFF self.call_tellstick(TELLSTICK_TURNOFF)
justyns/home-assistant
homeassistant/components/light/tellstick.py
Python
mit
3,211
# #START_LICENSE########################################################### # # # This file is part of the Environment for Tree Exploration program # (ETE). http://etetoolkit.org # # ETE is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ETE is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # You should have received a copy of the GNU General Public License # along with ETE. If not, see <http://www.gnu.org/licenses/>. # # # ABOUT THE ETE PACKAGE # ===================== # # ETE is distributed under the GPL copyleft license (2008-2015). # # If you make use of ETE in published work, please cite: # # Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon. # ETE: a python Environment for Tree Exploration. Jaime BMC # Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24 # # Note that extra references to the specific methods implemented in # the toolkit may be available in the documentation. # # More info at http://etetoolkit.org. Contact: huerta@embl.de # # # #END_LICENSE############################################################# from StringIO import StringIO import cPickle from string import strip from collections import defaultdict import logging import os log = logging.getLogger("main") from ete2.tools.phylobuild_lib.master_task import CogSelectorTask from ete2.tools.phylobuild_lib.errors import DataError, TaskError from ete2.tools.phylobuild_lib.utils import (GLOBALS, print_as_table, generate_node_ids, encode_seqname, md5, pjoin, _min, _max, _mean, _median, _std) from ete2.tools.phylobuild_lib import db __all__ = ["CogSelector"] class CogSelector(CogSelectorTask): def __init__(self, target_sp, out_sp, seqtype, conf, confname): self.missing_factor = float(conf[confname]["_species_missing_factor"]) self.max_missing_factor = float(conf[confname]["_max_species_missing_factor"]) self.cog_hard_limit = int(conf[confname]["_max_cogs"]) node_id, clade_id = generate_node_ids(target_sp, out_sp) # Initialize task CogSelectorTask.__init__(self, node_id, "cog_selector", "MCL-COGs", None, conf[confname]) # taskid does not depend on jobs, so I set it manually self.cladeid = clade_id self.seqtype = seqtype self.targets = target_sp self.outgroups = out_sp self.init() self.size = len(target_sp | out_sp) self.cog_analysis = None self.cogs = None def finish(self): def sort_cogs_by_size(c1, c2): ''' sort cogs by descending size. If two cogs are the same size, sort them keeping first the one with the less represented species. 
Otherwise sort by sequence name sp_seqid.''' r = -1 * cmp(len(c1), len(c2)) if r == 0: # finds the cog including the less represented species c1_repr = _min([sp2cogs[_sp] for _sp, _seq in c1]) c2_repr = _min([sp2cogs[_sp] for _sp, _seq in c2]) r = cmp(c1_repr, c2_repr) if r == 0: return cmp(sorted(c1), sorted(c2)) else: return r else: return r def sort_cogs_by_sp_repr(c1, c2): c1_repr = _min([sp2cogs[_sp] for _sp, _seq in c1]) c2_repr = _min([sp2cogs[_sp] for _sp, _seq in c2]) r = cmp(c1_repr, c2_repr) if r == 0: r = -1 * cmp(len(c1), len(c2)) if r == 0: return cmp(sorted(c1), sorted(c2)) else: return r else: return r all_species = self.targets | self.outgroups # strict threshold #min_species = len(all_species) - int(round(self.missing_factor * len(all_species))) # Relax threshold for cog selection to ensure sames genes are always included min_species = len(all_species) - int(round(self.missing_factor * len(GLOBALS["target_species"]))) min_species = max(min_species, (1-self.max_missing_factor) * len(all_species)) smallest_cog, largest_cog = len(all_species), 0 all_singletons = [] sp2cogs = defaultdict(int) for cognumber, cog in enumerate(open(GLOBALS["cogs_file"])): sp2seqs = defaultdict(list) for sp, seqid in [map(strip, seq.split(GLOBALS["spname_delimiter"], 1)) for seq in cog.split("\t")]: sp2seqs[sp].append(seqid) one2one_cog = set() for sp, seqs in sp2seqs.iteritems(): #if len(seqs) != 1: # print sp, len(seqs) if sp in all_species and len(seqs) == 1: sp2cogs[sp] += 1 one2one_cog.add((sp, seqs[0])) smallest_cog = min(smallest_cog, len(one2one_cog)) largest_cog = max(largest_cog, len(one2one_cog)) all_singletons.append(one2one_cog) #if len(one2one_cog) >= min_species: # valid_cogs.append(one2one_cog) cognumber += 1 # sets the ammount of cogs in file for sp, ncogs in sorted(sp2cogs.items(), key=lambda x: x[1], reverse=True): log.log(28, "% 20s found in single copy in % 6d (%0.1f%%) COGs " %(sp, ncogs, 100 * ncogs/float(cognumber))) valid_cogs = sorted([sing for sing in all_singletons if len(sing) >= min_species], sort_cogs_by_size) log.log(28, "Largest cog size: %s. Smallest cog size: %s" %( largest_cog, smallest_cog)) self.cog_analysis = "" # save original cog names hitting the hard limit if len(valid_cogs) > self.cog_hard_limit: log.warning("Applying hard limit number of COGs: %d out of %d available" %(self.cog_hard_limit, len(valid_cogs))) self.raw_cogs = valid_cogs[:self.cog_hard_limit] self.cogs = [] # Translate sequence names into the internal DB names sp_repr = defaultdict(int) sizes = [] for co in self.raw_cogs: sizes.append(len(co)) for sp, seq in co: sp_repr[sp] += 1 co_names = ["%s%s%s" %(sp, GLOBALS["spname_delimiter"], seq) for sp, seq in co] encoded_names = db.translate_names(co_names) if len(encoded_names) != len(co): print set(co) - set(encoded_names.keys()) raise DataError("Some sequence ids could not be translated") self.cogs.append(encoded_names.values()) # ERROR! COGs selected are not the prioritary cogs sorted out before!!! # Sort Cogs according to the md5 hash of its content. 
Random # sorting but kept among runs #map(lambda x: x.sort(), self.cogs) #self.cogs.sort(lambda x,y: cmp(md5(','.join(x)), md5(','.join(y)))) log.log(28, "Analysis of current COG selection:") for sp, ncogs in sorted(sp_repr.items(), key=lambda x:x[1], reverse=True): log.log(28, " % 30s species present in % 6d COGs (%0.1f%%)" %(sp, ncogs, 100 * ncogs/float(len(self.cogs)))) log.log(28, " %d COGs selected with at least %d species out of %d" %(len(self.cogs), min_species, len(all_species))) log.log(28, " Average COG size %0.1f/%0.1f +- %0.1f" %(_mean(sizes), _median(sizes), _std(sizes))) # Some consistency checks missing_sp = (all_species) - set(sp_repr.keys()) if missing_sp: log.error("%d missing species or not present in single-copy in any cog:\n%s" %\ (len(missing_sp), '\n'.join(missing_sp))) open('etebuild.valid_species_names.tmp', 'w').write('\n'.join(sp_repr.keys()) +'\n') log.error("All %d valid species have been dumped into etebuild.valid_species_names.tmp." " You can use --spfile to restrict the analysis to those species." %len(sp_repr)) raise TaskError('missing or not single-copy species under current cog selection') CogSelectorTask.store_data(self, self.cogs, self.cog_analysis) if __name__ == "__main__": ## TEST CODE import argparse parser = argparse.ArgumentParser() # Input data related flags parser.add_argument("--cogs_file", dest="cogs_file", required=True, help="Cogs file") parser.add_argument("--spname_delimiter", dest="spname_delimiter", type=str, default = "_", help="species name delimiter character") parser.add_argument("--target_sp", dest="target_sp", type=str, nargs="+", help="target species sperated by") parser.add_argument("-m", dest="missing_factor", type=float, required=True, help="missing factor for cog selection") parser.add_argument("--max_missing", dest="max_missing_factor", type=float, default = 0.3, help="max missing factor for cog selection") parser.add_argument("--total_species", dest="total_species", type=int, required=True, help="total number of species in the analysis") args = parser.parse_args() GLOBALS["cogs_file"] = args.cogs_file GLOBALS["spname_delimiter"] = args.spname_delimiter target_sp = args.target_sp logging.basicConfig(level=logging.DEBUG) log = logging GLOBALS["target_species"] = [1] * args.total_species conf = { "user": {"_species_missing_factor": args.missing_factor, "_max_species_missing_factor": args.max_missing_factor, "_max_cogs": 10000 }} CogSelectorTask.store_data=lambda a,b,c: True C = CogSelector(set(target_sp), set(), "aa", conf, "user") db.translate_names = lambda x: dict([(n,n) for n in x]) C.finish()
sauloal/cnidaria
scripts/venv/lib/python2.7/site-packages/ete2/tools/phylobuild_lib/task/cog_selector.py
Python
mit
10,562
"""Attempt to determine the current user's "system" directories""" try: ## raise ImportError from win32com.shell import shell, shellcon except ImportError: shell = None try: import _winreg except ImportError: _winreg = None import os, sys ## The registry keys where the SHGetFolderPath values appear to be stored r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" def _winreg_getShellFolder( name ): """Get a shell folder by string name from the registry""" k = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) try: # should check that it's valid? How? return _winreg.QueryValueEx( k, name )[0] finally: _winreg.CloseKey( k ) def shell_getShellFolder( type ): """Get a shell folder by shell-constant from COM interface""" return shell.SHGetFolderPath( 0,# null hwnd type, # the (roaming) appdata path 0,# null access token (no impersonation) 0 # want current value, shellcon.SHGFP_TYPE_CURRENT isn't available, this seems to work ) def appdatadirectory( ): """Attempt to retrieve the current user's app-data directory This is the location where application-specific files should be stored. On *nix systems, this will be the ${HOME}/.config directory. On Win32 systems, it will be the "Application Data" directory. Note that for Win32 systems it is normal to create a sub-directory for storing data in the Application Data directory. """ if shell: # on Win32 and have Win32all extensions, best-case return shell_getShellFolder(shellcon.CSIDL_APPDATA) if _winreg: # on Win32, but no Win32 shell com available, this uses # a direct registry access, likely to fail on Win98/Me return _winreg_getShellFolder( 'AppData' ) # okay, what if for some reason _winreg is missing? would we want to allow ctypes? ## default case, look for name in environ... for name in ['APPDATA', 'HOME']: if name in os.environ: return os.path.join( os.environ[name], '.config' ) # well, someone's being naughty, see if we can get ~ to expand to a directory... possible = os.path.abspath(os.path.expanduser( '~/.config' )) if os.path.exists( possible ): return possible raise OSError( """Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""" ) if __name__ == "__main__": print 'AppData', appdatadirectory()
ktan2020/legacy-automation
win/Lib/site-packages/runsnakerun/homedirectory.py
Python
mit
2,694
"""The macros below aren't reliable (e.g., some fail if ``arg_string`` is `None`) or safe (``include`` doesn't guard against circular reference). For a more complete example, see `the code used in the sandbox <http://code.google.com/p/urlminer/source/browse/examples/wiki/macros.py>`_. """ import genshi.builder as bldr import dialects, core import os class Page(object): root = 'test_pages' def __init__(self,page_name): self.name = page_name def get_raw_body(self): try: f = open(os.path.join(self.root,self.name + '.txt'),'r') s = f.read() f.close() return s except IOError: return None def exists(self): try: f = open(os.path.join(self.root,self.name + '.txt'),'r') f.close() return True except IOError: return False def class_func(page_name): if not Page(page_name).exists(): return 'nonexistent' def path_func(page_name): if page_name == 'Home': return 'FrontPage' else: return page_name ## Start of macros def include(arg_string,body,isblock): page = Page(arg_string.strip()) return text2html.generate(page.get_raw_body()) def include_raw(arg_string,body,isblock): page = Page(arg_string.strip()) return bldr.tag.pre(page.get_raw_body(),class_='plain') def include_source(arg_string,body,isblock): page = Page(arg_string.strip()) return bldr.tag.pre(text2html.render(page.get_raw_body())) def source(arg_string,body,isblock): return bldr.tag.pre(text2html.render(body)) def pre(arg_string,body,isblock): return bldr.tag.pre(body) ## End of macros macros = {'include':include, 'include-raw':include_raw, 'include-source':include_source, 'source':source, 'pre':pre } def macro_dispatcher(macro_name,arg_string,body,isblock,environ): if macro_name in macros: return macros[macro_name](arg_string,body,isblock) dialect = dialects.create_dialect(dialects.creole11_base, wiki_links_base_url='', wiki_links_space_char='', # use_additions=True, no_wiki_monospace=False, wiki_links_class_func=class_func, wiki_links_path_func=path_func, macro_func=macro_dispatcher) text2html = core.Parser(dialect) if __name__ == '__main__': text = Page('CheatSheetPlus').get_raw_body() f = open(os.path.join('test_pages','CheatSheetPlus.html'),'r') rendered = f.read() f.close() f = open(os.path.join('test_pages','template.html'),'r') template = f.read() f.close() out = open(os.path.join('test_pages','out.html'),'w') out.write(template % text2html(text)) out.close() assert template % text2html(text) == rendered
hprid/creoleparser
creoleparser/test_cheat_sheet_plus.py
Python
mit
2,923
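The `macro_dispatcher` in the record above is a plain name-to-callable lookup handed to `dialects.create_dialect` as `macro_func`. Stripped of the creoleparser- and genshi-specific parts (the stand-in `pre` below returns a plain string rather than a genshi element), the dispatch pattern is roughly:

def pre(arg_string, body, isblock):
    # Simplified stand-in for the genshi-based 'pre' macro above.
    return '<pre>%s</pre>' % body

macros = {'pre': pre}

def macro_dispatcher(macro_name, arg_string, body, isblock, environ):
    # Unknown macro names fall through and return None, as in the test module.
    if macro_name in macros:
        return macros[macro_name](arg_string, body, isblock)

print(macro_dispatcher('pre', '', 'monospaced text', True, {}))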
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django.contrib.auth.models import User from django.test.utils import override_settings from allauth.account import app_settings as account_settings from allauth.account.models import EmailAddress from allauth.socialaccount.models import SocialAccount from allauth.socialaccount.tests import OAuth2TestsMixin from allauth.tests import MockedResponse, TestCase from .provider import DisqusProvider @override_settings( SOCIALACCOUNT_AUTO_SIGNUP=True, ACCOUNT_SIGNUP_FORM_CLASS=None, ACCOUNT_EMAIL_VERIFICATION=account_settings .EmailVerificationMethod.MANDATORY) class DisqusTests(OAuth2TestsMixin, TestCase): provider_id = DisqusProvider.id def get_mocked_response(self, name='Raymond Penners', email="raymond.penners@example.com"): return MockedResponse(200, """ {"response": {"name": "%s", "avatar": { "permalink": "https://lh5.googleusercontent.com/photo.jpg" }, "email": "%s", "profileUrl": "https://plus.google.com/108204268033311374519", "id": "108204268033311374519" }} """ % (name, email)) def test_account_connect(self): email = "user@example.com" user = User.objects.create(username='user', is_active=True, email=email) user.set_password('test') user.save() EmailAddress.objects.create(user=user, email=email, primary=True, verified=True) self.client.login(username=user.username, password='test') self.login(self.get_mocked_response(), process='connect') # Check if we connected... self.assertTrue(SocialAccount.objects.filter( user=user, provider=DisqusProvider.id).exists()) # For now, we do not pick up any new e-mail addresses on connect self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1) self.assertEqual(EmailAddress.objects.filter( user=user, email=email).count(), 1)
AltSchool/django-allauth
allauth/socialaccount/providers/disqus/tests.py
Python
mit
2,347
# -*- coding: utf-8 -*- # Natural Language Toolkit: ASCII visualization of NLTK trees # # Copyright (C) 2001-2015 NLTK Project # Author: Andreas van Cranenburgh <A.W.vanCranenburgh@uva.nl> # Peter Ljunglöf <peter.ljunglof@gu.se> # URL: <http://nltk.org/> # For license information, see LICENSE.TXT """ Pretty-printing of discontinuous trees. Adapted from the disco-dop project, by Andreas van Cranenburgh. https://github.com/andreasvc/disco-dop Interesting reference (not used for this code): T. Eschbach et al., Orth. Hypergraph Drawing, Journal of Graph Algorithms and Applications, 10(2) 141--157 (2006)149. http://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf """ from __future__ import division, print_function, unicode_literals from nltk.util import slice_bounds, OrderedDict from nltk.compat import string_types, python_2_unicode_compatible, unicode_repr from nltk.internals import raise_unorderable_types from nltk.tree import Tree import re import sys import codecs from cgi import escape from collections import defaultdict from operator import itemgetter from itertools import chain, islice ANSICOLOR = { 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, } @python_2_unicode_compatible class TreePrettyPrinter(object): """ Pretty-print a tree in text format, either as ASCII or Unicode. The tree can be a normal tree, or discontinuous. ``TreePrettyPrinter(tree, sentence=None, highlight=())`` creates an object from which different visualizations can be created. :param tree: a Tree object. :param sentence: a list of words (strings). If `sentence` is given, `tree` must contain integers as leaves, which are taken as indices in `sentence`. Using this you can display a discontinuous tree. :param highlight: Optionally, a sequence of Tree objects in `tree` which should be highlighted. Has the effect of only applying colors to nodes in this sequence (nodes should be given as Tree objects, terminals as indices). >>> from nltk.tree import Tree >>> tree = Tree.fromstring('(S (NP Mary) (VP walks))') >>> print(TreePrettyPrinter(tree).text()) ... # doctest: +NORMALIZE_WHITESPACE S ____|____ NP VP | | Mary walks """ def __init__(self, tree, sentence=None, highlight=()): if sentence is None: leaves = tree.leaves() if (leaves and not any(len(a) == 0 for a in tree.subtrees()) and all(isinstance(a, int) for a in leaves)): sentence = [str(a) for a in leaves] else: # this deals with empty nodes (frontier non-terminals) # and multiple/mixed terminals under non-terminals. tree = tree.copy(True) sentence = [] for a in tree.subtrees(): if len(a) == 0: a.append(len(sentence)) sentence.append(None) elif any(not isinstance(b, Tree) for b in a): for n, b in enumerate(a): if not isinstance(b, Tree): a[n] = len(sentence) sentence.append('%s' % b) self.nodes, self.coords, self.edges, self.highlight = self.nodecoords( tree, sentence, highlight) def __str__(self): return self.text() def __repr__(self): return '<TreePrettyPrinter with %d nodes>' % len(self.nodes) @staticmethod def nodecoords(tree, sentence, highlight): """ Produce coordinates of nodes on a grid. Objective: - Produce coordinates for a non-overlapping placement of nodes and horizontal lines. - Order edges so that crossing edges cross a minimal number of previous horizontal lines (never vertical lines). 
Approach: - bottom up level order traversal (start at terminals) - at each level, identify nodes which cannot be on the same row - identify nodes which cannot be in the same column - place nodes into a grid at (row, column) - order child-parent edges with crossing edges last Coordinates are (row, column); the origin (0, 0) is at the top left; the root node is on row 0. Coordinates do not consider the size of a node (which depends on font, &c), so the width of a column of the grid should be automatically determined by the element with the greatest width in that column. Alternatively, the integer coordinates could be converted to coordinates in which the distances between adjacent nodes are non-uniform. Produces tuple (nodes, coords, edges, highlighted) where: - nodes[id]: Tree object for the node with this integer id - coords[id]: (n, m) coordinate where to draw node with id in the grid - edges[id]: parent id of node with this id (ordered dictionary) - highlighted: set of ids that should be highlighted """ def findcell(m, matrix, startoflevel, children): """ Find vacant row, column index for node ``m``. Iterate over current rows for this level (try lowest first) and look for cell between first and last child of this node, add new row to level if no free row available. """ candidates = [a for _, a in children[m]] minidx, maxidx = min(candidates), max(candidates) leaves = tree[m].leaves() center = scale * sum(leaves) // len(leaves) # center of gravity if minidx < maxidx and not minidx < center < maxidx: center = sum(candidates) // len(candidates) if max(candidates) - min(candidates) > 2 * scale: center -= center % scale # round to unscaled coordinate if minidx < maxidx and not minidx < center < maxidx: center += scale if ids[m] == 0: startoflevel = len(matrix) for rowidx in range(startoflevel, len(matrix) + 1): if rowidx == len(matrix): # need to add a new row matrix.append([vertline if a not in (corner, None) else None for a in matrix[-1]]) row = matrix[rowidx] i = j = center if len(children[m]) == 1: # place unaries directly above child return rowidx, next(iter(children[m]))[1] elif all(a is None or a == vertline for a in row[min(candidates):max(candidates) + 1]): # find free column for n in range(scale): i = j = center + n while j > minidx or i < maxidx: if i < maxidx and (matrix[rowidx][i] is None or i in candidates): return rowidx, i elif j > minidx and (matrix[rowidx][j] is None or j in candidates): return rowidx, j i += scale j -= scale raise ValueError('could not find a free cell for:\n%s\n%s' 'min=%d; max=%d' % (tree[m], minidx, maxidx, dumpmatrix())) def dumpmatrix(): """Dump matrix contents for debugging purposes.""" return '\n'.join( '%2d: %s' % (n, ' '.join(('%2r' % i)[:2] for i in row)) for n, row in enumerate(matrix)) leaves = tree.leaves() if not all(isinstance(n, int) for n in leaves): raise ValueError('All leaves must be integer indices.') if len(leaves) != len(set(leaves)): raise ValueError('Indices must occur at most once.') if not all(0 <= n < len(sentence) for n in leaves): raise ValueError('All leaves must be in the interval 0..n ' 'with n=len(sentence)\ntokens: %d indices: ' '%r\nsentence: %s' % (len(sentence), tree.leaves(), sentence)) vertline, corner = -1, -2 # constants tree = tree.copy(True) for a in tree.subtrees(): a.sort(key=lambda n: min(n.leaves()) if isinstance(n, Tree) else n) scale = 2 crossed = set() # internal nodes and lexical nodes (no frontiers) positions = tree.treepositions() maxdepth = max(map(len, positions)) + 1 childcols = defaultdict(set) 
matrix = [[None] * (len(sentence) * scale)] nodes = {} ids = dict((a, n) for n, a in enumerate(positions)) highlighted_nodes = set(n for a, n in ids.items() if not highlight or tree[a] in highlight) levels = dict((n, []) for n in range(maxdepth - 1)) terminals = [] for a in positions: node = tree[a] if isinstance(node, Tree): levels[maxdepth - node.height()].append(a) else: terminals.append(a) for n in levels: levels[n].sort(key=lambda n: max(tree[n].leaves()) - min(tree[n].leaves())) terminals.sort() positions = set(positions) for m in terminals: i = int(tree[m]) * scale assert matrix[0][i] is None, (matrix[0][i], m, i) matrix[0][i] = ids[m] nodes[ids[m]] = sentence[tree[m]] if nodes[ids[m]] is None: nodes[ids[m]] = '...' highlighted_nodes.discard(ids[m]) positions.remove(m) childcols[m[:-1]].add((0, i)) # add other nodes centered on their children, # if the center is already taken, back off # to the left and right alternately, until an empty cell is found. for n in sorted(levels, reverse=True): nodesatdepth = levels[n] startoflevel = len(matrix) matrix.append([vertline if a not in (corner, None) else None for a in matrix[-1]]) for m in nodesatdepth: # [::-1]: if n < maxdepth - 1 and childcols[m]: _, pivot = min(childcols[m], key=itemgetter(1)) if (set(a[:-1] for row in matrix[:-1] for a in row[:pivot] if isinstance(a, tuple)) & set(a[:-1] for row in matrix[:-1] for a in row[pivot:] if isinstance(a, tuple))): crossed.add(m) rowidx, i = findcell(m, matrix, startoflevel, childcols) positions.remove(m) # block positions where children of this node branch out for _, x in childcols[m]: matrix[rowidx][x] = corner # assert m == () or matrix[rowidx][i] in (None, corner), ( # matrix[rowidx][i], m, str(tree), ' '.join(sentence)) # node itself matrix[rowidx][i] = ids[m] nodes[ids[m]] = tree[m] # add column to the set of children for its parent if m != (): childcols[m[:-1]].add((rowidx, i)) assert len(positions) == 0 # remove unused columns, right to left for m in range(scale * len(sentence) - 1, -1, -1): if not any(isinstance(row[m], (Tree, int)) for row in matrix): for row in matrix: del row[m] # remove unused rows, reverse matrix = [row for row in reversed(matrix) if not all(a is None or a == vertline for a in row)] # collect coordinates of nodes coords = {} for n, _ in enumerate(matrix): for m, i in enumerate(matrix[n]): if isinstance(i, int) and i >= 0: coords[i] = n, m # move crossed edges last positions = sorted([a for level in levels.values() for a in level], key=lambda a: a[:-1] in crossed) # collect edges from node to node edges = OrderedDict() for i in reversed(positions): for j, _ in enumerate(tree[i]): edges[ids[i + (j, )]] = ids[i] return nodes, coords, edges, highlighted_nodes def text(self, nodedist=1, unicodelines=False, html=False, ansi=False, nodecolor='blue', leafcolor='red', funccolor='green', abbreviate=None, maxwidth=16): """ :return: ASCII art for a discontinuous tree. :param unicodelines: whether to use Unicode line drawing characters instead of plain (7-bit) ASCII. :param html: whether to wrap output in html code (default plain text). :param ansi: whether to produce colors with ANSI escape sequences (only effective when html==False). :param leafcolor, nodecolor: specify colors of leaves and phrasal nodes; effective when either html or ansi is True. :param abbreviate: if True, abbreviate labels longer than 5 characters. If integer, abbreviate labels longer than `abbr` characters. 
:param maxwidth: maximum number of characters before a label starts to wrap; pass None to disable. """ if abbreviate == True: abbreviate = 5 if unicodelines: horzline = '\u2500' leftcorner = '\u250c' rightcorner = '\u2510' vertline = ' \u2502 ' tee = horzline + '\u252C' + horzline bottom = horzline + '\u2534' + horzline cross = horzline + '\u253c' + horzline ellipsis = '\u2026' else: horzline = '_' leftcorner = rightcorner = ' ' vertline = ' | ' tee = 3 * horzline cross = bottom = '_|_' ellipsis = '.' def crosscell(cur, x=vertline): """Overwrite center of this cell with a vertical branch.""" splitl = len(cur) - len(cur) // 2 - len(x) // 2 - 1 lst = list(cur) lst[splitl:splitl + len(x)] = list(x) return ''.join(lst) result = [] matrix = defaultdict(dict) maxnodewith = defaultdict(lambda: 3) maxnodeheight = defaultdict(lambda: 1) maxcol = 0 minchildcol = {} maxchildcol = {} childcols = defaultdict(set) labels = {} wrapre = re.compile('(.{%d,%d}\\b\\W*|.{%d})' % ( maxwidth - 4, maxwidth, maxwidth)) # collect labels and coordinates for a in self.nodes: row, column = self.coords[a] matrix[row][column] = a maxcol = max(maxcol, column) label = (self.nodes[a].label() if isinstance(self.nodes[a], Tree) else self.nodes[a]) if abbreviate and len(label) > abbreviate: label = label[:abbreviate] + ellipsis if maxwidth and len(label) > maxwidth: label = wrapre.sub(r'\1\n', label).strip() label = label.split('\n') maxnodeheight[row] = max(maxnodeheight[row], len(label)) maxnodewith[column] = max(maxnodewith[column], max(map(len, label))) labels[a] = label if a not in self.edges: continue # e.g., root parent = self.edges[a] childcols[parent].add((row, column)) minchildcol[parent] = min(minchildcol.get(parent, column), column) maxchildcol[parent] = max(maxchildcol.get(parent, column), column) # bottom up level order traversal for row in sorted(matrix, reverse=True): noderows = [[''.center(maxnodewith[col]) for col in range(maxcol + 1)] for _ in range(maxnodeheight[row])] branchrow = [''.center(maxnodewith[col]) for col in range(maxcol + 1)] for col in matrix[row]: n = matrix[row][col] node = self.nodes[n] text = labels[n] if isinstance(node, Tree): # draw horizontal branch towards children for this node if n in minchildcol and minchildcol[n] < maxchildcol[n]: i, j = minchildcol[n], maxchildcol[n] a, b = (maxnodewith[i] + 1) // 2 - 1, maxnodewith[j] // 2 branchrow[i] = ((' ' * a) + leftcorner).ljust( maxnodewith[i], horzline) branchrow[j] = (rightcorner + (' ' * b)).rjust( maxnodewith[j], horzline) for i in range(minchildcol[n] + 1, maxchildcol[n]): if i == col and any( a == i for _, a in childcols[n]): line = cross elif i == col: line = bottom elif any(a == i for _, a in childcols[n]): line = tee else: line = horzline branchrow[i] = line.center(maxnodewith[i], horzline) else: # if n and n in minchildcol: branchrow[col] = crosscell(branchrow[col]) text = [a.center(maxnodewith[col]) for a in text] color = nodecolor if isinstance(node, Tree) else leafcolor if isinstance(node, Tree) and node.label().startswith('-'): color = funccolor if html: text = [escape(a) for a in text] if n in self.highlight: text = ['<font color=%s>%s</font>' % ( color, a) for a in text] elif ansi and n in self.highlight: text = ['\x1b[%d;1m%s\x1b[0m' % ( ANSICOLOR[color], a) for a in text] for x in range(maxnodeheight[row]): # draw vertical lines in partially filled multiline node # labels, but only if it's not a frontier node. 
noderows[x][col] = (text[x] if x < len(text) else (vertline if childcols[n] else ' ').center( maxnodewith[col], ' ')) # for each column, if there is a node below us which has a parent # above us, draw a vertical branch in that column. if row != max(matrix): for n, (childrow, col) in self.coords.items(): if (n > 0 and self.coords[self.edges[n]][0] < row < childrow): branchrow[col] = crosscell(branchrow[col]) if col not in matrix[row]: for noderow in noderows: noderow[col] = crosscell(noderow[col]) branchrow = [a + ((a[-1] if a[-1] != ' ' else b[0]) * nodedist) for a, b in zip(branchrow, branchrow[1:] + [' '])] result.append(''.join(branchrow)) result.extend((' ' * nodedist).join(noderow) for noderow in reversed(noderows)) return '\n'.join(reversed(result)) + '\n' def svg(self, nodecolor='blue', leafcolor='red', funccolor='green'): """ :return: SVG representation of a tree. """ fontsize = 12 hscale = 40 vscale = 25 hstart = vstart = 20 width = max(col for _, col in self.coords.values()) height = max(row for row, _ in self.coords.values()) result = ['<svg version="1.1" xmlns="http://www.w3.org/2000/svg" ' 'width="%dem" height="%dem" viewBox="%d %d %d %d">' % ( width * 3, height * 2.5, -hstart, -vstart, width * hscale + 3 * hstart, height * vscale + 3 * vstart) ] children = defaultdict(set) for n in self.nodes: if n: children[self.edges[n]].add(n) # horizontal branches from nodes to children for node in self.nodes: if not children[node]: continue y, x = self.coords[node] x *= hscale y *= vscale x += hstart y += vstart + fontsize // 2 childx = [self.coords[c][1] for c in children[node]] xmin = hstart + hscale * min(childx) xmax = hstart + hscale * max(childx) result.append( '\t<polyline style="stroke:black; stroke-width:1; fill:none;" ' 'points="%g,%g %g,%g" />' % (xmin, y, xmax, y)) result.append( '\t<polyline style="stroke:black; stroke-width:1; fill:none;" ' 'points="%g,%g %g,%g" />' % (x, y, x, y - fontsize // 3)) # vertical branches from children to parents for child, parent in self.edges.items(): y, _ = self.coords[parent] y *= vscale y += vstart + fontsize // 2 childy, childx = self.coords[child] childx *= hscale childy *= vscale childx += hstart childy += vstart - fontsize result += [ '\t<polyline style="stroke:white; stroke-width:10; fill:none;"' ' points="%g,%g %g,%g" />' % (childx, childy, childx, y + 5), '\t<polyline style="stroke:black; stroke-width:1; fill:none;"' ' points="%g,%g %g,%g" />' % (childx, childy, childx, y), ] # write nodes with coordinates for n, (row, column) in self.coords.items(): node = self.nodes[n] x = column * hscale + hstart y = row * vscale + vstart if n in self.highlight: color = nodecolor if isinstance(node, Tree) else leafcolor if isinstance(node, Tree) and node.label().startswith('-'): color = funccolor else: color = 'black' result += ['\t<text style="text-anchor: middle; fill: %s; ' 'font-size: %dpx;" x="%g" y="%g">%s</text>' % ( color, fontsize, x, y, escape(node.label() if isinstance(node, Tree) else node))] result += ['</svg>'] return '\n'.join(result) def test(): """Do some tree drawing tests.""" def print_tree(n, tree, sentence=None, ansi=True, **xargs): print() print('{0}: "{1}"'.format(n, ' '.join(sentence or tree.leaves()))) print(tree) print() drawtree = TreePrettyPrinter(tree, sentence) try: print(drawtree.text(unicodelines=ansi, ansi=ansi, **xargs)) except (UnicodeDecodeError, UnicodeEncodeError): print(drawtree.text(unicodelines=False, ansi=False, **xargs)) from nltk.corpus import treebank for n in [0, 1440, 1591, 2771, 2170]: tree = 
treebank.parsed_sents()[n] print_tree(n, tree, nodedist=2, maxwidth=8) print() print('ASCII version:') print(TreePrettyPrinter(tree).text(nodedist=2)) tree = Tree.fromstring( '(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) ' '(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) ' '(vg 10) (inf (verb 11)))))) (punct 12))', read_leaf=int) sentence = ('Ze had met haar moeder kunnen gaan winkelen ,' ' zwemmen of terrassen .'.split()) print_tree('Discontinuous tree', tree, sentence, nodedist=2) __all__ = ['TreePrettyPrinter'] if __name__ == '__main__': test()
nelango/ViralityAnalysis
model/lib/nltk/treeprettyprinter.py
Python
mit
24,360
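The `TreePrettyPrinter` docstring in the record above already contains a doctest; spelled out as a small standalone script (assuming an NLTK release that ships this module as `nltk.treeprettyprinter`), the same usage looks like:

from nltk.tree import Tree
from nltk.treeprettyprinter import TreePrettyPrinter  # module location assumed

tree = Tree.fromstring('(S (NP Mary) (VP walks))')
printer = TreePrettyPrinter(tree)
print(printer.text())                               # plain ASCII drawing
print(printer.text(unicodelines=True, ansi=True))    # Unicode lines and ANSI colours
svg_markup = printer.svg()                           # SVG markup as a string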
# -*- coding: utf-8 -*- import networkx as nx from nose.tools import assert_equal, assert_raises class TestNetworkSimplex: def test_simple_digraph(self): G = nx.DiGraph() G.add_node('a', demand = -5) G.add_node('d', demand = 5) G.add_edge('a', 'b', weight = 3, capacity = 4) G.add_edge('a', 'c', weight = 6, capacity = 10) G.add_edge('b', 'd', weight = 1, capacity = 9) G.add_edge('c', 'd', weight = 2, capacity = 5) flowCost, H = nx.network_simplex(G) soln = {'a': {'b': 4, 'c': 1}, 'b': {'d': 4}, 'c': {'d': 1}, 'd': {}} assert_equal(flowCost, 24) assert_equal(nx.min_cost_flow_cost(G), 24) assert_equal(H, soln) assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 24) def test_negcycle_infcap(self): G = nx.DiGraph() G.add_node('s', demand = -5) G.add_node('t', demand = 5) G.add_edge('s', 'a', weight = 1, capacity = 3) G.add_edge('a', 'b', weight = 3) G.add_edge('c', 'a', weight = -6) G.add_edge('b', 'd', weight = 1) G.add_edge('d', 'c', weight = -2) G.add_edge('d', 't', weight = 1, capacity = 3) assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G) def test_sum_demands_not_zero(self): G = nx.DiGraph() G.add_node('s', demand = -5) G.add_node('t', demand = 4) G.add_edge('s', 'a', weight = 1, capacity = 3) G.add_edge('a', 'b', weight = 3) G.add_edge('a', 'c', weight = -6) G.add_edge('b', 'd', weight = 1) G.add_edge('c', 'd', weight = -2) G.add_edge('d', 't', weight = 1, capacity = 3) assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G) def test_no_flow_satisfying_demands(self): G = nx.DiGraph() G.add_node('s', demand = -5) G.add_node('t', demand = 5) G.add_edge('s', 'a', weight = 1, capacity = 3) G.add_edge('a', 'b', weight = 3) G.add_edge('a', 'c', weight = -6) G.add_edge('b', 'd', weight = 1) G.add_edge('c', 'd', weight = -2) G.add_edge('d', 't', weight = 1, capacity = 3) assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G) def test_transshipment(self): G = nx.DiGraph() G.add_node('a', demand = 1) G.add_node('b', demand = -2) G.add_node('c', demand = -2) G.add_node('d', demand = 3) G.add_node('e', demand = -4) G.add_node('f', demand = -4) G.add_node('g', demand = 3) G.add_node('h', demand = 2) G.add_node('r', demand = 3) G.add_edge('a', 'c', weight = 3) G.add_edge('r', 'a', weight = 2) G.add_edge('b', 'a', weight = 9) G.add_edge('r', 'c', weight = 0) G.add_edge('b', 'r', weight = -6) G.add_edge('c', 'd', weight = 5) G.add_edge('e', 'r', weight = 4) G.add_edge('e', 'f', weight = 3) G.add_edge('h', 'b', weight = 4) G.add_edge('f', 'd', weight = 7) G.add_edge('f', 'h', weight = 12) G.add_edge('g', 'd', weight = 12) G.add_edge('f', 'g', weight = -1) G.add_edge('h', 'g', weight = -10) flowCost, H = nx.network_simplex(G) soln = {'a': {'c': 0}, 'b': {'a': 0, 'r': 2}, 'c': {'d': 3}, 'd': {}, 'e': {'r': 3, 'f': 1}, 'f': {'d': 0, 'g': 3, 'h': 2}, 'g': {'d': 0}, 'h': {'b': 0, 'g': 0}, 'r': {'a': 1, 'c': 1}} assert_equal(flowCost, 41) assert_equal(nx.min_cost_flow_cost(G), 41) assert_equal(H, soln) assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 41) def test_max_flow_min_cost(self): G = nx.DiGraph() G.add_edge('s', 'a', bandwidth = 6) G.add_edge('s', 'c', bandwidth = 10, cost = 10) G.add_edge('a', 'b', cost = 6) G.add_edge('b', 'd', bandwidth = 8, cost = 7) G.add_edge('c', 'd', cost = 10) G.add_edge('d', 't', bandwidth = 5, cost = 5) soln = {'s': {'a': 5, 'c': 0}, 'a': {'b': 5}, 'b': {'d': 5}, 'c': {'d': 0}, 'd': {'t': 5}, 't': {}} flow = nx.max_flow_min_cost(G, 's', 't', capacity = 'bandwidth', weight = 'cost') assert_equal(flow, soln) 
assert_equal(nx.cost_of_flow(G, flow, weight = 'cost'), 90) def test_digraph1(self): # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied # Mathematical Programming. Addison-Wesley, 1977. G = nx.DiGraph() G.add_node(1, demand = -20) G.add_node(4, demand = 5) G.add_node(5, demand = 15) G.add_edges_from([(1, 2, {'capacity': 15, 'weight': 4}), (1, 3, {'capacity': 8, 'weight': 4}), (2, 3, {'weight': 2}), (2, 4, {'capacity': 4, 'weight': 2}), (2, 5, {'capacity': 10, 'weight': 6}), (3, 4, {'capacity': 15, 'weight': 1}), (3, 5, {'capacity': 5, 'weight': 3}), (4, 5, {'weight': 2}), (5, 3, {'capacity': 4, 'weight': 1})]) flowCost, H = nx.network_simplex(G) soln = {1: {2: 12, 3: 8}, 2: {3: 8, 4: 4, 5: 0}, 3: {4: 11, 5: 5}, 4: {5: 10}, 5: {3: 0}} assert_equal(flowCost, 150) assert_equal(nx.min_cost_flow_cost(G), 150) assert_equal(H, soln) assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 150) def test_digraph2(self): # Example from ticket #430 from mfrasca. Original source: # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11. G = nx.DiGraph() G.add_edge('s', 1, capacity=12) G.add_edge('s', 2, capacity=6) G.add_edge('s', 3, capacity=14) G.add_edge(1, 2, capacity=11) G.add_edge(2, 3, capacity=9) G.add_edge(1, 4, capacity=5) G.add_edge(1, 5, capacity=2) G.add_edge(2, 5, capacity=4) G.add_edge(2, 6, capacity=2) G.add_edge(3, 6, capacity=31) G.add_edge(4, 5, capacity=18) G.add_edge(5, 5, capacity=9) G.add_edge(4, 't', capacity=3) G.add_edge(5, 't', capacity=7) G.add_edge(6, 't', capacity=22) flow = nx.max_flow_min_cost(G, 's', 't') soln = {1: {2: 5, 4: 5, 5: 2}, 2: {3: 6, 5: 3, 6: 2}, 3: {6: 20}, 4: {5: 2, 't': 3}, 5: {5: 0, 't': 7}, 6: {'t': 22}, 's': {1: 12, 2: 6, 3: 14}, 't': {}} assert_equal(flow, soln) def test_digraph3(self): """Combinatorial Optimization: Algorithms and Complexity, Papadimitriou Steiglitz at page 140 has an example, 7.1, but that admits multiple solutions, so I alter it a bit. From ticket #430 by mfrasca.""" G = nx.DiGraph() G.add_edge('s', 'a', {0: 2, 1: 4}) G.add_edge('s', 'b', {0: 2, 1: 1}) G.add_edge('a', 'b', {0: 5, 1: 2}) G.add_edge('a', 't', {0: 1, 1: 5}) G.add_edge('b', 'a', {0: 1, 1: 3}) G.add_edge('b', 't', {0: 3, 1: 2}) "PS.ex.7.1: testing main function" sol = nx.max_flow_min_cost(G, 's', 't', capacity=0, weight=1) flow = sum(v for v in sol['s'].values()) assert_equal(4, flow) assert_equal(23, nx.cost_of_flow(G, sol, weight=1)) assert_equal(sol['s'], {'a': 2, 'b': 2}) assert_equal(sol['a'], {'b': 1, 't': 1}) assert_equal(sol['b'], {'a': 0, 't': 3}) assert_equal(sol['t'], {}) def test_zero_capacity_edges(self): """Address issue raised in ticket #617 by arv.""" G = nx.DiGraph() G.add_edges_from([(1, 2, {'capacity': 1, 'weight': 1}), (1, 5, {'capacity': 1, 'weight': 1}), (2, 3, {'capacity': 0, 'weight': 1}), (2, 5, {'capacity': 1, 'weight': 1}), (5, 3, {'capacity': 2, 'weight': 1}), (5, 4, {'capacity': 0, 'weight': 1}), (3, 4, {'capacity': 2, 'weight': 1})]) G.node[1]['demand'] = -1 G.node[2]['demand'] = -1 G.node[4]['demand'] = 2 flowCost, H = nx.network_simplex(G) soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} assert_equal(flowCost, 6) assert_equal(nx.min_cost_flow_cost(G), 6) assert_equal(H, soln) assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 6) def test_digon(self): """Check if digons are handled properly. 
Taken from ticket #618 by arv.""" nodes = [(1, {}), (2, {'demand': -4}), (3, {'demand': 4}), ] edges = [(1, 2, {'capacity': 3, 'weight': 600000}), (2, 1, {'capacity': 2, 'weight': 0}), (2, 3, {'capacity': 5, 'weight': 714285}), (3, 2, {'capacity': 2, 'weight': 0}), ] G = nx.DiGraph(edges) G.add_nodes_from(nodes) flowCost, H = nx.network_simplex(G) soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} assert_equal(flowCost, 2857140) assert_equal(nx.min_cost_flow_cost(G), 2857140) assert_equal(H, soln) assert_equal(nx.min_cost_flow(G), soln) assert_equal(nx.cost_of_flow(G, H), 2857140) def test_multidigraph(self): """Raise an exception for multidigraph.""" G = nx.MultiDiGraph() G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight='capacity') assert_raises(nx.NetworkXError, nx.network_simplex, G)
ChristianKniep/QNIB
serverfiles/usr/local/lib/networkx-1.6/networkx/algorithms/flow/tests/test_mincost.py
Python
gpl-2.0
10,257
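The first test case in the record above (`test_simple_digraph`) doubles as a usage example for `nx.network_simplex`; run on its own it reduces to:

import networkx as nx

G = nx.DiGraph()
G.add_node('a', demand=-5)   # supply of 5 units
G.add_node('d', demand=5)    # demand of 5 units
G.add_edge('a', 'b', weight=3, capacity=4)
G.add_edge('a', 'c', weight=6, capacity=10)
G.add_edge('b', 'd', weight=1, capacity=9)
G.add_edge('c', 'd', weight=2, capacity=5)

flow_cost, flow_dict = nx.network_simplex(G)
print(flow_cost)        # 24, as asserted in test_simple_digraph
print(flow_dict['a'])   # {'b': 4, 'c': 1}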
# Copyright 2009 by Cymon J. Cox. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Command line wrapper for the multiple alignment program DIALIGN2-2. """ from __future__ import print_function __docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages! from Bio.Application import _Option, _Argument, _Switch, AbstractCommandline class DialignCommandline(AbstractCommandline): """Command line wrapper for the multiple alignment program DIALIGN2-2. http://bibiserv.techfak.uni-bielefeld.de/dialign/welcome.html Example: -------- To align a FASTA file (unaligned.fasta) with the output files names aligned.* including a FASTA output file (aligned.fa), use: >>> from Bio.Align.Applications import DialignCommandline >>> dialign_cline = DialignCommandline(input="unaligned.fasta", ... fn="aligned", fa=True) >>> print(dialign_cline) dialign2-2 -fa -fn aligned unaligned.fasta You would typically run the command line with dialign_cline() or via the Python subprocess module, as described in the Biopython tutorial. Citation: --------- B. Morgenstern (2004). DIALIGN: Multiple DNA and Protein Sequence Alignment at BiBiServ. Nucleic Acids Research 32, W33-W36. Last checked against version: 2.2 """ def __init__(self, cmd="dialign2-2", **kwargs): self.program_name = cmd self.parameters = \ [ _Switch(["-afc", "afc"], "Creates additional output file '*.afc' " "containing data of all fragments considered " "for alignment WARNING: this file can be HUGE !"), _Switch(["-afc_v", "afc_v"], "Like '-afc' but verbose: fragments are explicitly " "printed. WARNING: this file can be EVEN BIGGER !"), _Switch(["-anc", "anc"], "Anchored alignment. Requires a file <seq_file>.anc " "containing anchor points."), _Switch(["-cs", "cs"], "If segments are translated, not only the `Watson " "strand' but also the `Crick strand' is looked at."), _Switch(["-cw", "cw"], "Additional output file in CLUSTAL W format."), _Switch(["-ds", "ds"], "`dna alignment speed up' - non-translated nucleic acid " "fragments are taken into account only if they start " "with at least two matches. Speeds up DNA alignment at " "the expense of sensitivity."), _Switch(["-fa", "fa"], "Additional output file in FASTA format."), _Switch(["-ff", "ff"], "Creates file *.frg containing information about all " "fragments that are part of the respective optimal " "pairwise alignmnets plus information about " "consistency in the multiple alignment"), _Option(["-fn", "fn"], "Output files are named <out_file>.<extension>.", equate=False), _Switch(["-fop", "fop"], "Creates file *.fop containing coordinates of all " "fragments that are part of the respective pairwise alignments."), _Switch(["-fsm", "fsm"], "Creates file *.fsm containing coordinates of all " "fragments that are part of the final alignment"), _Switch(["-iw", "iw"], "Overlap weights switched off (by default, overlap " "weights are used if up to 35 sequences are aligned). " "This option speeds up the alignment but may lead " "to reduced alignment quality."), _Switch(["-lgs", "lgs"], "`long genomic sequences' - combines the following " "options: -ma, -thr 2, -lmax 30, -smin 8, -nta, -ff, " "-fop, -ff, -cs, -ds, -pst "), _Switch(["-lgs_t", "lgs_t"], "Like '-lgs' but with all segment pairs assessed " "at the peptide level (rather than 'mixed alignments' " "as with the '-lgs' option). 
Therefore faster than " "-lgs but not very sensitive for non-coding regions."), _Option(["-lmax", "lmax"], "Maximum fragment length = x (default: x = 40 or " "x = 120 for `translated' fragments). Shorter x " "speeds up the program but may affect alignment quality.", checker_function=lambda x: isinstance(x, int), equate=False), _Switch(["-lo", "lo"], "(Long Output) Additional file *.log with information " "about fragments selected for pairwise alignment and " "about consistency in multi-alignment proceedure."), _Switch(["-ma", "ma"], "`mixed alignments' consisting of P-fragments and " "N-fragments if nucleic acid sequences are aligned."), _Switch(["-mask", "mask"], "Residues not belonging to selected fragments are " "replaced by `*' characters in output alignment " "(rather than being printed in lower-case characters)"), _Switch(["-mat", "mat"], "Creates file *mat with substitution counts derived " "from the fragments that have been selected for alignment."), _Switch(["-mat_thr", "mat_thr"], "Like '-mat' but only fragments with weight score " "> t are considered"), _Switch(["-max_link", "max_link"], "'maximum linkage' clustering used to construct " "sequence tree (instead of UPGMA)."), _Switch(["-min_link", "min_link"], "'minimum linkage' clustering used."), _Option(["-mot", "mot"], "'motif' option.", equate=False), _Switch(["-msf", "msf"], "Separate output file in MSF format."), _Switch(["-n", "n"], "Input sequences are nucleic acid sequences. " "No translation of fragments."), _Switch(["-nt", "nt"], "Input sequences are nucleic acid sequences and " "`nucleic acid segments' are translated to `peptide " "segments'."), _Switch(["-nta", "nta"], "`no textual alignment' - textual alignment suppressed. " "This option makes sense if other output files are of " "intrest -- e.g. the fragment files created with -ff, " "-fop, -fsm or -lo."), _Switch(["-o", "o"], "Fast version, resulting alignments may be slightly " "different."), _Switch(["-ow", "ow"], "Overlap weights enforced (By default, overlap weights " "are used only if up to 35 sequences are aligned since " "calculating overlap weights is time consuming)."), _Switch(["-pst", "pst"], "'print status'. Creates and updates a file *.sta with " "information about the current status of the program " "run. This option is recommended if large data sets " "are aligned since it allows the user to estimate the " "remaining running time."), _Switch(["-smin", "smin"], "Minimum similarity value for first residue pair " "(or codon pair) in fragments. Speeds up protein " "alignment or alignment of translated DNA fragments " "at the expense of sensitivity."), _Option(["-stars", "stars"], "Maximum number of `*' characters indicating degree " "of local similarity among sequences. By default, no " "stars are used but numbers between 0 and 9, instead.", checker_function = lambda x: x in range(0, 10), equate=False), _Switch(["-stdo", "stdo"], "Results written to standard output."), _Switch(["-ta", "ta"], "Standard textual alignment printed (overrides " "suppression of textual alignments in special " "options, e.g. -lgs)"), _Option(["-thr", "thr"], "Threshold T = x.", checker_function = lambda x: isinstance(x, int), equate=False), _Switch(["-xfr", "xfr"], "'exclude fragments' - list of fragments can be " "specified that are NOT considered for pairwise alignment"), _Argument(["input"], "Input file name. 
Must be FASTA format", filename=True, is_required=True), ] AbstractCommandline.__init__(self, cmd, **kwargs) def _test(): """Run the module's doctests (PRIVATE).""" print("Running modules doctests...") import doctest doctest.testmod() print("Done") if __name__ == "__main__": _test()
updownlife/multipleK
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Align/Applications/_Dialign.py
Python
gpl-2.0
9,793
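The `DialignCommandline` docstring in the record above shows the intended usage; as a runnable snippet (Biopython installed, and the dialign2-2 binary on PATH if the command is actually executed):

from Bio.Align.Applications import DialignCommandline

dialign_cline = DialignCommandline(input="unaligned.fasta", fn="aligned", fa=True)
print(dialign_cline)    # dialign2-2 -fa -fn aligned unaligned.fasta

# Executing it requires the dialign2-2 executable to be available:
# stdout, stderr = dialign_cline()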
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersCable, eDVBFrontendParametersTerrestrial from Components.NimManager import nimmanager def ConvertToHumanReadable(tp, type = None): ret = { } if type is None: type = tp.get("tuner_type", "None") if type == "DVB-S": ret["tuner_type"] = _("Satellite") ret["inversion"] = { eDVBFrontendParametersSatellite.Inversion_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Inversion_On : _("On"), eDVBFrontendParametersSatellite.Inversion_Off : _("Off")}[tp["inversion"]] ret["fec_inner"] = { eDVBFrontendParametersSatellite.FEC_None : _("None"), eDVBFrontendParametersSatellite.FEC_Auto : _("Auto"), eDVBFrontendParametersSatellite.FEC_1_2 : "1/2", eDVBFrontendParametersSatellite.FEC_2_3 : "2/3", eDVBFrontendParametersSatellite.FEC_3_4 : "3/4", eDVBFrontendParametersSatellite.FEC_5_6 : "5/6", eDVBFrontendParametersSatellite.FEC_7_8 : "7/8", eDVBFrontendParametersSatellite.FEC_3_5 : "3/5", eDVBFrontendParametersSatellite.FEC_4_5 : "4/5", eDVBFrontendParametersSatellite.FEC_8_9 : "8/9", eDVBFrontendParametersSatellite.FEC_9_10 : "9/10"}.get(tp.get("fec_inner", _("Auto"))) ret["modulation"] = { eDVBFrontendParametersSatellite.Modulation_Auto : _("Auto"), eDVBFrontendParametersSatellite.Modulation_QPSK : "QPSK", eDVBFrontendParametersSatellite.Modulation_QAM16 : "QAM16", eDVBFrontendParametersSatellite.Modulation_8PSK : "8PSK"}[tp["modulation"]] ret["orbital_position"] = nimmanager.getSatName(int(tp["orbital_position"])) ret["polarization"] = { eDVBFrontendParametersSatellite.Polarisation_Horizontal : _("Horizontal"), eDVBFrontendParametersSatellite.Polarisation_Vertical : _("Vertical"), eDVBFrontendParametersSatellite.Polarisation_CircularLeft : _("Circular left"), eDVBFrontendParametersSatellite.Polarisation_CircularRight : _("Circular right")}[tp["polarization"]] ret["system"] = { eDVBFrontendParametersSatellite.System_DVB_S : "DVB-S", eDVBFrontendParametersSatellite.System_DVB_S2 : "DVB-S2"}[tp["system"]] if ret["system"] == "DVB-S2": ret["rolloff"] = { eDVBFrontendParametersSatellite.RollOff_alpha_0_35 : "0.35", eDVBFrontendParametersSatellite.RollOff_alpha_0_25 : "0.25", eDVBFrontendParametersSatellite.RollOff_alpha_0_20 : "0.20"}.get(tp.get("rolloff", "auto")) ret["pilot"] = { eDVBFrontendParametersSatellite.Pilot_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Pilot_On : _("On"), eDVBFrontendParametersSatellite.Pilot_Off : _("Off")}[tp["pilot"]] elif type == "DVB-C": ret["tuner_type"] = _("Cable") ret["modulation"] = { eDVBFrontendParametersCable.Modulation_Auto: _("Auto"), eDVBFrontendParametersCable.Modulation_QAM16 : "QAM16", eDVBFrontendParametersCable.Modulation_QAM32 : "QAM32", eDVBFrontendParametersCable.Modulation_QAM64 : "QAM64", eDVBFrontendParametersCable.Modulation_QAM128 : "QAM128", eDVBFrontendParametersCable.Modulation_QAM256 : "QAM256"}[tp["modulation"]] ret["inversion"] = { eDVBFrontendParametersCable.Inversion_Unknown : _("Auto"), eDVBFrontendParametersCable.Inversion_On : _("On"), eDVBFrontendParametersCable.Inversion_Off : _("Off")}[tp["inversion"]] ret["fec_inner"] = { eDVBFrontendParametersCable.FEC_None : _("None"), eDVBFrontendParametersCable.FEC_Auto : _("Auto"), eDVBFrontendParametersCable.FEC_1_2 : "1/2", eDVBFrontendParametersCable.FEC_2_3 : "2/3", eDVBFrontendParametersCable.FEC_3_4 : "3/4", eDVBFrontendParametersCable.FEC_5_6 : "5/6", eDVBFrontendParametersCable.FEC_7_8 : "7/8", eDVBFrontendParametersCable.FEC_8_9 : "8/9"}[tp["fec_inner"]] elif type == "DVB-T": ret["tuner_type"] = 
_("Terrestrial") ret["bandwidth"] = { eDVBFrontendParametersTerrestrial.Bandwidth_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Bandwidth_10MHz : "10 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_8MHz : "8 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_7MHz : "7 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_6MHz : "6 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_5MHz : "5 MHz", eDVBFrontendParametersTerrestrial.Bandwidth_1_712MHz : "1.172 MHz"}.get(tp.get("bandwidth", " ")) ret["code_rate_lp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_lp", " ")) ret["code_rate_hp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_hp", " ")) ret["constellation"] = { eDVBFrontendParametersTerrestrial.Modulation_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Modulation_QPSK : "QPSK", eDVBFrontendParametersTerrestrial.Modulation_QAM16 : "QAM16", eDVBFrontendParametersTerrestrial.Modulation_QAM64 : "QAM64", eDVBFrontendParametersTerrestrial.Modulation_QAM256 : "QAM256"}.get(tp.get("constellation", " ")) ret["transmission_mode"] = { eDVBFrontendParametersTerrestrial.TransmissionMode_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.TransmissionMode_1k : "1k", eDVBFrontendParametersTerrestrial.TransmissionMode_2k : "2k", eDVBFrontendParametersTerrestrial.TransmissionMode_4k : "4k", eDVBFrontendParametersTerrestrial.TransmissionMode_8k : "8k", eDVBFrontendParametersTerrestrial.TransmissionMode_16k : "16k", eDVBFrontendParametersTerrestrial.TransmissionMode_32k : "32k"}.get(tp.get("transmission_mode", " ")) ret["guard_interval"] = { eDVBFrontendParametersTerrestrial.GuardInterval_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.GuardInterval_19_256 : "19/256", eDVBFrontendParametersTerrestrial.GuardInterval_19_128 : "19/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_128 : "1/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_32 : "1/32", eDVBFrontendParametersTerrestrial.GuardInterval_1_16 : "1/16", eDVBFrontendParametersTerrestrial.GuardInterval_1_8 : "1/8", eDVBFrontendParametersTerrestrial.GuardInterval_1_4 : "1/4"}.get(tp.get("guard_interval", " ")) ret["hierarchy_information"] = { eDVBFrontendParametersTerrestrial.Hierarchy_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Hierarchy_None : _("None"), eDVBFrontendParametersTerrestrial.Hierarchy_1 : "1", eDVBFrontendParametersTerrestrial.Hierarchy_2 : "2", eDVBFrontendParametersTerrestrial.Hierarchy_4 : "4"}.get(tp.get("hierarchy_information", " ")) ret["inversion"] = { eDVBFrontendParametersTerrestrial.Inversion_Unknown : _("Auto"), eDVBFrontendParametersTerrestrial.Inversion_On : _("On"), eDVBFrontendParametersTerrestrial.Inversion_Off : _("Off")}.get(tp.get("inversion", " ")) ret["system"] = { eDVBFrontendParametersTerrestrial.System_DVB_T : 
"DVB-T", eDVBFrontendParametersTerrestrial.System_DVB_T2 : "DVB-T2"}[tp.get("system")] else: print "ConvertToHumanReadable: no or unknown type in tpdata dict!" for x in tp.keys(): if not ret.has_key(x): ret[x] = tp[x] return ret
popazerty/bh1
lib/python/Tools/Transponder.py
Python
gpl-2.0
7,700
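`ConvertToHumanReadable` in the record above is essentially a set of enum-to-label lookup tables with `.get()` fallbacks, plus a final loop that copies untranslated keys through. The enigma bindings are not installable outside an enigma2 image, so the pattern is sketched here with hypothetical numeric constants in place of the real enums:

# Hypothetical constants standing in for eDVBFrontendParametersSatellite values.
FEC_Auto, FEC_1_2, FEC_2_3 = 0, 1, 2

FEC_LABELS = {FEC_Auto: 'Auto', FEC_1_2: '1/2', FEC_2_3: '2/3'}

def describe(tp):
    ret = {}
    # Table lookup with a default, like the .get(...) lookups above.
    ret['fec_inner'] = FEC_LABELS.get(tp.get('fec_inner'), 'Auto')
    # Copy any keys that were not translated, like the final loop above.
    for key, value in tp.items():
        ret.setdefault(key, value)
    return ret

print(describe({'fec_inner': FEC_1_2, 'frequency': 11778000}))
# fec_inner is mapped to '1/2'; frequency is copied through unchanged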
import re import subprocess import sys def get_promotion_chain(git_directory, git_branch, upstream_name='origin'): """ For a given git repository & branch, determine the promotion chain Following the promotion path defined for pulp figure out what the full promotion path to master is from wherever we are. For example if given 2.5-release for pulp the promotion path would be 2.5-release -> 2.5-testing -> 2.5-dev -> 2.6-dev -> master :param git_directory: The directory containing the git repo :type git_directory: str :param git_branch: The git branch to start with :type git_branch: str :param upstream_name: The name of the upstream repo, defaults to 'origin', will be overridden by the upstream name if specified in the branch :param upstream_name: str :return: list of branches that the specified branch promotes to :rtype: list of str """ if git_branch.find('/') != -1: upstream_name = git_branch[:git_branch.find('/')] git_branch = git_branch[git_branch.find('/')+1:] git_branch = git_branch.strip() # parse the branch into its component parts if git_branch == 'master': return ['master'] # parse the git_branch: x.y-(dev|testing|release) branch_regex = "(\d+.\d+)-(dev|testing|release)" match = re.search(branch_regex, git_branch) source_branch_version = match.group(1) source_branch_stream = match.group(2) # get the branch list raw_branch_list = subprocess.check_output(['git', 'branch', '-r'], cwd=git_directory) lines = raw_branch_list.splitlines() target_branch_versions = set() all_branches = set() for line in lines: line = line.strip() # print line match = re.search(branch_regex, line) if match: all_branches.add(match.group(0)) branch_version = match.group(1) if branch_version > source_branch_version: target_branch_versions.add(branch_version) result_list = [git_branch] if source_branch_stream == 'release': result_list.append("%s-testing" % source_branch_version) result_list.append("%s-dev" % source_branch_version) if source_branch_stream == 'testing': result_list.append("%s-dev" % source_branch_version) result_list.extend(["%s-dev" % branch_version for branch_version in sorted(target_branch_versions)]) # Do this check before adding master since we explicitly won't match master in the above regex if not set(result_list).issubset(all_branches): missing_branches = set(result_list).difference(all_branches) print "Error creating git branch promotion list. 
The following branches are missing: " print missing_branches sys.exit(1) result_list.append('master') result_list = ["%s/%s" % (upstream_name, item) for item in result_list] return result_list def generate_promotion_pairs(promotion_chain): """ For all the items in a promotion path, yield the list of individual promotions that will need to be applied :param promotion_chain: list of branches that will need to be promoted :type promotion_chain: list of str """ for i in range(0, len(promotion_chain), 1): if i < (len(promotion_chain) - 1): yield promotion_chain[i:i + 2] def check_merge_forward(git_directory, promotion_chain): """ For a given git repo & promotion path, validate that all branches have been merged forward :param git_directory: The directory containing the git repo :type git_directory: str :param promotion_chain: git branch promotion path :type promotion_chain: list of str """ for pair in generate_promotion_pairs(promotion_chain): print "checking log comparision of %s -> %s" % (pair[0], pair[1]) output = subprocess.check_output(['git', 'log', "^%s" % pair[1], pair[0]], cwd=git_directory) if output: print "ERROR: in %s: branch %s has not been merged into %s" % \ (git_directory, pair[0], pair[1]) print "Run 'git log ^%s %s' to view the differences." % (pair[1], pair[0]) sys.exit(1) def get_current_git_upstream_branch(git_directory): """ For a given git directory, get the current remote branch :param git_directory: The directory containing the git repo :type git_directory: str :return: remote branch :rtype: str """ command = 'git rev-parse --abbrev-ref --symbolic-full-name @{u}' command = command.split(' ') return subprocess.check_output(command, cwd=git_directory).strip() def get_current_git_branch(git_directory): """ For a given git directory, get the current branch :param git_directory: The directory containing the git repo :type git_directory: str :return: remote branch :rtype: str """ command = 'git rev-parse --abbrev-ref HEAD' command = command.split(' ') return subprocess.check_output(command, cwd=git_directory).strip() def get_local_git_branches(git_directory): command = "git for-each-ref --format %(refname:short) refs/heads/" command = command.split(' ') lines = subprocess.check_output(command, cwd=git_directory) results = [item.strip() for item in lines.splitlines()] return set(results) def checkout_branch(git_directory, branch_name, remote_name='origin'): """ Ensure that branch_name is checkout from the given upstream :param git_directory: directory containing the git project :type git_directory: str :param branch_name: The local branch name. 
if the remote is specified in the branch name eg upstream/2.6-dev then the remote specified in the branch_name will take precidence over the remote_name specified as a parameter :type branch_name: str :param remote_name: The name of the remote git repo to use, is ignored if the remote is specified as part of the branch name :type remote_name: str """ if branch_name.find('/') != -1: local_branch = branch_name[branch_name.find('/')+1:] remote_name = branch_name[:branch_name.find('/')] else: local_branch = branch_name local_branch = local_branch.strip() full_name = '%s/%s' % (remote_name, local_branch) if not local_branch in get_local_git_branches(git_directory): subprocess.check_call(['git', 'checkout', '-b', local_branch, full_name], cwd=git_directory) subprocess.check_call(['git', 'checkout', local_branch], cwd=git_directory) # validate that the upstream branch is what we expect it to be upstream_branch = get_current_git_upstream_branch(git_directory) if upstream_branch != full_name: print "Error checking out %s in %s" % (full_name, git_directory) print "The upstream branch was already set to %s" % upstream_branch sys.exit(1) subprocess.check_call(['git', 'pull'], cwd=git_directory) def merge_forward(git_directory, push=False): """ From whatever the current checkout is, merge it forward :param git_directory: directory containing the git project :type git_directory: str :param push: Whether or not we should push the results to github :type push: bool """ starting_branch = get_current_git_branch(git_directory) branch = get_current_git_upstream_branch(git_directory) chain = get_promotion_chain(git_directory, branch) for source_branch, target_branch in generate_promotion_pairs(chain): checkout_branch(git_directory, source_branch) checkout_branch(git_directory, target_branch) local_source_branch = source_branch[source_branch.find('/')+1:] print "Merging %s into %s" % (local_source_branch, target_branch) subprocess.check_call(['git', 'merge', '-s', 'ours', local_source_branch, '--no-edit'], cwd=git_directory) if push: subprocess.call(['git', 'push'], cwd=git_directory) # Set the branch back tot he one we started on checkout_branch(git_directory, starting_branch)
aprajshekhar/pulp_packaging
ci/lib/promote.py
Python
gpl-2.0
8,229
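The `get_promotion_chain` docstring in the record above describes the promotion path (x.y-release -> x.y-testing -> x.y-dev -> later x.y-dev branches -> master). That chain construction can be illustrated without a git checkout; this sketch keeps only the chain logic, omitting the `git branch -r` scan and the missing-branch check:

import re

def promotion_chain(branch, all_versions):
    if branch == 'master':
        return ['master']
    version, stream = re.search(r'(\d+\.\d+)-(dev|testing|release)', branch).groups()
    chain = [branch]
    if stream == 'release':
        chain.append('%s-testing' % version)
    if stream in ('release', 'testing'):
        chain.append('%s-dev' % version)
    # Promote through the -dev branch of every later version, then master.
    chain.extend('%s-dev' % v for v in sorted(v for v in all_versions if v > version))
    chain.append('master')
    return chain

print(promotion_chain('2.5-release', ['2.4', '2.5', '2.6']))
# ['2.5-release', '2.5-testing', '2.5-dev', '2.6-dev', 'master']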
# -*- coding: utf-8 -*- """Storage for ports. Set defaults here, then :py:mod:`fixtures.portset` will make overrides.""" import sys from cfme.utils import clear_property_cache from cfme.utils.log import logger class Ports(object): SSH = 22 DB = 5432 TOWER = 54322 logger = logger @property def _top(self, m=sys.modules): mod = m.get('utils.appliance') return mod and mod.stack.top def __setattr__(self, attr, value): super(self.__class__, self).__setattr__(attr, value) if self._top is not None: self.logger.info("Invalidating lazy_cache ssh_client current_appliance object") clear_property_cache(self._top, 'ssh_client') sys.modules[__name__] = Ports()
Yadnyawalkya/integration_tests
cfme/utils/ports.py
Python
gpl-2.0
747
# -*- coding: utf-8 -*- ''' Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,urllib,urlparse from resources.lib.modules import cleantitle from resources.lib.modules import cache from resources.lib.modules import client from resources.lib.modules import proxy class source: def __init__(self): self.domains = ['projectfreetv.im'] self.base_link = 'http://projectfreetv.im' self.search_link = '/watch-series/' def tvshow(self, imdb, tvdb, tvshowtitle, year): try: t = cleantitle.get(tvshowtitle) r = cache.get(self.pftv_tvcache, 120) r = [i[0] for i in r if t == i[1]] for i in r[:2]: try: m = proxy.request(urlparse.urljoin(self.base_link, i), 'Episodes') m = re.sub('\s|<.+?>|</.+?>', '', m) m = re.findall('Year:(%s)' % year, m)[0] url = i ; break except: pass return url except: return def pftv_tvcache(self): try: url = urlparse.urljoin(self.base_link, self.search_link) r = proxy.request(url, 'A-Z') r = client.parseDOM(r, 'li') m = [] for i in r: try: title = client.parseDOM(i, 'a')[0] title = client.replaceHTMLCodes(title) title = cleantitle.get(title) title = title.encode('utf-8') url = client.parseDOM(i, 'a', ret='href')[0] url = client.replaceHTMLCodes(url) try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = urlparse.urljoin(self.base_link, url) url = re.findall('(?://.+?|)(/.+)', url)[0] url = url.encode('utf-8') m.append((url, title)) except: pass return m except: return def episode(self, url, imdb, tvdb, title, premiered, season, episode): try: if url == None: return url = [i for i in url.split('/') if not i == ''][-1] url = '/episode/%s-season-%01d-episode-%01d/' % (url, int(season), int(episode)) return url except: return def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) r = proxy.request(url, 'add links') links = client.parseDOM(r, 'tr') for i in links: try: host = client.parseDOM(i, 'a')[0] host = [x.strip() for x in host.strip().split('\n') if not x == ''][-1] if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') url = client.parseDOM(i, 'a', ret='href')[0] url = client.replaceHTMLCodes(url) try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = urlparse.urljoin(self.base_link, url) url = url.encode('utf-8') sources.append({'source': host, 'quality': 'SD', 'provider': 'PFTV', 'url': url, 'direct': False, 'debridonly': False}) except: pass return sources except: return sources def resolve(self, url): try: r = proxy.request(url, 'nofollow') url = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'}) url = [i for i in url if not urlparse.urlparse(self.base_link).netloc in i] url = client.replaceHTMLCodes(url[0]) try: 
url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0] except: pass url = url.encode('utf-8') return url except: return
felipenaselva/repo.felipe
plugin.video.exodus/resources/lib/sources/pftv_tv.py
Python
gpl-2.0
5,225
# -*- coding: utf-8 -*- """ *************************************************************************** blast2dem.py --------------------- Date : September 2013 Copyright : (C) 2013 by Martin Isenburg Email : martin near rapidlasso point com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Martin Isenburg' __date__ = 'September 2013' __copyright__ = '(C) 2013, Martin Isenburg' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from .LAStoolsUtils import LAStoolsUtils from .LAStoolsAlgorithm import LAStoolsAlgorithm from processing.core.parameters import ParameterSelection from processing.core.parameters import ParameterBoolean class blast2dem(LAStoolsAlgorithm): ATTRIBUTE = "ATTRIBUTE" PRODUCT = "PRODUCT" ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"] PRODUCTS = ["actual values", "hillshade", "gray", "false"] USE_TILE_BB = "USE_TILE_BB" def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('blast2dem') self.group, self.i18n_group = self.trAlgorithm('LAStools') self.addParametersVerboseGUI() self.addParametersPointInputGUI() self.addParametersFilter1ReturnClassFlagsGUI() self.addParametersStepGUI() self.addParameter(ParameterSelection(blast2dem.ATTRIBUTE, self.tr("Attribute"), blast2dem.ATTRIBUTES, 0)) self.addParameter(ParameterSelection(blast2dem.PRODUCT, self.tr("Product"), blast2dem.PRODUCTS, 0)) self.addParameter(ParameterBoolean(blast2dem.USE_TILE_BB, self.tr("Use tile bounding box (after tiling with buffer)"), False)) self.addParametersRasterOutputGUI() self.addParametersAdditionalGUI() def processAlgorithm(self, progress): commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "blast2dem")] self.addParametersVerboseCommands(commands) self.addParametersPointInputCommands(commands) self.addParametersFilter1ReturnClassFlagsCommands(commands) self.addParametersStepCommands(commands) attribute = self.getParameterValue(blast2dem.ATTRIBUTE) if attribute != 0: commands.append("-" + blast2dem.ATTRIBUTES[attribute]) product = self.getParameterValue(blast2dem.PRODUCT) if product != 0: commands.append("-" + blast2dem.PRODUCTS[product]) if (self.getParameterValue(blast2dem.USE_TILE_BB)): commands.append("-use_tile_bb") self.addParametersRasterOutputCommands(commands) self.addParametersAdditionalCommands(commands) LAStoolsUtils.runLAStools(commands, progress)
AsgerPetersen/QGIS
python/plugins/processing/algs/lidar/lastools/blast2dem.py
Python
gpl-2.0
3,426
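processAlgorithm in the blast2dem record builds a flat argv list and only appends an attribute or product flag when a non-default index is selected. A hedged sketch of that pattern in isolation, without the QGIS Processing classes; the executable path and the standalone helper are assumptions for illustration, not the plugin's API:

import os

ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"]
PRODUCTS = ["actual values", "hillshade", "gray", "false"]

def build_command(lastools_path, attribute=0, product=0, use_tile_bb=False):
    commands = [os.path.join(lastools_path, "bin", "blast2dem")]
    if attribute != 0:                 # index 0 is the default; no flag is emitted
        commands.append("-" + ATTRIBUTES[attribute])
    if product != 0:
        commands.append("-" + PRODUCTS[product])
    if use_tile_bb:
        commands.append("-use_tile_bb")
    return commands

print(build_command("/opt/LAStools", attribute=1, product=1, use_tile_bb=True))
# ['/opt/LAStools/bin/blast2dem', '-slope', '-hillshade', '-use_tile_bb']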
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Extended WTForms field Classes TimeField, DatePickerWidget, DateTimePickerWidget and TimePickerWidget are taken from `flask-admin` extension. :copyright: (c) 2011 by wilsaj. :license: BSD, see LICENSE for more details. :source: https://raw.github.com/wilsaj/flask-admin/master/flask_admin/wtforms.py """ import datetime import time from flask import session from wtforms import Form as WTForm from wtforms.widgets import TextInput, HTMLString, html_params from wtforms.fields import Field, TextField, HiddenField, FileField from flask.ext.wtf import Form from wtforms.ext.csrf.session import SessionSecureForm from wtforms.compat import text_type from invenio.config import CFG_SITE_SECRET_KEY class RowWidget(object): """ Renders a list of fields as a set of table rows with th/td pairs. """ def __init__(self): pass def __call__(self, field, **kwargs): html = [] hidden = '' for subfield in field: if subfield.type == 'HiddenField': hidden += text_type(subfield) else: html.append('%s%s' % (hidden, text_type(subfield(class_="span1", placeholder=subfield.label.text)))) hidden = '' if hidden: html.append(hidden) return HTMLString(''.join(html)) class TimeField(Field): """A text field which stores a `time.time` matching a format.""" widget = TextInput() def __init__(self, label=None, validators=None, format='%H:%M:%S', **kwargs): super(TimeField, self).__init__(label, validators, **kwargs) self.format = format def _value(self): if self.raw_data: return u' '.join(self.raw_data) else: return self.data and self.data.strftime(self.format) or u'' def process_formdata(self, valuelist): if valuelist: time_str = u' '.join(valuelist) try: timetuple = time.strptime(time_str, self.format) self.data = datetime.time(*timetuple[3:6]) except ValueError: self.data = None raise class DatePickerWidget(TextInput): """ TextInput widget that adds a 'datepicker' class to the html input element; this makes it easy to write a jQuery selector that adds a UI widget for date picking. """ def __call__(self, field, **kwargs): c = kwargs.pop('class', '') or kwargs.pop('class_', '') kwargs['class'] = u'datepicker %s' % c return super(DatePickerWidget, self).__call__(field, **kwargs) class DateTimePickerWidget(TextInput): """TextInput widget that adds a 'datetimepicker' class to the html adds a UI widget for datetime picking. """ def __call__(self, field, **kwargs): c = kwargs.pop('class', '') or kwargs.pop('class_', '') kwargs['class'] = u'datetimepicker %s' % c return super(DateTimePickerWidget, self).__call__(field, **kwargs) class TimePickerWidget(TextInput): """TextInput widget that adds a 'timepicker' class to the html input element; this makes it easy to write a jQuery selector that adds a UI widget for time picking. 
""" def __call__(self, field, **kwargs): c = kwargs.pop('class', '') or kwargs.pop('class_', '') kwargs['class'] = u'timepicker %s' % c return super(TimePickerWidget, self).__call__(field, **kwargs) class AutocompleteField(TextField): def __init__(self, label=None, validators=None, data_provide="typeahead", data_source=None, **kwargs): super(AutocompleteField, self).__init__(label, validators, **kwargs) if data_source: self.widget = TypeheadWidget(data_source, data_provide) class TypeheadWidget(object): def __init__(self, autocomplete_list, data_provide): if callable(autocomplete_list): self.autocomplete_list = autocomplete_list() else: self.autocomplete_list = '["{}"]'.format('","'.join(autocomplete_list)) self.data_provide = data_provide def __call__(self, field, **kwargs): kwargs.setdefault('id', field.id) kwargs.setdefault('type', 'text') kwargs.setdefault('data-provide', self.data_provide) kwargs.setdefault('data-source', self.autocomplete_list) if 'value' not in kwargs: kwargs['value'] = field._value() return HTMLString(u'<input %s />' % html_params(name=field.name, **kwargs)) def has_file_field(form): """Test whether or not a form has a FileField in it. This is used to know whether or not we need to set enctype to multipart/form-data. """ for field in form: if isinstance(field, FileField): return True return False class FilterTextField(TextField): alias = None def __init__(self, *args, **kwargs): self.alias = kwargs.get('alias') if 'alias' in kwargs: del kwargs['alias'] super(TextField, self).__init__(*args, **kwargs) if not self.raw_data: self.raw_data = [] def _value(self): if self.raw_data: return self.raw_data.pop() return u'' class InvenioForm(WTForm): @property def required_field_names(self): return [field.name for field in self if hasattr(field, 'required')] class InvenioBaseForm(Form, SessionSecureForm): SECRET_KEY = CFG_SITE_SECRET_KEY TIME_LIMIT = datetime.timedelta(minutes=20) def __init__(self, *args, **kwargs): super(InvenioBaseForm, self).__init__(*args, csrf_context=session, **kwargs) def add_fields(self, name, field): self.__setattr__(name, field) class FilterForm(InvenioBaseForm): """ Filter forms contains hidden fields to keep sorting. """ sort_by = HiddenField() order = HiddenField()
labordoc/labordoc-next
modules/miscutil/lib/wtforms_utils.py
Python
gpl-2.0
6,653
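TimeField.process_formdata in the record converts the posted string into a datetime.time by going through time.strptime and slicing the hour, minute, and second fields out of the struct_time. That conversion in isolation, plain standard library with no WTForms dependency:

import datetime
import time

def parse_time(value, fmt='%H:%M:%S'):
    # struct_time fields 3..5 are tm_hour, tm_min, tm_sec
    timetuple = time.strptime(value, fmt)
    return datetime.time(*timetuple[3:6])

print(parse_time("09:30:15"))   # 09:30:15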
"""Storage for pytest objects during test runs The objects in the module will change during the course of a test run, so they have been stashed into the 'store' namespace Usage: # imported directly (store is pytest.store) from cfme.fixtures.pytest_store import store store.config, store.pluginmanager, store.session The availability of these objects varies during a test run, but all should be available in the collection and testing phases of a test run. """ import os import sys import fauxfactory from _pytest.terminal import TerminalReporter from cached_property import cached_property from py.io import TerminalWriter from cfme.utils import diaper class FlexibleTerminalReporter(TerminalReporter): """A TerminalReporter stand-in that pretends to work even without a py.test config.""" def __init__(self, config=None, file=None): if config: # If we have a config, nothing more needs to be done return TerminalReporter.__init__(self, config, file) # Without a config, pretend to be a TerminalReporter # hook-related functions (logreport, collection, etc) will be outrigt broken, # but the line writers should still be usable if file is None: file = sys.stdout self._tw = self.writer = TerminalWriter(file) self.hasmarkup = self._tw.hasmarkup self.reportchars = '' self.currentfspath = None class Store(object): """pytest object store If a property isn't available for any reason (including being accessed outside of a pytest run), it will be None. """ @property def current_appliance(self): # layz import due to loops and loops and loops from cfme.utils import appliance # TODO: concieve a better way to detect/log import-time missuse # assert self.config is not None, 'current appliance not in scope' return appliance.current_appliance def __init__(self): #: The py.test config instance, None if not in py.test self.config = None #: The current py.test session, None if not in a py.test session self.session = None #: Parallelizer role, None if not running a parallelized session self.parallelizer_role = None # Stash of the "real" terminal reporter once we get it, # so we don't have to keep going through pluginmanager self._terminalreporter = None #: hack variable until we get a more sustainable solution self.ssh_clients_to_close = [] self.uncollection_stats = {} @property def has_config(self): return self.config is not None def _maybe_get_plugin(self, name): """ returns the plugin if the pluginmanager is availiable and the plugin exists""" return self.pluginmanager and self.pluginmanager.getplugin(name) @property def in_pytest_session(self): return self.session is not None @property def fixturemanager(self): # "publicize" the fixturemanager return self.session and self.session._fixturemanager @property def capturemanager(self): return self._maybe_get_plugin('capturemanager') @property def pluginmanager(self): # Expose this directly on the store for convenience in getting/setting plugins return self.config and self.config.pluginmanager @property def terminalreporter(self): if self._terminalreporter is not None: return self._terminalreporter reporter = self._maybe_get_plugin('terminalreporter') if reporter and isinstance(reporter, TerminalReporter): self._terminalreporter = reporter return reporter return FlexibleTerminalReporter(self.config) @property def terminaldistreporter(self): return self._maybe_get_plugin('terminaldistreporter') @property def parallel_session(self): return self._maybe_get_plugin('parallel_session') @property def slave_manager(self): return self._maybe_get_plugin('slave_manager') @property def 
slaveid(self): return getattr(self.slave_manager, 'slaveid', None) @cached_property def my_ip_address(self): try: # Check the environment first return os.environ['CFME_MY_IP_ADDRESS'] except KeyError: # Fall back to having an appliance tell us what it thinks our IP # address is return self.current_appliance.ssh_client.client_address() def write_line(self, line, **kwargs): return write_line(line, **kwargs) store = Store() def pytest_namespace(): # Expose the pytest store as pytest.store return {'store': store} def pytest_plugin_registered(manager): # config will be set at the second call to this hook if store.config is None: store.config = manager.getplugin('pytestconfig') def pytest_sessionstart(session): store.session = session def write_line(line, **kwargs): """A write-line helper that should *always* write a line to the terminal It knows all of py.tests dirty tricks, including ones that we made, and works around them. Args: **kwargs: Normal kwargs for pytest line formatting, stripped from slave messages """ if store.slave_manager: # We're a pytest slave! Write out the vnc info through the slave manager store.slave_manager.message(line, **kwargs) else: # If py.test is supressing stdout/err, turn that off for a moment with diaper: store.capturemanager.suspendcapture() # terminal reporter knows whether or not to write a newline based on currentfspath # so stash it, then use rewrite to blow away the line that printed the current # test name, then clear currentfspath so the test name is reprinted with the # write_ensure_prefix call. shenanigans! cfp = store.terminalreporter.currentfspath # carriage return, write spaces for the whole line, carriage return, write the new line store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line, **kwargs) store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8) store.terminalreporter.write_ensure_prefix(cfp) # resume capturing with diaper: store.capturemanager.resumecapture()
apagac/cfme_tests
cfme/fixtures/pytest_store.py
Python
gpl-2.0
6,454
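The Store object in the record works because Python modules are effectively singletons: instantiating store = Store() at module level gives every importer the same object, and its properties use short-circuit expressions so attribute access stays safe before pytest has populated anything. A stripped-down sketch of that pattern; the names here are illustrative, not the cfme API:

class Store(object):
    """Holds objects that only exist during part of a run; everything defaults to None."""
    def __init__(self):
        self.config = None
        self.session = None

    @property
    def pluginmanager(self):
        # 'config and config.pluginmanager' short-circuits to None while config is unset
        return self.config and self.config.pluginmanager

    @property
    def in_session(self):
        return self.session is not None

store = Store()   # module-level singleton; every importer shares this instance

print(store.pluginmanager, store.in_session)   # None False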
import re import xml.dom.minidom from typing import ( Any, List, Optional, ) from pcs import utils from pcs.common import ( const, pacemaker, ) from pcs.common.str_tools import format_list_custom_last_separator # pylint: disable=not-callable # main functions def parse_argv(argv, extra_options=None): """ Commandline options: no options """ options = {"id": None, "role": None, "score": None, "score-attribute": None} if extra_options: options.update(dict(extra_options)) # parse options while argv: found = False option = argv.pop(0) for name in options: if option.startswith(name + "="): options[name] = option.split("=", 1)[1] found = True break if not found: argv.insert(0, option) break return options, argv def dom_rule_add(dom_element, options, rule_argv, cib_schema_version): # pylint: disable=too-many-branches """ Commandline options: no options """ # validate options if options.get("score") and options.get("score-attribute"): utils.err("can not specify both score and score-attribute") if options.get("score") and not utils.is_score(options["score"]): utils.err("invalid score'{score}'".format(score=options["score"])) if options.get("role"): role = options["role"].capitalize() utils.print_depracation_warning_for_legacy_roles(options["role"]) supported_roles = ( const.PCMK_ROLES_PROMOTED + const.PCMK_ROLES_UNPROMOTED ) if role not in supported_roles: utils.err( "invalid role '{role}', use {supported_roles}".format( role=options["role"], supported_roles=format_list_custom_last_separator( list(supported_roles), " or " ), ) ) options["role"] = pacemaker.role.get_value_for_cib( role, cib_schema_version >= const.PCMK_NEW_ROLES_CIB_VERSION, ) if options.get("id"): id_valid, id_error = utils.validate_xml_id(options["id"], "rule id") if not id_valid: utils.err(id_error) if utils.does_id_exist(dom_element.ownerDocument, options["id"]): utils.err( "id '%s' is already in use, please specify another one" % options["id"] ) # parse rule if not rule_argv: utils.err("no rule expression was specified") try: preprocessor = TokenPreprocessor() dom_rule = CibBuilder(cib_schema_version).build( dom_element, RuleParser().parse(preprocessor.run(rule_argv)), options.get("id"), ) except SyntaxError as e: utils.err( "'%s' is not a valid rule expression: %s" % (" ".join(rule_argv), e) ) except UnexpectedEndOfInput: utils.err( "'%s' is not a valid rule expression: unexpected end of rule" % " ".join(rule_argv) ) except (ParserException, CibBuilderException): utils.err("'%s' is not a valid rule expression" % " ".join(rule_argv)) # add options into rule xml if not options.get("score") and not options.get("score-attribute"): options["score"] = "INFINITY" for name, value in options.items(): if name != "id" and value is not None: dom_rule.setAttribute(name, value) # score or score-attribute is required for the nested rules in order to have # valid CIB, pacemaker does not use the score of the nested rules for rule in dom_rule.getElementsByTagName("rule"): rule.setAttribute("score", "0") if dom_element.hasAttribute("score"): dom_element.removeAttribute("score") if dom_element.hasAttribute("node"): dom_element.removeAttribute("node") return dom_element class ExportDetailed: def __init__(self): self.show_detail = False self.rule_expired = False def get_string(self, rule, rule_expired, show_detail, indent=""): self.show_detail = show_detail self.rule_expired = rule_expired return indent + ("\n" + indent).join(self.list_rule(rule)) def list_rule(self, rule): rule_parts = [ "Rule{0}: {1}".format( " (expired)" if self.rule_expired else 
"", " ".join(self._list_attributes(rule)), ) ] for child in rule.childNodes: if child.nodeType == xml.dom.minidom.Node.TEXT_NODE: continue if child.tagName == "expression": self.indent_append(rule_parts, self.list_expression(child)) elif child.tagName == "date_expression": self.indent_append(rule_parts, self.list_date_expression(child)) elif child.tagName == "rule": self.indent_append(rule_parts, self.list_rule(child)) return rule_parts def list_expression(self, expression): if "value" in expression.attributes.keys(): exp_parts = [ expression.getAttribute("attribute"), expression.getAttribute("operation"), ] if expression.hasAttribute("type"): exp_parts.append(expression.getAttribute("type")) exp_parts.append(expression.getAttribute("value")) else: exp_parts = [ expression.getAttribute("operation"), expression.getAttribute("attribute"), ] if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) return ["Expression: %s" % " ".join(exp_parts)] def list_date_expression(self, expression): operation = expression.getAttribute("operation") if operation == "date_spec": date_spec_parts = self._list_attributes( expression.getElementsByTagName("date_spec")[0] ) exp_parts = ["Expression:"] if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) return self.indent_append( [" ".join(exp_parts)], ["Date Spec: %s" % " ".join(date_spec_parts)], ) if operation == "in_range": exp_parts = ["date", "in_range"] if expression.hasAttribute("start"): exp_parts.extend([expression.getAttribute("start"), "to"]) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) durations = expression.getElementsByTagName("duration") if durations: exp_parts.append("duration") duration_parts = self._list_attributes(durations[0]) if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) result = ["Expression: %s" % " ".join(exp_parts)] if durations: self.indent_append( result, ["Duration: %s" % " ".join(duration_parts)] ) return result exp_parts = ["date", expression.getAttribute("operation")] if expression.hasAttribute("start"): exp_parts.append(expression.getAttribute("start")) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) return ["Expression: " + " ".join(exp_parts)] def _list_attributes(self, element): attributes = utils.dom_attrs_to_list(element, with_id=False) if self.show_detail: attributes.append("(id:%s)" % (element.getAttribute("id"))) return attributes @staticmethod def indent_append(target, source, indent=" "): for part in source: target.append(indent + part) return target class ExportAsExpression: def __init__(self): self.normalize = False def get_string(self, rule, normalize=False): self.normalize = normalize return self.string_rule(rule) def string_rule(self, rule): boolean_op = rule.getAttribute("boolean-op") or "or" rule_parts = [] for child in rule.childNodes: if child.nodeType == xml.dom.minidom.Node.TEXT_NODE: continue if child.tagName == "expression": rule_parts.append(self.string_expression(child)) elif child.tagName == "date_expression": rule_parts.append(self.string_date_expression(child)) elif child.tagName == "rule": rule_parts.append("(%s)" % self.string_rule(child)) if self.normalize: rule_parts.sort() return (" %s " % boolean_op).join(rule_parts) def string_expression(self, expression): if "value" in expression.attributes.keys(): exp_parts = [ expression.getAttribute("attribute"), 
expression.getAttribute("operation"), ] if expression.hasAttribute("type"): exp_parts.append(expression.getAttribute("type")) elif self.normalize: exp_parts.append("string") value = expression.getAttribute("value") if " " in value: value = '"%s"' % value exp_parts.append(value) else: exp_parts = [ expression.getAttribute("operation"), expression.getAttribute("attribute"), ] return " ".join(exp_parts) def string_date_expression(self, expression): operation = expression.getAttribute("operation") if operation == "date_spec": exp_parts = ["date-spec"] + self._list_attributes( expression.getElementsByTagName("date_spec")[0] ) return " ".join(exp_parts) if operation == "in_range": exp_parts = ["date", "in_range"] if expression.hasAttribute("start"): exp_parts.extend([expression.getAttribute("start"), "to"]) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) durations = expression.getElementsByTagName("duration") if durations: exp_parts.append("duration") exp_parts.extend(self._list_attributes(durations[0])) return " ".join(exp_parts) exp_parts = ["date", expression.getAttribute("operation")] if expression.hasAttribute("start"): exp_parts.append(expression.getAttribute("start")) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) return " ".join(exp_parts) @staticmethod def _list_attributes(element): attributes = utils.dom_attrs_to_list(element, with_id=False) # sort it always to get the same output for the same input as dict is # unordered attributes.sort() return attributes def has_node_attr_expr_with_type_integer(rule_tree): if isinstance(rule_tree, SymbolOperator): if rule_tree.symbol_id in RuleParser.boolean_list: return any( has_node_attr_expr_with_type_integer(child) for child in rule_tree.children ) if ( rule_tree.symbol_id in RuleParser.date_comparison_list and rule_tree.children[0].value == "date" and rule_tree.children[1].is_literal() ) or ( isinstance(rule_tree, SymbolTypeDateCommon) and rule_tree.date_value_class == DateSpecValue ): return False if isinstance(rule_tree, SymbolPrefix): return False child = rule_tree.children[1] if isinstance(child, SymbolType) and child.symbol_id == "integer": return True return False return False # generic parser class SymbolBase: END = "{end}" LITERAL = "{literal}" symbol_id = None left_binding_power = 0 def null_denotation(self): raise SyntaxError("unexpected '%s'" % self.label()) def left_denotation(self, left): raise SyntaxError( "unexpected '%s' after '%s'" % (self.label(), left.label()) ) def is_end(self): return self.symbol_id == SymbolBase.END def is_literal(self): return self.symbol_id == SymbolBase.LITERAL def label(self): return self.symbol_id def __str__(self): return "(%s)" % self.symbol_id class SymbolLiteral(SymbolBase): def __init__(self, value): self.value = value def null_denotation(self): return self def label(self): return "end" if self.is_end() else str(self.value) def __str__(self): return "(end)" if self.is_end() else "(literal %s)" % self.value class SymbolParenthesisOpen(SymbolBase): expression_func = None advance_func = None close_symbol_id = None def null_denotation(self): expression = self.expression_func() self.advance_func(self.close_symbol_id) return expression class SymbolOperator(SymbolBase): expression_func = None # Note: not properly typed allowed_child_ids: List[Any] = [] def __init__(self): self.children = [] def is_allowed_child(self, child_symbol, child_position): return ( not self.allowed_child_ids or not self.allowed_child_ids[child_position] or 
child_symbol.symbol_id in self.allowed_child_ids[child_position] ) def __str__(self): string = " ".join( [str(part) for part in [self.symbol_id] + self.children] ) return "(" + string + ")" class SymbolPrefix(SymbolOperator): def null_denotation(self): self.children.append(self.expression_func(self.left_binding_power)) if not self.is_allowed_child(self.children[0], 0): raise SyntaxError( "unexpected '%s' after '%s'" % (self.children[0].label(), self.symbol_id) ) return self class SymbolType(SymbolPrefix): value_re = None def null_denotation(self): super().null_denotation() if self.value_re and not self.value_re.match(self.children[0].value): raise SyntaxError( "invalid %s value '%s'" % (self.symbol_id, self.children[0].value) ) return self class SymbolInfix(SymbolOperator): def left_denotation(self, left): self.children.append(left) if not self.is_allowed_child(self.children[0], 0): raise SyntaxError( "unexpected '%s' before '%s'" % (left.label(), self.symbol_id) ) self.children.append(self.expression_func(self.left_binding_power)) if not self.is_allowed_child(self.children[1], 1): raise SyntaxError( "unexpected '%s' after '%s'" % (self.children[1].label(), self.symbol_id) ) return self class SymbolTernary(SymbolOperator): advance_func = None symbol_second_id: Optional[str] = None def left_denotation(self, left): self.children.append(left) if not self.is_allowed_child(self.children[0], 0): raise SyntaxError( "unexpected '%s' before '%s'" % (left.label(), self.symbol_id) ) self.children.append(self.expression_func(self.left_binding_power)) if not self.is_allowed_child(self.children[1], 1): raise SyntaxError( "unexpected '%s' after '%s'" % (self.children[1].label(), self.symbol_id) ) self.advance_func(self.symbol_second_id) self.children.append(self.expression_func(self.left_binding_power)) if not self.is_allowed_child(self.children[2], 2): raise SyntaxError( "unexpected '%s' after '%s ... 
%s'" % ( self.children[2].label(), self.symbol_id, self.symbol_second_id, ) ) return self class SymbolTable: def __init__(self): self.table = {} def has_symbol(self, symbol_id): return symbol_id in self.table def get_symbol(self, symbol_id): return self.table[symbol_id] def new_symbol( self, symbol_id, superclass, binding_power=0, expression_func=None, advance_func=None, ): if not self.has_symbol(symbol_id): class SymbolClass(superclass): pass # enforce str to be both python2 and python3 compatible SymbolClass.__name__ = str("symbol_" + symbol_id) SymbolClass.symbol_id = symbol_id SymbolClass.left_binding_power = binding_power if expression_func: SymbolClass.expression_func = expression_func if advance_func: SymbolClass.advance_func = advance_func self.table[symbol_id] = SymbolClass return SymbolClass return self.get_symbol(symbol_id) class Parser: def __init__(self): self.current_symbol = None self.current_symbol_index = -1 self.program = [] self.symbol_table = SymbolTable() self.new_symbol_literal(SymbolBase.LITERAL) self.new_symbol_literal(SymbolBase.END) def new_symbol_literal(self, symbol_id): return self.symbol_table.new_symbol(symbol_id, SymbolLiteral) def new_symbol_prefix(self, symbol_id, binding_power): return self.symbol_table.new_symbol( symbol_id, SymbolPrefix, binding_power, self.expression ) def new_symbol_type(self, symbol_id, binding_power): return self.symbol_table.new_symbol( symbol_id, SymbolType, binding_power, self.expression ) def new_symbol_infix(self, symbol_id, binding_power): return self.symbol_table.new_symbol( symbol_id, SymbolInfix, binding_power, self.expression ) def new_symbol_ternary(self, symbol_id, second_id, binding_power): self.symbol_table.new_symbol(second_id, SymbolBase) symbol_class = self.symbol_table.new_symbol( symbol_id, SymbolTernary, binding_power, self.expression, self.advance, ) symbol_class.symbol_second_id = second_id return symbol_class def new_symbol_parenthesis(self, symbol_id, closing_id): self.symbol_table.new_symbol(closing_id, SymbolBase) symbol_class = self.symbol_table.new_symbol( symbol_id, SymbolParenthesisOpen, 0, self.expression, self.advance ) symbol_class.close_symbol_id = closing_id return symbol_class def symbolize(self, program): symbolized_program = [] literal_class = self.symbol_table.get_symbol(SymbolBase.LITERAL) for token in program: if self.symbol_table.has_symbol(token) and ( not symbolized_program or not isinstance(symbolized_program[-1], SymbolType) ): symbolized = self.symbol_table.get_symbol(token)() else: symbolized = literal_class(token) symbolized_program.append(symbolized) symbolized_program.append( self.symbol_table.get_symbol(SymbolBase.END)(None) ) return symbolized_program def advance(self, expected_symbol_id=None): if ( expected_symbol_id and self.current_symbol.symbol_id != expected_symbol_id ): if self.current_symbol.is_end(): raise SyntaxError("missing '%s'" % expected_symbol_id) raise SyntaxError( "expecting '%s', got '%s'" % (expected_symbol_id, self.current_symbol.label()) ) self.current_symbol_index += 1 if self.current_symbol_index >= len(self.program): raise UnexpectedEndOfInput() self.current_symbol = self.program[self.current_symbol_index] return self def expression(self, right_binding_power=0): symbol = self.current_symbol self.advance() left = symbol.null_denotation() while right_binding_power < self.current_symbol.left_binding_power: symbol = self.current_symbol self.advance() left = symbol.left_denotation(left) return left def parse(self, program): self.current_symbol = None 
self.current_symbol_index = -1 self.program = self.symbolize(program) self.advance() result = self.expression() symbol = self.current_symbol if not symbol.is_end(): raise SyntaxError("unexpected '%s'" % symbol.label()) return result class ParserException(Exception): pass class UnexpectedEndOfInput(ParserException): pass class SyntaxError(ParserException): # pylint: disable=redefined-builtin pass # rule parser specific code class DateCommonValue: allowed_items = [ "hours", "monthdays", "weekdays", "yeardays", "months", "weeks", "years", "weekyears", "moon", ] KEYWORD: Optional[str] = None def __init__(self, parts_string, keyword=None): self.parts = {} for part in parts_string.split(): if not self.accepts_part(part): raise SyntaxError("unexpected '%s' in %s" % (part, keyword)) if "=" not in part: raise SyntaxError( "missing =value after '%s' in %s" % (part, keyword) ) name, value = part.split("=", 1) if value == "": raise SyntaxError( "missing value after '%s' in %s" % (part, keyword) ) self.parts[name] = value if not self.parts: raise SyntaxError( "missing one of '%s=' in %s" % ("=', '".join(DateCommonValue.allowed_items), keyword) ) self.validate() def validate(self): return self @classmethod def accepts_part(cls, part): for name in cls.allowed_items: if part == name or part.startswith(name + "="): return True return False def __str__(self): # sort it always to get the same output for the same input as dict is # unordered return " ".join( [ "%s=%s" % (name, value) for name, value in sorted(self.parts.items()) ] ) class DateSpecValue(DateCommonValue): KEYWORD = "date-spec" part_re = re.compile(r"^(?P<since>\d+)(-(?P<until>\d+))?$") part_limits = { "hours": (0, 23), "monthdays": (0, 31), "weekdays": (1, 7), "yeardays": (1, 366), "months": (1, 12), "weeks": (1, 53), "weekyears": (1, 53), "moon": (0, 7), } def __init__(self, parts_string): super().__init__(parts_string, self.KEYWORD) def validate(self): for name, value in self.parts.items(): if not self.valid_part(name, value): raise SyntaxError( "invalid %s '%s' in '%s'" % (name, value, DateSpecValue.KEYWORD) ) return self def valid_part(self, name, value): match = DateSpecValue.part_re.match(value) if not match: return False match_dict = match.groupdict() if not self.valid_part_limits(name, match_dict["since"]): return False if match_dict["until"]: if not self.valid_part_limits(name, match_dict["since"]): return False if int(match_dict["since"]) >= int(match_dict["until"]): return False return True @staticmethod def valid_part_limits(name, value): if name not in DateSpecValue.part_limits: return True limits = DateSpecValue.part_limits[name] return limits[0] <= int(value) <= limits[1] class DateDurationValue(DateCommonValue): KEYWORD = "duration" def __init__(self, parts_string): super().__init__(parts_string, self.KEYWORD) def validate(self): for name, value in self.parts.items(): if not value.isdigit(): raise SyntaxError( "invalid %s '%s' in '%s'" % (name, value, DateDurationValue.KEYWORD) ) return self class SymbolTypeDateCommon(SymbolType): date_value_class = None def null_denotation(self): symbol = self.expression_func(self.left_binding_power) symbol.value = self.date_value_class(symbol.value) self.children.append(symbol) return self class SymbolTernaryInRange(SymbolTernary): allowed_child_ids = [ [SymbolBase.LITERAL], [SymbolBase.LITERAL], [SymbolBase.LITERAL, DateDurationValue.KEYWORD], ] symbol_second_id = "to" def is_allowed_child(self, child_symbol, child_position): return super().is_allowed_child(child_symbol, child_position) and ( 
child_position != 0 or child_symbol.value == "date" ) def left_denotation(self, left): super().left_denotation(left) for child in self.children[1:]: if child.is_literal() and not utils.is_iso8601_date(child.value): raise SyntaxError( "invalid date '%s' in 'in_range ... to'" % child.value ) return self class RuleParser(Parser): comparison_list = ["eq", "ne", "lt", "gt", "lte", "gte", "in_range"] date_comparison_list = ["gt", "lt", "in_range"] prefix_list = ["defined", "not_defined"] boolean_list = ["and", "or"] simple_type_list = ["string", "integer", "number", "version"] parenthesis_open = "(" parenthesis_close = ")" def __init__(self): super().__init__() for operator in RuleParser.comparison_list: if operator == "in_range": continue symbol_class = self.new_symbol_infix(operator, 50) symbol_class.allowed_child_ids = [ [SymbolBase.LITERAL], [SymbolBase.LITERAL] + RuleParser.simple_type_list, ] self.symbol_table.new_symbol( "in_range", SymbolTernaryInRange, 50, self.expression, self.advance ) self.symbol_table.new_symbol("to", SymbolBase) for operator in RuleParser.prefix_list: symbol_class = self.new_symbol_prefix(operator, 60) symbol_class.allowed_child_ids = [[SymbolBase.LITERAL]] for operator in RuleParser.simple_type_list: symbol_class = self.new_symbol_type(operator, 70) self.symbol_table.get_symbol("integer").value_re = re.compile( r"^[-+]?\d+$" ) # rhbz#1869399 # Originally, pacemaker only supported 'number', treated it as an # integer and documented it as 'integer'. With CIB schema 3.5.0+, # 'integer' is supported as well. With crm_feature_set 3.5.0+, 'number' # is treated as a floating point number. # Since pcs never supported 'number' until the above changes in # pacemaker happened and pacemaker was able to handle floating point # numbers before (even though truncating them to integers), we'll just # check for a float here. If that's not good enough, we can fix it # later and validate the value as integer when crm_feature_set < 3.5.0. 
self.symbol_table.get_symbol("number").value_re = re.compile( r"^[-+]?(\d+|(\d*\.\d+)|(\d+\.\d*))([eE][+-]?\d+)?$" ) self.symbol_table.get_symbol("version").value_re = re.compile( r"^\d+(\.\d+)*$" ) symbol_class = self.new_symbol_type_date(DateSpecValue, 70) symbol_class = self.new_symbol_type_date(DateDurationValue, 70) for operator in RuleParser.boolean_list: symbol_class = self.new_symbol_infix(operator, 40) symbol_class.allowed_child_ids = [ RuleParser.comparison_list + RuleParser.prefix_list + [DateSpecValue.KEYWORD] + RuleParser.boolean_list ] * 2 self.new_symbol_parenthesis( RuleParser.parenthesis_open, RuleParser.parenthesis_close ) def parse(self, program): syntactic_tree = super().parse(program) if syntactic_tree.is_literal() or ( isinstance(syntactic_tree, SymbolType) and not ( isinstance(syntactic_tree, SymbolTypeDateCommon) and syntactic_tree.date_value_class == DateSpecValue ) ): raise SyntaxError( "missing one of '%s'" % "', '".join( RuleParser.comparison_list + RuleParser.prefix_list + [DateSpecValue.KEYWORD] ) ) return syntactic_tree def new_symbol_type_date(self, date_value_class, binding_power): symbol_class = self.symbol_table.new_symbol( date_value_class.KEYWORD, SymbolTypeDateCommon, binding_power, self.expression, ) symbol_class.date_value_class = date_value_class return symbol_class # cib builder class CibBuilder: def __init__(self, cib_schema_version): self.cib_schema_version = cib_schema_version def build(self, dom_element, syntactic_tree, rule_id=None): dom_rule = self.add_element( dom_element, "rule", rule_id if rule_id else dom_element.getAttribute("id") + "-rule", ) self.build_rule(dom_rule, syntactic_tree) return dom_rule def build_rule(self, dom_rule, syntactic_tree): if isinstance(syntactic_tree, SymbolOperator): if syntactic_tree.symbol_id in RuleParser.boolean_list: self.build_boolean(dom_rule, syntactic_tree) elif ( syntactic_tree.symbol_id in RuleParser.date_comparison_list and syntactic_tree.children[0].value == "date" and syntactic_tree.children[1].is_literal() ): self.build_date_expression(dom_rule, syntactic_tree) elif ( isinstance(syntactic_tree, SymbolTypeDateCommon) and syntactic_tree.date_value_class == DateSpecValue ): self.build_datespec(dom_rule, syntactic_tree) else: self.build_expression(dom_rule, syntactic_tree) else: raise InvalidSyntacticTree(syntactic_tree) def build_datespec(self, dom_element, syntactic_tree): dom_expression = self.add_element( dom_element, "date_expression", dom_element.getAttribute("id") + "-expr", ) dom_expression.setAttribute("operation", "date_spec") dom_datespec = self.add_element( dom_expression, "date_spec", dom_expression.getAttribute("id") + "-datespec", ) for key, value in syntactic_tree.children[0].value.parts.items(): dom_datespec.setAttribute(key, value) def build_expression(self, dom_element, syntactic_tree): dom_expression = self.add_element( dom_element, "expression", dom_element.getAttribute("id") + "-expr" ) dom_expression.setAttribute("operation", syntactic_tree.symbol_id) dom_expression.setAttribute( "attribute", syntactic_tree.children[0].value ) if not isinstance(syntactic_tree, SymbolPrefix): child = syntactic_tree.children[1] if isinstance(child, SymbolType): # rhbz#1869399 # Pcs was always accepting 'integer', while CIB was only # supporting 'number' (and 'string' and 'version'). Pacemaker # was documenting it as 'integer' and was treating it as # integer (not float). With CIB schema 3.5.0, both 'integer' # and 'number' are accepted by CIB. For older schemas, we turn # 'integer' to 'number'. 
if ( self.cib_schema_version < const.PCMK_RULES_NODE_ATTR_EXPR_WITH_INT_TYPE_CIB_VERSION and child.symbol_id == "integer" ): dom_expression.setAttribute("type", "number") else: dom_expression.setAttribute("type", child.symbol_id) child = child.children[0] dom_expression.setAttribute("value", child.value) def build_date_expression(self, dom_element, syntactic_tree): dom_expression = self.add_element( dom_element, "date_expression", dom_element.getAttribute("id") + "-expr", ) dom_expression.setAttribute("operation", syntactic_tree.symbol_id) if syntactic_tree.symbol_id == "gt": dom_expression.setAttribute( "start", syntactic_tree.children[1].value ) elif syntactic_tree.symbol_id == "lt": dom_expression.setAttribute("end", syntactic_tree.children[1].value) elif syntactic_tree.symbol_id == "in_range": dom_expression.setAttribute( "start", syntactic_tree.children[1].value ) if ( isinstance(syntactic_tree.children[2], SymbolTypeDateCommon) and syntactic_tree.children[2].date_value_class == DateDurationValue ): dom_duration = self.add_element( dom_expression, "duration", dom_expression.getAttribute("id") + "-duration", ) duration = syntactic_tree.children[2].children[0].value for key, value in duration.parts.items(): dom_duration.setAttribute(key, value) else: dom_expression.setAttribute( "end", syntactic_tree.children[2].value ) def build_boolean(self, dom_element, syntactic_tree): dom_element.setAttribute("boolean-op", syntactic_tree.symbol_id) for subtree in syntactic_tree.children: if ( subtree.symbol_id in RuleParser.boolean_list and subtree.symbol_id != syntactic_tree.symbol_id ): self.build( dom_element, subtree, dom_element.getAttribute("id") + "-rule", ) else: self.build_rule(dom_element, subtree) @staticmethod def add_element(parent, tag_name, element_id): dom = parent.ownerDocument child = parent.appendChild(dom.createElement(tag_name)) child.setAttribute("id", utils.find_unique_id(dom, element_id)) return child class CibBuilderException(Exception): pass class InvalidSyntacticTree(CibBuilderException): pass # token preprocessing class TokenPreprocessor: def run(self, token_list): return self.join_date_common(self.separate_parenthesis(token_list)) @staticmethod def separate_parenthesis(input_list): output_list = [] for token in input_list: if not ( RuleParser.parenthesis_open in token or RuleParser.parenthesis_close in token ): output_list.append(token) else: part = [] for char in token: if char in [ RuleParser.parenthesis_open, RuleParser.parenthesis_close, ]: if part: output_list.append("".join(part)) part = [] output_list.append(char) else: part.append(char) if part: output_list.append("".join(part)) return output_list @staticmethod def join_date_common(input_list): output_list = [] token_parts = [] in_datecommon = False for token in input_list: if in_datecommon: if DateCommonValue.accepts_part(token): token_parts.append(token) else: in_datecommon = False output_list.append(token_parts[0]) if len(token_parts) > 1: output_list.append(" ".join(token_parts[1:])) output_list.append(token) token_parts = [] elif token in [DateSpecValue.KEYWORD, DateDurationValue.KEYWORD]: in_datecommon = True token_parts = [token] else: output_list.append(token) if token_parts: output_list.append(token_parts[0]) if len(token_parts) > 1: output_list.append(" ".join(token_parts[1:])) return output_list
tomjelinek/pcs
pcs/rule.py
Python
gpl-2.0
37,973
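RuleParser in the record above is a top-down operator-precedence (Pratt) parser: each symbol class carries a left binding power (comparisons 50, prefix operators 60, booleans 40), and expression() keeps folding operators into the tree while the next symbol binds more tightly than the caller's right binding power. A minimal numeric sketch of that control flow, using made-up operators and binding powers rather than the pcs symbol table:

def parse(tokens):
    """Tiny Pratt parser: '+' binds at 10, '*' at 20, literals are ints."""
    bp = {"+": 10, "*": 20}
    pos = [0]

    def peek():
        return tokens[pos[0]] if pos[0] < len(tokens) else None

    def advance():
        token = peek()
        pos[0] += 1
        return token

    def expression(rbp=0):
        left = int(advance())                      # null denotation: a literal
        while peek() in bp and rbp < bp[peek()]:   # left denotation: fold while tighter
            op = advance()
            left = (op, left, expression(bp[op]))
        return left

    return expression()

print(parse("1 + 2 * 3".split()))   # ('+', 1, ('*', 2, 3))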
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('forum', '0004_topic_update_index_date'), ] database_operations = [ migrations.AlterModelTable('TopicFollowed', 'notification_topicfollowed') ] state_operations = [ migrations.DeleteModel('TopicFollowed') ] operations = [ migrations.SeparateDatabaseAndState( database_operations=database_operations, state_operations=state_operations) ]
DevHugo/zds-site
zds/forum/migrations/0005_auto_20151119_2224.py
Python
gpl-3.0
594
# -*- coding: utf-8 -*- """ linkcode ~~~~~~~~ Add external links to module code in Python object descriptions. :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import division, absolute_import, print_function import warnings import collections warnings.warn("This extension has been accepted to Sphinx upstream. " "Use the version from there (Sphinx >= 1.2) " "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", FutureWarning, stacklevel=1) from docutils import nodes from sphinx import addnodes from sphinx.locale import _ from sphinx.errors import SphinxError class LinkcodeError(SphinxError): category = "linkcode error" def doctree_read(app, doctree): env = app.builder.env resolve_target = getattr(env.config, 'linkcode_resolve', None) if not isinstance(env.config.linkcode_resolve, collections.Callable): raise LinkcodeError( "Function `linkcode_resolve` is not given in conf.py") domain_keys = dict( py=['module', 'fullname'], c=['names'], cpp=['names'], js=['object', 'fullname'], ) for objnode in doctree.traverse(addnodes.desc): domain = objnode.get('domain') uris = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue # Convert signode to a specified format info = {} for key in domain_keys.get(domain, []): value = signode.get(key) if not value: value = '' info[key] = value if not info: continue # Call user code to resolve the link uri = resolve_target(domain, info) if not uri: # no source continue if uri in uris or not uri: # only one link per name, please continue uris.add(uri) onlynode = addnodes.only(expr='html') onlynode += nodes.reference('', '', internal=False, refuri=uri) onlynode[0] += nodes.inline('', _('[source]'), classes=['viewcode-link']) signode += onlynode def setup(app): app.connect('doctree-read', doctree_read) app.add_config_value('linkcode_resolve', None, '')
dbarbier/ot-svn
python/doc/sphinxext/numpydoc/linkcode.py
Python
gpl-3.0
2,510
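The linkcode extension in the record only adds [source] links when conf.py defines a callable linkcode_resolve(domain, info) that returns a URI or None; for the Python domain, info carries the 'module' and 'fullname' keys collected in doctree_read. A hedged example of such a resolver, where the repository URL is a placeholder and the mapping from module path to URL is an assumption:

# conf.py (sketch)
def linkcode_resolve(domain, info):
    if domain != 'py' or not info.get('module'):
        return None   # returning None means: no [source] link for this object
    path = info['module'].replace('.', '/')
    return "https://example.org/browse/%s.py#%s" % (path, info.get('fullname', ''))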
# -*- coding: utf-8 -*- # # Cyruslib v0.8.5-20090401 # Copyright (C) 2007-2009 Reinaldo de Carvalho <reinaldoc@gmail.com> # Copyright (C) 2003-2006 Gianluigi Tiesi <sherpya@netfarm.it> # Copyright (C) 2003-2006 NetFarm S.r.l. [http://www.netfarm.it] # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. # # Requires python >= 2.3 # __version__ = '0.8.5' __all__ = [ 'CYRUS' ] __doc__ = """Cyrus admin wrapper Adds cyrus-specific commands to imaplib IMAP4 Class and defines new CYRUS class for cyrus imapd commands """ from sys import exit, stdout try: import imaplib import re from binascii import b2a_base64 except ImportError, e: print e exit(1) Commands = { 'RECONSTRUCT' : ('AUTH',), 'DUMP' : ('AUTH',), # To check admin status 'ID' : ('AUTH',), # Only one ID allowed in non auth mode 'GETANNOTATION': ('AUTH',), 'SETANNOTATION': ('AUTH',) } imaplib.Commands.update(Commands) DEFAULT_SEP = '.' QUOTE = '"' DQUOTE = '""' re_ns = re.compile(r'.*\(\(\".*(\.|/)\"\)\).*') re_q0 = re.compile(r'(.*)\s\(\)') re_q = re.compile(r'(.*)\s\(STORAGE (\d+) (\d+)\)') re_mb = re.compile(r'\((.*)\)\s\".\"\s(.*)') re_url = re.compile(r'^(imaps?)://(.+?):?(\d{0,5})$') def ok(res): return res.upper().startswith('OK') def quote(text, qchar=QUOTE): return text.join([qchar, qchar]) def unquote(text, qchar=QUOTE): return ''.join(text.split(qchar)) def getflags(test): flags = [] for flag in test.split('\\'): flag = flag.strip() if len(flag): flags.append(flag) return flags ### A smart function to return an array of splitted strings ### and honours quoted strings def splitquote(text): data = text.split(QUOTE) if len(data) == 1: # no quotes res = data[0].split() else: res = [] for match in data: if len(match.strip()) == 0: continue if match[0] == ' ': res = res + match.strip().split() else: res.append(match) return res ### return a dictionary from a cyrus info response def res2dict(data): data = splitquote(data) datalen = len(data) if datalen % 2: # Unmatched pair return False, {} res = {} for i in range(0, datalen, 2): res[data[i]] = data[i+1] return True, res class CYRUSError(Exception): pass class IMAP4(imaplib.IMAP4): def getsep(self): """Get mailbox separator""" ### yes, ugly but cyradm does it in the same way ### also more realable then calling NAMESPACE ### and it should be also compatibile with other servers try: return unquote(self.list(DQUOTE, DQUOTE)[1][0]).split()[1] except: return DEFAULT_SEP def isadmin(self): ### A trick to check if the user is admin or not ### normal users cannot use dump command try: res, msg = self._simple_command('DUMP', 'NIL') if msg[0].lower().find('denied') == -1: return True except: pass return False def id(self): try: typ, dat = self._simple_command('ID', 'NIL') res, dat = self._untagged_response(typ, dat, 'ID') except: return False, dat[0] return ok(res), dat[0] def getannotation(self, mailbox, pattern='*'): typ, dat = 
self._simple_command('GETANNOTATION', mailbox, quote(pattern), quote('value.shared')) return self._untagged_response(typ, dat, 'ANNOTATION') def setannotation(self, mailbox, desc, value): if value: value = quote(value) else: value = "NIL" typ, dat = self._simple_command('SETANNOTATION', mailbox, quote(desc), "(%s %s)" % (quote('value.shared'), value) ) return self._untagged_response(typ, dat, 'ANNOTATION') def setquota(self, mailbox, limit): """Set quota of a mailbox""" if limit == 0: quota = '()' else: quota = '(STORAGE %s)' % limit return self._simple_command('SETQUOTA', mailbox, quota) ### Overridden to support partition ### Pychecker will complain about non matching signature def create(self, mailbox, partition=None): """Create a mailbox, partition is optional""" if partition is not None: return self._simple_command('CREATE', mailbox, partition) else: return self._simple_command('CREATE', mailbox) ### Overridden to support partition ### Pychecker: same here def rename(self, from_mailbox, to_mailbox, partition=None): """Rename a from_mailbox to to_mailbox, partition is optional""" if partition is not None: return self._simple_command('RENAME', from_mailbox, to_mailbox, partition) else: return self._simple_command('RENAME', from_mailbox, to_mailbox) def reconstruct(self, mailbox): return self._simple_command('RECONSTRUCT', mailbox) class IMAP4_SSL(imaplib.IMAP4_SSL): def getsep(self): """Get mailbox separator""" ### yes, ugly but cyradm does it in the same way ### also more realable then calling NAMESPACE ### and it should be also compatibile with other servers try: return unquote(self.list(DQUOTE, DQUOTE)[1][0]).split()[1] except: return DEFAULT_SEP def isadmin(self): ### A trick to check if the user is admin or not ### normal users cannot use dump command try: res, msg = self._simple_command('DUMP', 'NIL') if msg[0].lower().find('denied') == -1: return True except: pass return False def id(self): try: typ, dat = self._simple_command('ID', 'NIL') res, dat = self._untagged_response(typ, dat, 'ID') except: return False, dat[0] return ok(res), dat[0] def getannotation(self, mailbox, pattern='*'): typ, dat = self._simple_command('GETANNOTATION', mailbox, quote(pattern), quote('value.shared')) return self._untagged_response(typ, dat, 'ANNOTATION') def setannotation(self, mailbox, desc, value): if value: value = quote(value) else: value = "NIL" typ, dat = self._simple_command('SETANNOTATION', mailbox, quote(desc), "(%s %s)" % (quote('value.shared'), value) ) return self._untagged_response(typ, dat, 'ANNOTATION') def setquota(self, mailbox, limit): """Set quota of a mailbox""" if limit == 0: quota = '()' else: quota = '(STORAGE %s)' % limit return self._simple_command('SETQUOTA', mailbox, quota) ### Overridden to support partition ### Pychecker will complain about non matching signature def create(self, mailbox, partition=None): """Create a mailbox, partition is optional""" if partition is not None: return self._simple_command('CREATE', mailbox, partition) else: return self._simple_command('CREATE', mailbox) ### Overridden to support partition ### Pychecker: same here def rename(self, from_mailbox, to_mailbox, partition=None): """Rename a from_mailbox to to_mailbox, partition is optional""" if partition is not None: return self._simple_command('RENAME', from_mailbox, to_mailbox, partition) else: return self._simple_command('RENAME', from_mailbox, to_mailbox) def reconstruct(self, mailbox): return self._simple_command('RECONSTRUCT', mailbox) def login_plain(self, admin, password, asUser): if 
asUser: encoded = b2a_base64("%s\0%s\0%s" % (asUser, admin, password)).strip() else: encoded = b2a_base64("%s\0%s\0%s" % (admin, admin, password)).strip() res, data = self._simple_command('AUTHENTICATE', 'PLAIN', encoded) if ok(res): self.state = 'AUTH' return res, data class CYRUS: ERROR = {} ERROR["CONNECT"] = [0, "Connection error"] ERROR["INVALID_URL"] = [1, "Invalid URL"] ERROR["ENCODING"] = [3, "Invalid encondig"] ERROR["MBXNULL"] = [5, "Mailbox is Null"] ERROR["NOAUTH"] = [7, "Connection is not authenticated"] ERROR["LOGIN"] = [10, "User or password is wrong"] ERROR["ADMIN"] = [11, "User is not cyrus administrator"] ERROR["AUTH"] = [12, "Connection already authenticated"] ERROR["LOGINPLAIN"] = [15, "Encryption needed to use mechanism"] ERROR["LOGIN_PLAIN"] = [16, "User or password is wrong"] ERROR["CREATE"] = [20, "Unable create mailbox"] ERROR["DELETE"] = [25, "Unable delete mailbox"] ERROR["GETACL"] = [30, "Unable parse GETACL result"] ERROR["SETQUOTA"] = [40, "Invalid integer argument"] ERROR["GETQUOTA"] = [45, "Quota root does not exist"] ERROR["RENAME"] = [50, "Unable rename mailbox"] ERROR["RECONSTRUCT"] = [60, "Unable reconstruct mailbox"] ERROR["SUBSCRIBE"] = [70, "User is cyrus administrator, normal user required"] ERROR["UNSUBSCRIBE"] = [75, "User is cyrus administrator, normal user required"] ERROR["LSUB"] = [77, "User is cyrus administrator, normal user required"] ERROR["UNKCMD"] = [98, "Command not implemented"] ERROR["IMAPLIB"] = [99, "Generic imaplib error"] ENCODING_LIST = ['imap', 'utf-8', 'iso-8859-1'] def __init__(self, url = 'imap://localhost:143'): self.VERBOSE = False self.AUTH = False self.ADMIN = None self.AUSER = None self.ADMINACL = 'c' self.SEP = DEFAULT_SEP self.ENCODING = 'imap' self.LOGFD = stdout match = re_url.match(url) if match: host = match.group(2) if match.group(3): port = int(match.group(3)) else: port = 143 else: self.__doraise("INVALID_URL") try: if match.group(1) == 'imap': self.ssl = False self.m = IMAP4(host, port) else: self.ssl = True self.m = IMAP4_SSL(host, port) except: self.__doraise("CONNECT") def __del__(self): if self.AUTH: self.logout() def __verbose(self, msg): if self.VERBOSE: print >> self.LOGFD, msg def __doexception(self, function, msg=None, *args): if msg is None: try: msg = self.ERROR.get(function.upper())[1] except: msg = self.ERROR.get("IMAPLIB")[1] value = "" for arg in args: if arg is not None: value = "%s %s" % (value, arg) self.__verbose( '[%s%s] %s: %s' % (function.upper(), value, "BAD", msg) ) self.__doraise( function.upper(), msg ) def __doraise(self, mode, msg=None): idError = self.ERROR.get(mode) if idError: if msg is None: msg = idError[1] else: idError = [self.ERROR.get("IMAPLIB")[0]] raise CYRUSError( idError[0], mode, msg ) def __prepare(self, command, mailbox=True): if not self.AUTH: self.__doexception(command, self.ERROR.get("NOAUTH")[1]) elif not mailbox: self.__doexception(command, self.ERROR.get("MBXNULL")[1]) def __docommand(self, function, *args): wrapped = getattr(self.m, function, None) if wrapped is None: raise self.__doraise("UNKCMD") try: res, msg = wrapped(*args) if ok(res): return res, msg except Exception, info: error = info.args[0].split(':').pop().strip() if error.upper().startswith('BAD'): error = error.split('BAD', 1).pop().strip() error = unquote(error[1:-1], '\'') self.__doexception(function, error, *args) self.__doexception(function, msg[0], *args) def id(self): self.__prepare('id') res, data = self.m.id() data = data.strip() if not res or (len(data) < 3): return False, {} data = 
data[1:-1] # Strip () res, rdata = res2dict(data) if not res: self.__verbose( '[ID] Umatched pairs in result' ) return res, rdata def login(self, username, password, forceNoAdmin = False): if self.AUTH: self.__doexception("LOGIN", self.ERROR.get("AUTH")[1]) try: res, msg = self.m.login(username, password) admin = self.m.isadmin() except Exception, info: error = info.args[0].split(':').pop().strip() self.__doexception("LOGIN", error) if admin or forceNoAdmin: self.ADMIN = username else: self.__doexception("LOGIN", self.ERROR.get("ADMIN")[1]) self.SEP = self.m.getsep() self.AUTH = True self.__verbose( '[LOGIN %s] %s: %s' % (username, res, msg[0]) ) def login_plain(self, admin, password, asUser = None, forceNoAdmin = False): if self.AUTH: self.__doexception("LOGINPLAIN", self.ERROR.get("AUTH")[1]) if not self.ssl: self.__doexception("LOGINPLAIN", self.ERROR.get("LOGINPLAIN")[1]) res, msg = self.__docommand("login_plain", admin, password, asUser) self.__verbose( '[AUTHENTICATE PLAIN %s] %s: %s' % (admin, res, msg[0]) ) if ok(res): if asUser is None: if self.m.isadmin() or forceNoAdmin: self.ADMIN = admin else: self.__doexception("LOGIN", self.ERROR.get("ADMIN")[1]) else: self.ADMIN = asUser self.AUSER = asUser self.SEP = self.m.getsep() self.AUTH = True def logout(self): try: res, msg = self.m.logout() except Exception, info: error = info.args[0].split(':').pop().strip() self.__doexception("LOGOUT", error) self.AUTH = False self.ADMIN = None self.AUSER = None self.__verbose( '[LOGOUT] %s: %s' % (res, msg[0]) ) def getEncoding(self): """Get current input/ouput codification""" return self.ENCODING def setEncoding(self, enc = None): """Set current input/ouput codification""" if enc is None: self.ENCODING = 'imap' elif enc in self.ENCODING_LIST: self.ENCODING = enc else: raise self.__doraise("ENCODING") def __encode(self, text): if re.search("&", text): text = re.sub("/", "+AC8-", text) text = re.sub("&", "+", text) text = unicode(text, 'utf-7').encode(self.ENCODING) return text def encode(self, text): if self.ENCODING == 'imap': return text elif self.ENCODING in self.ENCODING_LIST: return self.__encode(text) def __decode(self, text): text = re.sub("/", "-&", text) text = re.sub(" ", "-@", text) text = unicode(text, self.ENCODING).encode('utf-7') text = re.sub("-@", " ", text) text = re.sub("-&", "/", text) text = re.sub("\+", "&", text) return text def decode(self, text): if self.ENCODING == 'imap': return text elif self.ENCODING in self.ENCODING_LIST: return self.__decode(text) def lm(self, pattern="*"): """ List mailboxes, returns dict with list of mailboxes To list all mailboxes lm() To list users top mailboxes lm("user/%") To list all users mailboxes lm("user/*") To list users mailboxes startwith a word lm("user/word*") To list global top folders lm("%") To list global startwith a word unsupported by server suggestion lm("word*") """ self.__prepare('LIST') if pattern == '': pattern = "*" if pattern == '%': res, ml = self.__docommand('list', '', '%') else: res, ml = self.__docommand('list', '*', self.decode(pattern)) if not ok(res): self.__verbose( '[LIST] %s: %s' % (res, ml) ) return [] if (len(ml) == 1) and ml[0] is None: self.__verbose( '[LIST] No results' ) return [] mb = [] for mailbox in ml: res = re_mb.match(mailbox) if res is None: continue mbe = unquote(res.group(2)) if 'Noselect' in getflags(res.group(1)): continue mb.append(self.encode(mbe)) return mb def cm(self, mailbox, partition=None): """Create mailbox""" self.__prepare('CREATE', mailbox) res, msg = self.__docommand('create', 
self.decode(mailbox), partition) self.__verbose( '[CREATE %s partition=%s] %s: %s' % (mailbox, partition, res, msg[0]) ) def __dm(self, mailbox): if not mailbox: return True self.__docommand("setacl", self.decode(mailbox), self.ADMIN, self.ADMINACL) res, msg = self.__docommand("delete", self.decode(mailbox)) self.__verbose( '[DELETE %s] %s: %s' % (mailbox, res, msg[0]) ) def dm(self, mailbox, recursive=True): """Delete mailbox""" self.__prepare('DELETE', mailbox) mbxTmp = mailbox.split(self.SEP) # Cyrus is not recursive for user subfolders and global folders if (recursive and mbxTmp[0] != "user") or (len(mbxTmp) > 2): mbxList = self.lm("%s%s*" % (mailbox, self.SEP)) mbxList.reverse() for mbox in mbxList: self.__dm(mbox) self.__dm(mailbox) def rename(self, fromMbx, toMbx, partition=None): """Rename or change partition""" self.__prepare('RENAME', fromMbx) # Rename is recursive! Amen! res, msg = self.__docommand("rename", self.decode(fromMbx), self.decode(toMbx), partition) self.__verbose( '[RENAME %s %s] %s: %s' % (fromMbx, toMbx, res, msg[0]) ) def lam(self, mailbox): """List ACLs""" self.__prepare('GETACL', mailbox) res, acl = self.__docommand("getacl", self.decode(mailbox)) acls = {} aclList = splitquote(acl.pop().strip()) del aclList[0] # mailbox for i in range(0, len(aclList), 2): try: userid = self.encode(aclList[i]) rights = aclList[i + 1] except Exception, info: self.__verbose( '[GETACL %s] BAD: %s' % (mailbox, info.args[0]) ) raise self.__doraise("GETACL") self.__verbose( '[GETACL %s] %s %s' % (mailbox, userid, rights) ) acls[userid] = rights return acls def sam(self, mailbox, userid, rights): """Set ACL""" self.__prepare('SETACL', mailbox) res, msg = self.__docommand("setacl", self.decode(mailbox), userid, rights) self.__verbose( '[SETACL %s %s %s] %s: %s' % (mailbox, userid, rights, res, msg[0]) ) def lq(self, mailbox): """List Quota""" self.__prepare('GETQUOTA', mailbox) res, msg = self.__docommand("getquota", self.decode(mailbox)) match = re_q0.match(msg[0]) if match: self.__verbose( '[GETQUOTA %s] QUOTA (Unlimited)' % mailbox ) return 0, 0 match = re_q.match(msg[0]) if match is None: self.__verbose( '[GETQUOTA %s] BAD: RegExp not matched, please report' % mailbox ) return 0, 0 try: used = int(match.group(2)) quota = int(match.group(3)) self.__verbose( '[GETQUOTA %s] %s: QUOTA (%d/%d)' % (mailbox, res, used, quota) ) return used, quota except: self.__verbose( '[GETQUOTA %s] BAD: Error while parsing results' % mailbox ) return 0, 0 def sq(self, mailbox, limit): """Set Quota""" self.__prepare('SETQUOTA', mailbox) try: limit = int(limit) except ValueError, e: self.__verbose( '[SETQUOTA %s] BAD: %s %s' % (mailbox, self.ERROR.get("SETQUOTA")[1], limit) ) raise self.__doraise("SETQUOTA") res, msg = self.__docommand("setquota", self.decode(mailbox), limit) self.__verbose( '[SETQUOTA %s %s] %s: %s' % (mailbox, limit, res, msg[0]) ) def getannotation(self, mailbox, pattern='*'): """Get Annotation""" self.__prepare('GETANNOTATION') res, data = self.__docommand('getannotation', self.decode(mailbox), pattern) if (len(data) == 1) and data[0] is None: self.__verbose( '[GETANNOTATION %s] No results' % (mailbox) ) return {} ann = {} for annotation in data: annotation = annotation.split('"') if len(annotation) != 9: self.__verbose( '[GETANNOTATION] Invalid annotation entry' ) continue mbx = self.encode(annotation[1]) key = annotation[3] value = annotation[7] self.__verbose( '[GETANNOTATION %s] %s: %s' % (mbx, key, value) ) if not ann.has_key(mbx): ann[mbx] = {} if not ann[mbx].has_key(key): 
ann[mbx][key] = value return ann def setannotation(self, mailbox, annotation, value): """Set Annotation""" self.__prepare('SETANNOTATION') res, msg = self.__docommand("setannotation", self.decode(mailbox), annotation, value) self.__verbose( '[SETANNOTATION %s] %s: %s' % (mailbox, res, msg[0]) ) def __reconstruct(self, mailbox): if not mailbox: return True res, msg = self.__docommand("reconstruct", self.decode(mailbox)) self.__verbose( '[RECONSTRUCT %s] %s: %s' % (mailbox, res, msg[0]) ) def reconstruct(self, mailbox, recursive=True): """Reconstruct""" self.__prepare('RECONSTRUCT', mailbox) # Cyrus is not recursive for remote reconstruct if recursive: mbxList = self.lm("%s%s*" % (mailbox, self.SEP)) mbxList.reverse() for mbox in mbxList: self.__reconstruct(mbox) self.__reconstruct(mailbox) def lsub(self, pattern="*"): if self.AUSER is None: self.__doexception("lsub") self.__prepare('LSUB') if pattern == '': pattern = "*" res, ml = self.__docommand('lsub', '*', pattern) if not ok(res): self.__verbose( '[LIST] %s: %s' % (res, ml) ) return [] if (len(ml) == 1) and ml[0] is None: self.__verbose( '[LIST] No results' ) return [] mb = [] for mailbox in ml: res = re_mb.match(mailbox) if res is None: continue mbe = unquote(res.group(2)) if 'Noselect' in getflags(res.group(1)): continue mb.append(self.encode(mbe)) return mb def subscribe(self, mailbox): """Subscribe""" if self.AUSER is None: self.__doexception("subscribe") self.__prepare('SUBSCRIBE') res, msg = self.__docommand("subscribe", self.decode(mailbox)) self.__verbose( '[SUBSCRIBE %s] %s: %s' % (mailbox, res, msg[0]) ) def unsubscribe(self, mailbox): """Unsubscribe""" if self.AUSER is None: self.__doexception("unsubscribe") self.__prepare('UNSUBSCRIBE') res, msg = self.__docommand("unsubscribe", self.decode(mailbox)) self.__verbose( '[UNSUBSCRIBE %s] %s: %s' % (mailbox, res, msg[0]) )
redondomarco/useradm
doc/python-cyrus-0.8.5/cyruslib.py
Python
gpl-3.0
24,712
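A minimal usage sketch for the CYRUS administration class defined in cyruslib.py above (not part of the original file); the server URL, credentials and mailbox names are placeholders.

from cyruslib import CYRUS

imap = CYRUS("imap://cyrus.example.org:143")  # placeholder server URL
imap.VERBOSE = True                           # log each IMAP command to stdout
imap.login("cyrus", "secret")                 # must be a Cyrus admin account
imap.cm("user/jdoe")                          # create the user's top-level mailbox
imap.sq("user/jdoe", 10240)                   # quota limit (integer, handed to SETQUOTA)
for mbx in imap.lm("user/jdoe*"):             # list the mailboxes just created
    print(imap.lam(mbx))                      # dump the ACLs of each one
imap.logout()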
#!/usr/bin/env python

from pymongo import MongoClient
import pymongo

HOST = "wfSciwoncWiki:enw1989@172.31.2.76:27001/?authSource=admin"
c = MongoClient('mongodb://'+HOST)

dbname = "wiki"
sessions = "sessions"
contributors = "contributors"
user_sessions = "user_sessions"
top_sessions = "top_sessions"

c[dbname].drop_collection(contributors)
c[dbname].create_collection(contributors)
c[dbname].drop_collection(user_sessions)
c[dbname].create_collection(user_sessions)
c[dbname].drop_collection(top_sessions)
c[dbname].create_collection(top_sessions)

db = c[dbname]
sessions_col = db[sessions]
contributors_col = db[contributors]
user_sessions_col = db[user_sessions]
top_sessions_col = db[top_sessions]

sessions_col.create_index([("contributor_username", pymongo.ASCENDING)])
sessions_col.create_index([("timestamp", pymongo.ASCENDING)])
user_sessions_col.create_index([("timestamp", pymongo.ASCENDING)])

#sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
contributors_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
user_sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
top_sessions_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 2/instances/7_2_wikiflow_1sh_1s_noannot_wmj/init_0/DataStoreInit.py
Python
gpl-3.0
1,335
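A small follow-up sketch for the initialisation script above (not part of the original file): it inserts one sample document and reads it back through the indexes the script creates. The connection string and field values are placeholders.

from pymongo import MongoClient
import pymongo

client = MongoClient("mongodb://localhost:27017/")  # placeholder connection string
db = client["wiki"]

# One sample revision session keyed the same way the workflow expects.
db["sessions"].insert_one({
    "_id": {"filepath": "enwiki-sample.xml", "numline": 1},
    "contributor_username": "ExampleUser",
    "timestamp": "2016-01-01T00:00:00Z",
})

# Both the filter and the sort below are served by the single-field indexes
# created in the script.
cursor = db["sessions"].find({"contributor_username": "ExampleUser"})
for doc in cursor.sort("timestamp", pymongo.ASCENDING):
    print(doc)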
# (c) 2014, James Tanner <tanner.jc@gmail.com> # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import traceback import textwrap from ansible.compat.six import iteritems, string_types from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins import module_loader, action_loader from ansible.cli import CLI from ansible.utils import module_docs try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class DocCLI(CLI): """ Vault command line class """ def __init__(self, args): super(DocCLI, self).__init__(args) self.module_list = [] def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [options] [module...]', epilog='Show Ansible module documentation', module_opts=True, ) self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available modules') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_modules', help='Show documentation for all modules') super(DocCLI, self).parse() display.verbosity = self.options.verbosity def run(self): super(DocCLI, self).run() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) # list modules if self.options.list_dir: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.pager(self.get_module_list_text()) return 0 # process all modules if self.options.all_modules: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.args = sorted(set(self.module_list) - module_docs.BLACKLIST_MODULES) if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line module list text = '' for module in self.args: try: # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue if any(filename.endswith(x) for x in C.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0)) except: display.vvv(traceback.format_exc()) display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) continue if doc is not None: # is there corresponding action plugin? 
if module in action_loader: doc['action'] = True else: doc['action'] = False all_keys = [] for (k,v) in iteritems(doc['options']): all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = filename doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs doc['metadata'] = metadata if self.options.show_snippet: text += self.get_snippet_text(doc) else: text += self.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. raise AnsibleError("Parsing produced an empty object.") except Exception as e: display.vvv(traceback.format_exc()) raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) if text: self.pager(text) return 0 def find_modules(self, path): for module in os.listdir(path): full_path = '/'.join([path, module]) if module.startswith('.'): continue elif os.path.isdir(full_path): continue elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module.startswith('__'): continue elif module in C.IGNORE_FILES: continue elif module.startswith('_'): if os.path.islink(full_path): # avoids aliases continue module = os.path.splitext(module)[0] # removes the extension module = module.lstrip('_') # remove underscore from deprecated modules self.module_list.append(module) def get_module_list_text(self): columns = display.columns displace = max(len(x) for x in self.module_list) linelimit = columns - displace - 5 text = [] deprecated = [] for module in sorted(set(self.module_list)): if module in module_docs.BLACKLIST_MODULES: continue # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue if filename.endswith(".ps1"): continue if os.path.isdir(filename): continue try: doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename) desc = self.tty_ify(doc.get('short_description', '?')).strip() if len(desc) > linelimit: desc = desc[:linelimit] + '...' 
if module.startswith('_'): # Handle deprecated deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module) if len(deprecated) > 0: text.append("\nDEPRECATED:") text.extend(deprecated) return "\n".join(text) @staticmethod def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in finder._get_paths(): if i not in ret: ret.append(i) return os.pathsep.join(ret) def get_snippet_text(self, doc): text = [] desc = CLI.tty_ify(doc['short_description']) text.append("- name: %s" % (desc)) text.append(" action: %s" % (doc['module'])) pad = 31 subdent = " " * pad limit = display.columns - pad for o in sorted(doc['options'].keys()): opt = doc['options'][o] desc = CLI.tty_ify(" ".join(opt['description'])) required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: s = o + "=" else: s = o text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent))) text.append('') return "\n".join(text) def get_man_text(self, doc): opt_indent=" " text = [] text.append("> %s\n" % doc['module'].upper()) pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) if isinstance(doc['description'], list): desc = " ".join(doc['description']) else: desc = doc['description'] text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" ")) # FUTURE: move deprecation to metadata-only if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0: text.append("DEPRECATED: \n%s\n" % doc['deprecated']) metadata = doc['metadata'] supported_by = metadata['supported_by'] text.append("Supported by: %s\n" % supported_by) status = metadata['status'] text.append("Status: %s\n" % ", ".join(status)) if 'action' in doc and doc['action']: text.append(" * note: %s\n" % "This module has a corresponding action plugin.") if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") for o in sorted(doc['option_keys']): opt = doc['options'][o] required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: opt_leadin = "=" else: opt_leadin = "-" text.append("%s %s" % (opt_leadin, o)) if isinstance(opt['description'], list): for entry in opt['description']: text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) else: text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) choices = '' if 'choices' in opt: choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")" default = '' if 'default' in opt or not required: default = "[Default: " + str(opt.get('default', '(null)')) + "]" text.append(textwrap.fill(CLI.tty_ify(choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0: text.append("Notes:") for note in doc['notes']: text.append(textwrap.fill(CLI.tty_ify(note), limit-6, initial_indent=" * ", subsequent_indent=opt_indent)) if 'requirements' in doc and doc['requirements'] is not None and 
len(doc['requirements']) > 0: req = ", ".join(doc['requirements']) text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent)) if 'examples' in doc and len(doc['examples']) > 0: text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) for ex in doc['examples']: text.append("%s\n" % (ex['code'])) if 'plainexamples' in doc and doc['plainexamples'] is not None: text.append("EXAMPLES:") text.append(doc['plainexamples']) if 'returndocs' in doc and doc['returndocs'] is not None: text.append("RETURN VALUES:") text.append(doc['returndocs']) text.append('') maintainers = set() if 'author' in doc: if isinstance(doc['author'], string_types): maintainers.add(doc['author']) else: maintainers.update(doc['author']) if 'maintainers' in doc: if isinstance(doc['maintainers'], string_types): maintainers.add(doc['author']) else: maintainers.update(doc['author']) text.append('MAINTAINERS: ' + ', '.join(maintainers)) text.append('') return "\n".join(text)
kaarolch/ansible
lib/ansible/cli/doc.py
Python
gpl-3.0
13,556
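An illustrative driver for the DocCLI class above (not part of the original file); it mirrors what the ansible-doc console entry point is expected to do with this class, so the argument handling shown here is an assumption.

from ansible.cli.doc import DocCLI

# First element plays the role of the program name, the rest are CLI options.
cli = DocCLI(["ansible-doc", "-l"])  # "-l" lists every available module
cli.parse()
exit_code = cli.run()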
#!/usr/bin/env python

# this program is used to test latency
# don't test RTT bigger than 3 secs - it will break
# we make sure that nothing breaks if there is a packet missing
# this can rarely happen

import select
import socket
import time
import sys
import struct


def pong():
    # easy, receive and send back
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(('0.0.0.0', 1234))
    while True:
        c, addr = s.recvfrom(1)
        s.sendto(c, (addr[0], 1235))
        if c == 'x':
            break
    print 'Finished'
    return 0


def ping(addr, n):
    # send and wait for it back
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(('0.0.0.0', 1235))
    succ = 0
    errs = 0
    while succ != n and errs < 3:  # at most 3 lost packets
        time.sleep(0.02)  # wait a bit
        start = time.time()
        s.sendto('r', (addr, 1234))
        h, _, _ = select.select([s], [], [], 3)  # wait 3 seconds
        end = time.time()
        if h == []:  # lost packet
            # print '# lost packet'
            errs += 1
            continue
        s.recv(1)  # eat the response
        succ += 1
        print '%.8f' % (end - start)
    for x in xrange(10):  # send many packets to be (almost) sure the other end is done
        s.sendto('x', (addr, 1234))
    return errs >= 3


if __name__ == '__main__':
    if 'ping' in sys.argv:
        ret = ping(sys.argv[2], int(sys.argv[3]))
    elif 'pong' in sys.argv:
        ret = pong()
    else:
        print 'ping or pong?'
        ret = 1
    sys.exit(ret)
olbat/distem
test/experimental_testing/exps/latency.py
Python
gpl-3.0
1,573
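A loopback smoke test for the latency script above (not part of the original file), assuming it is saved on the import path as latency.py: pong() runs in a background thread while ping() measures a few round trips against 127.0.0.1.

import threading

import latency  # assumption: the script above is importable as latency.py

server = threading.Thread(target=latency.pong)
server.daemon = True   # pong() exits once it receives the final 'x' packets
server.start()

# ping() prints one RTT in seconds per round trip and returns True on 3+ losses.
lost = latency.ping("127.0.0.1", 5)
print("too many lost packets" if lost else "done")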
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}

DOCUMENTATION = """
---
module: net_vlan
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VLANs on network devices
description:
  - This module provides declarative management of VLANs on network devices.
options:
  name:
    description:
      - Name of the VLAN.
  vlan_id:
    description:
      - ID of the VLAN.
  interfaces:
    description:
      - List of interfaces the VLAN should be configured on.
  collection:
    description: List of VLANs definitions.
  purge:
    description:
      - Purge VLANs not defined in the collections parameter.
    default: no
  state:
    description:
      - State of the VLAN configuration.
    default: present
    choices: ['present', 'absent', 'active', 'suspend']
"""

EXAMPLES = """
- name: configure VLAN ID and name
  net_vlan:
    vlan_id: 20
    name: test-vlan

- name: remove configuration
  net_vlan:
    state: absent

- name: configure VLAN state
  net_vlan:
    vlan_id:
    state: suspend
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always
  type: list
  sample:
    - vlan 20
    - name test-vlan
rpc:
  description: load-configuration RPC send to the device
  returned: C(rpc) is returned only for junos device when configuration is changed on device
  type: string
  sample: "<vlans><vlan><name>test-vlan-4</name></vlan></vlans>"
"""
pshen/ansible
lib/ansible/modules/network/net_vlan.py
Python
gpl-3.0
2,321
#!/usr/bin/env python -t # -*- coding: utf-8 -*- # Copyright (C) 2017 Jonathan Delvaux <pyshell@djoproject.net> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import pytest from pyshell.addons.parameter import addValues from pyshell.addons.parameter import cleanKeyStore from pyshell.addons.parameter import createValue from pyshell.addons.parameter import createValues from pyshell.addons.parameter import getParameter from pyshell.addons.parameter import getProperties from pyshell.addons.parameter import getSelectedValue from pyshell.addons.parameter import listParameter from pyshell.addons.parameter import listProperties from pyshell.addons.parameter import removeParameter from pyshell.addons.parameter import selectValue from pyshell.addons.parameter import setProperties from pyshell.addons.parameter import setValue from pyshell.addons.parameter import setValues from pyshell.addons.parameter import statParameter from pyshell.addons.parameter import subtractValues from pyshell.arg.checker.default import DefaultChecker from pyshell.arg.checker.list import ListArgChecker from pyshell.system.manager.parent import ParentManager from pyshell.system.parameter.environment import EnvironmentParameter from pyshell.system.setting.environment import EnvironmentLocalSettings class TestParameterAddon(object): def setup_method(self, method): self.managers = ParentManager() self.env = self.managers.getEnvironmentManager() self.local_param = self.env.setParameter("local", "value", local_param=True) self.global_param = self.env.setParameter("global", "value", local_param=False) def test_getParameterLocalAndExist(self): param = getParameter(key="local", manager=self.env, start_with_local=True, explore_other_scope=False) assert param.getValue() == ["value"] def test_getParameterLocalAndNotExist(self): with pytest.raises(Exception): getParameter(key="titi", manager=self.env, start_with_local=True, explore_other_scope=False) def test_getParameterGlobalAndExist(self): param = getParameter(key="global", manager=self.env, start_with_local=False, explore_other_scope=False) assert param.getValue() == ["value"] def test_getParameterGlobalAndNotExist(self): with pytest.raises(Exception): getParameter(key="titi", manager=self.env, start_with_local=False, explore_other_scope=False) def test_setPropertiesNoSetter(self): info = "setToto", "getToto", DefaultChecker.getArg() with pytest.raises(Exception): setProperties(key="local", property_info=info, property_value=42, manager=self.env, start_with_local=True, explore_other_scope=False, perfect_match=True) def test_setPropertiesValidCase(self): info = "setRemovable", "isRemovable", DefaultChecker.getBoolean() assert self.local_param.settings.isRemovable() setProperties(key="local", property_info=info, property_value=False, manager=self.env, start_with_local=True, explore_other_scope=False, perfect_match=True) assert not self.local_param.settings.isRemovable() def test_getPropertiesNoGetter(self): info = "setToto", 
"getToto", DefaultChecker.getArg() with pytest.raises(Exception): getProperties(key="local", property_info=info, manager=self.env, start_with_local=True, explore_other_scope=False, perfect_match=True) def test_getPropertiesValidCase(self): info = "setRemovable", "isRemovable", DefaultChecker.getBoolean() assert self.local_param.settings.isRemovable() value = getProperties(key="local", property_info=info, manager=self.env, start_with_local=True, explore_other_scope=False, perfect_match=True) assert value def test_listPropertiesLocal(self): result = listProperties(key="local", manager=self.env, start_with_local=True, explore_other_scope=False, perfect_match=True) assert len(result) is 5 assert result[0] == ("Key", "Value",) assert ("checkerList", True,) in result assert ("readOnly", False,) in result assert ("checker", "any",) in result assert ("removable", True,) in result def test_listPropertiesGlobal(self): result = listProperties(key="global", manager=self.env, start_with_local=False, explore_other_scope=False, perfect_match=True) assert len(result) is 6 assert result[0] == ("Key", "Value",) assert ("checkerList", True,) in result assert ("readOnly", False,) in result assert ("transient", False,) in result assert ("checker", "any",) in result assert ("removable", True,) in result def test_removeParameterDoesNotExist(self): removeParameter(key="titi", manager=self.env, start_with_local=True, explore_other_scope=False) def test_removeParameterExists(self): removeParameter(key="local", manager=self.env, start_with_local=True, explore_other_scope=False) assert not self.env.hasParameter("local", perfect_match=True, local_param=True, explore_other_scope=False) def test_listParameterEmpty(self): output = listParameter(manager=self.managers.getContextManager(), key=None, start_with_local=True, explore_other_scope=True) assert len(output) is 1 assert "No item available" in output[0] def test_listParameterListTypeParameter(self): output = listParameter(manager=self.env, key=None, start_with_local=True, explore_other_scope=False) assert len(output) is 2 assert len(output[0]) is 2 assert "Name" in output[0][0] assert "Value" in output[0][1] assert len(output[1]) is 2 assert "local" in output[1][0] assert "value" == output[1][1] def test_listParameterNotAListTypeParameter(self): settings = EnvironmentLocalSettings(read_only=False, removable=True, checker=DefaultChecker.getString()) parameter = EnvironmentParameter("titi", settings) self.local_param = self.env.setParameter("local", parameter, local_param=True) output = listParameter(manager=self.env, key=None, start_with_local=True, explore_other_scope=False) assert len(output) is 2 assert len(output[0]) is 2 assert "Name" in output[0][0] assert "Value" in output[0][1] assert len(output[1]) is 2 assert "local" in output[1][0] assert "titi" == output[1][1] def test_statEmpty(self): output = statParameter(manager=self.managers.getContextManager(), key=None, start_with_local=True, explore_other_scope=True) assert len(output) is 1 assert "No item available" in output[0] def test_statLocal(self): output = statParameter(manager=self.env, key=None, start_with_local=True, explore_other_scope=False) assert len(output) is 2 assert len(output[0]) is 6 assert "Name" in output[0][0] assert "Scope" in output[0][1] assert "checker" in output[0][2] assert "checkerList" in output[0][3] assert "readOnly" in output[0][4] assert "removable" in output[0][5] assert len(output[1]) is 6 assert "local" in output[1][0] assert "local" in output[1][1] assert "any" in 
output[1][2] assert "True" in output[1][3] assert "False" in output[1][4] assert "True" in output[1][5] def test_statGlobal(self): output = statParameter(manager=self.env, key=None, start_with_local=False, explore_other_scope=False) assert len(output) is 2 assert len(output[0]) is 7 assert "Name" in output[0][0] assert "Scope" in output[0][1] assert "checker" in output[0][2] assert "checkerList" in output[0][3] assert "readOnly" in output[0][4] assert "removable" in output[0][5] assert "transient" in output[0][6] assert len(output[1]) is 7 assert "global" in output[1][0] assert "global" in output[1][1] assert "any" in output[1][2] assert "True" in output[1][3] assert "False" in output[1][4] assert "True" in output[1][5] assert "False" in output[1][6] def test_statAll(self): output = statParameter(manager=self.env, key=None, start_with_local=True, explore_other_scope=True) assert len(output) is 3 assert len(output[0]) is 7 assert "Name" in output[0][0] assert "Scope" in output[0][1] assert "checker" in output[0][2] assert "checkerList" in output[0][3] assert "readOnly" in output[0][4] assert "removable" in output[0][5] assert "transient" in output[0][6] assert len(output[1]) is 7 assert "global" in output[1][0] assert "global" in output[1][1] assert "any" in output[1][2] assert "True" in output[1][3] assert "False" in output[1][4] assert "True" in output[1][5] assert "False" in output[1][6] assert len(output[2]) is 7 assert "local" in output[2][0] assert "local" in output[2][1] assert "any" in output[2][2] assert "True" in output[2][3] assert "False" in output[2][4] assert "True" in output[2][5] assert "-" in output[2][6] def test_statAllWithSameName(self): self.global_param = self.env.setParameter("global", "value", local_param=True) output = statParameter(manager=self.env, key=None, start_with_local=True, explore_other_scope=True) assert len(output) is 3 assert len(output[0]) is 6 assert "Name" in output[0][0] assert "Scope" in output[0][1] assert "checker" in output[0][2] assert "checkerList" in output[0][3] assert "readOnly" in output[0][4] assert "removable" in output[0][5] assert len(output[1]) is 6 assert "global" in output[1][0] assert "local" in output[1][1] assert "any" in output[1][2] assert "True" in output[1][3] assert "False" in output[1][4] assert "True" in output[1][5] assert len(output[2]) is 6 assert "local" in output[2][0] assert "local" in output[2][1] assert "any" in output[2][2] assert "True" in output[2][3] assert "False" in output[2][4] assert "True" in output[2][5] def test_subtractValues(self): values = ("uu", "ii", "oo", "aa",) parameter = EnvironmentParameter(values) self.local_param = self.env.setParameter("local", parameter, local_param=True) subtractValues(key="local", values=("ii", "yy",), manager=self.env, start_with_local=True, explore_other_scope=False) assert tuple(parameter.getValue()) == ("uu", "oo", "aa",) def test_addValues(self): values = ("uu", "oo", "aa",) parameter = EnvironmentParameter(values) self.local_param = self.env.setParameter("local", parameter, local_param=True) addValues(key="local", values=("ii", "aa",), manager=self.env, start_with_local=True, explore_other_scope=False) assert tuple(parameter.getValue()) == ("uu", "oo", "aa", "ii", "aa",) def test_createValuesNoListWithEmptyValue(self): with pytest.raises(Exception): createValues(value_type=DefaultChecker.getArg(), key="creates_value", values=(), manager=self.env, list_enabled=False, local_param=True,) def test_createValuesNoListSuccess(self): 
createValues(value_type=DefaultChecker.getString(), key="creates_value", values=("toto", "titi", "tata"), manager=self.env, list_enabled=False, local_param=True,) assert self.env.hasParameter("creates_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="creates_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "toto" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getString() def test_createValuesListSuccess(self): createValues(value_type=DefaultChecker.getInteger(), key="creates_value", values=(11, 22, 33,), manager=self.env, list_enabled=True, local_param=True,) assert self.env.hasParameter("creates_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="creates_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == [11, 22, 33] assert isinstance(param.settings.getChecker(), ListArgChecker) def_int = DefaultChecker.getInteger() assert param.settings.getChecker().checker is def_int def test_createValuesNoListNoTypeSuccess(self): createValues(value_type=None, key="creates_value", values=("toto", "titi", "tata"), manager=self.env, list_enabled=False, local_param=True,) assert self.env.hasParameter("creates_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="creates_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "toto" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getArg() def test_createValuesListNoTypeSuccess(self): createValues(value_type=None, key="creates_value", values=(11, 22, 33,), manager=self.env, list_enabled=True, local_param=True,) assert self.env.hasParameter("creates_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="creates_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == [11, 22, 33] assert isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker().checker is DefaultChecker.getArg() def test_createValueNoType(self): createValue(value_type=None, key="create_value", value="titi", manager=self.env, local_param=True,) assert self.env.hasParameter("create_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="create_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "titi" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getArg() def test_createValueType(self): createValue(value_type=DefaultChecker.getString(), key="create_value", value="toto", manager=self.env, local_param=True) assert self.env.hasParameter("create_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="create_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "toto" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getString() def test_setValuesOnListParameter(self): createValues(value_type=None, key="set_values", values=(11, 22, 33,), 
manager=self.env, list_enabled=True, local_param=True,) setValues(key="set_values", values=(44, 55, 66,), manager=self.env, start_with_local=True, explore_other_scope=False) assert self.env.hasParameter("set_values", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="set_values", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == [44, 55, 66] assert isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker().checker is DefaultChecker.getArg() def test_setValuesNotOnListParameterWithNoValue(self): createValues(value_type=None, key="set_values", values=("toto", "titi", "tata"), manager=self.env, list_enabled=False, local_param=True,) with pytest.raises(Exception): setValues(key="set_values", values=(), manager=self.env, start_with_local=True, explore_other_scope=False) def test_setValuesNotOnListParameter(self): createValues(value_type=None, key="set_values", values=("toto", "titi", "tata"), manager=self.env, list_enabled=False, local_param=True,) setValues(key="set_values", values=("tutu", "tyty",), manager=self.env, start_with_local=True, explore_other_scope=False) assert self.env.hasParameter("set_values", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="set_values", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "tutu" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getArg() def test_setValue(self): createValue(value_type=DefaultChecker.getString(), key="set_value", value="toto", manager=self.env, local_param=True) setValue(key="set_value", value="plip", manager=self.env, start_with_local=True, explore_other_scope=False) assert self.env.hasParameter("set_value", perfect_match=True, local_param=True, explore_other_scope=False) param = self.env.getParameter(string_path="set_value", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getValue() == "plip" assert not isinstance(param.settings.getChecker(), ListArgChecker) assert param.settings.getChecker() is DefaultChecker.getString() def test_selectValue(self): con = self.managers.getContextManager() createValues(value_type=None, key="selectValue", values=("toto", "titi", "tata"), manager=con, list_enabled=True, local_param=True,) assert con.hasParameter("selectValue", perfect_match=True, local_param=True, explore_other_scope=False) param = con.getParameter(string_path="selectValue", perfect_match=True, local_param=True, explore_other_scope=False) assert param.getSelectedValue() == "toto" selectValue(key="selectValue", value="tata", manager=con, start_with_local=True, explore_other_scope=False) assert param.getSelectedValue() == "tata" def test_getSelectedValue(self): con = self.managers.getContextManager() createValues(value_type=None, key="selectValue", values=("toto", "titi", "tata"), manager=con, list_enabled=True, local_param=True,) value = getSelectedValue(key="selectValue", manager=con, start_with_local=True, explore_other_scope=False) assert value == "toto" def test_cleanKeyStoreAllNoKey(self): keys = self.managers.getKeyManager() cleanKeyStore(manager=keys, remove_locals=True, remove_globals=True) def test_cleanKeyStoreAll(self): keys = self.managers.getKeyManager() createValue(value_type=None, key="local_key", value="0x112233", manager=keys, local_param=True) createValue(value_type=None, key="global_key", 
value="0b00110011", manager=keys, local_param=False) cleanKeyStore(manager=keys, remove_locals=True, remove_globals=True) assert not keys.hasParameter("local_key", perfect_match=True, local_param=True, explore_other_scope=False) assert not keys.hasParameter("global_key", perfect_match=True, local_param=False, explore_other_scope=True) def test_cleanKeyStoreLocalNoKey(self): keys = self.managers.getKeyManager() cleanKeyStore(manager=keys, remove_locals=True, remove_globals=False) def test_cleanKeyStoreLocal(self): keys = self.managers.getKeyManager() createValue(value_type=None, key="local_key", value="0x112233", manager=keys, local_param=True) createValue(value_type=None, key="global_key", value="0b00110011", manager=keys, local_param=False) cleanKeyStore(manager=keys, remove_locals=True, remove_globals=False) assert not keys.hasParameter("local_key", perfect_match=True, local_param=True, explore_other_scope=False) assert keys.hasParameter("global_key", perfect_match=True, local_param=False, explore_other_scope=True) def test_cleanKeyStoreGlobalNoKey(self): keys = self.managers.getKeyManager() cleanKeyStore(manager=keys, remove_locals=False, remove_globals=True) def test_cleanKeyStoreGlobal(self): keys = self.managers.getKeyManager() createValue(value_type=None, key="local_key", value="0x112233", manager=keys, local_param=True) createValue(value_type=None, key="global_key", value="0b00110011", manager=keys, local_param=False) cleanKeyStore(manager=keys, remove_locals=False, remove_globals=True) assert keys.hasParameter("local_key", perfect_match=True, local_param=True, explore_other_scope=False) assert not keys.hasParameter("global_key", perfect_match=True, local_param=False, explore_other_scope=True)
djo938/supershell
pyshell/addons/test/parameter_test.py
Python
gpl-3.0
29,938
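A short sketch of the addon functions exercised by the tests above (not part of the original test file): create an environment parameter, read it back, then remove it, using the same keyword arguments the tests use.

from pyshell.addons.parameter import createValue, getParameter, removeParameter
from pyshell.system.manager.parent import ParentManager

managers = ParentManager()
env = managers.getEnvironmentManager()

createValue(value_type=None, key="greeting", value="hello",
            manager=env, local_param=True)

param = getParameter(key="greeting", manager=env,
                     start_with_local=True, explore_other_scope=False)
print(param.getValue())  # "hello"

removeParameter(key="greeting", manager=env,
                start_with_local=True, explore_other_scope=False)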
from cgi import escape import gzip as gzip_module import re import time import types import uuid from cStringIO import StringIO def resolve_content(response): rv = "".join(item for item in response.iter_content()) if type(rv) == unicode: rv = rv.encode(response.encoding) return rv class Pipeline(object): pipes = {} def __init__(self, pipe_string): self.pipe_functions = self.parse(pipe_string) def parse(self, pipe_string): functions = [] for item in PipeTokenizer().tokenize(pipe_string): if not item: break if item[0] == "function": functions.append((self.pipes[item[1]], [])) elif item[0] == "argument": functions[-1][1].append(item[1]) return functions def __call__(self, request, response): for func, args in self.pipe_functions: response = func(request, response, *args) return response class PipeTokenizer(object): def __init__(self): #This whole class can likely be replaced by some regexps self.state = None def tokenize(self, string): self.string = string self.state = self.func_name_state self._index = 0 while self.state: yield self.state() yield None def get_char(self): if self._index >= len(self.string): return None rv = self.string[self._index] self._index += 1 return rv def func_name_state(self): rv = "" while True: char = self.get_char() if char is None: self.state = None if rv: return ("function", rv) else: return None elif char == "(": self.state = self.argument_state return ("function", rv) elif char == "|": if rv: return ("function", rv) else: rv += char def argument_state(self): rv = "" while True: char = self.get_char() if char is None: self.state = None return ("argument", rv) elif char == "\\": rv += self.get_escape() if rv is None: #This should perhaps be an error instead return ("argument", rv) elif char == ",": return ("argument", rv) elif char == ")": self.state = self.func_name_state return ("argument", rv) else: rv += char def get_escape(self): char = self.get_char() escapes = {"n": "\n", "r": "\r", "t": "\t"} return escapes.get(char, char) class pipe(object): def __init__(self, *arg_converters): self.arg_converters = arg_converters self.max_args = len(self.arg_converters) self.min_args = 0 opt_seen = False for item in self.arg_converters: if not opt_seen: if isinstance(item, opt): opt_seen = True else: self.min_args += 1 else: if not isinstance(item, opt): raise ValueError("Non-optional argument cannot follow optional argument") def __call__(self, f): def inner(request, response, *args): if not (self.min_args <= len(args) <= self.max_args): raise ValueError("Expected between %d and %d args, got %d" % (self.min_args, self.max_args, len(args))) arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args)) return f(request, response, *arg_values) Pipeline.pipes[f.__name__] = inner #We actually want the undecorated function in the main namespace return f class opt(object): def __init__(self, f): self.f = f def __call__(self, arg): return self.f(arg) def nullable(func): def inner(arg): if arg.lower() == "null": return None else: return func(arg) return inner def boolean(arg): if arg.lower() in ("true", "1"): return True elif arg.lower() in ("false", "0"): return False raise ValueError @pipe(int) def status(request, response, code): """Alter the status code. :param code: Status code to use for the response.""" response.status = code return response @pipe(str, str, opt(boolean)) def header(request, response, name, value, append=False): """Set a HTTP header. 
Replaces any existing HTTP header of the same name unless append is set, in which case the header is appended without replacement. :param name: Name of the header to set. :param value: Value to use for the header. :param append: True if existing headers should not be replaced """ if not append: response.headers.set(name, value) else: response.headers.append(name, value) return response @pipe(str) def trickle(request, response, delays): """Send the response in parts, with time delays. :param delays: A string of delays and amounts, in bytes, of the response to send. Each component is separated by a colon. Amounts in bytes are plain integers, whilst delays are floats prefixed with a single d e.g. d1:100:d2 Would cause a 1 second delay, would then send 100 bytes of the file, and then cause a 2 second delay, before sending the remainder of the file. If the last token is of the form rN, instead of sending the remainder of the file, the previous N instructions will be repeated until the whole file has been sent e.g. d1:100:d2:r2 Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay and then a further 100 bytes followed by a two second delay until the response has been fully sent. """ def parse_delays(): parts = delays.split(":") rv = [] for item in parts: if item.startswith("d"): item_type = "delay" item = item[1:] value = float(item) elif item.startswith("r"): item_type = "repeat" value = int(item[1:]) if not value % 2 == 0: raise ValueError else: item_type = "bytes" value = int(item) if len(rv) and rv[-1][0] == item_type: rv[-1][1] += value else: rv.append((item_type, value)) return rv delays = parse_delays() if not delays: return response content = resolve_content(response) modified_content = [] offset = [0] def sleep(seconds): def inner(): time.sleep(seconds) return "" return inner def add_content(delays, repeat=False): for i, (item_type, value) in enumerate(delays): if item_type == "bytes": modified_content.append(content[offset[0]:offset[0] + value]) offset[0] += value elif item_type == "delay": modified_content.append(sleep(value)) elif item_type == "repeat": assert i == len(delays) - 1 while offset[0] < len(content): add_content(delays[-(value + 1):-1], True) if not repeat and offset[0] < len(content): modified_content.append(content[offset[0]:]) add_content(delays) response.content = modified_content return response @pipe(nullable(int), opt(nullable(int))) def slice(request, response, start, end=None): """Send a byte range of the response body :param start: The starting offset. Follows python semantics including negative numbers. :param end: The ending offset, again with python semantics and None (spelled "null" in a query string) to indicate the end of the file. 
""" content = resolve_content(response) response.content = content[start:end] return response class ReplacementTokenizer(object): def ident(scanner, token): return ("ident", token) def index(scanner, token): token = token[1:-1] try: token = int(token) except ValueError: token = unicode(token, "utf8") return ("index", token) def var(scanner, token): token = token[:-1] return ("var", token) def tokenize(self, string): return self.scanner.scan(string)[0] scanner = re.Scanner([(r"\$\w+:", var), (r"\$?\w+(?:\(\))?", ident), (r"\[[^\]]*\]", index)]) class FirstWrapper(object): def __init__(self, params): self.params = params def __getitem__(self, key): try: return self.params.first(key) except KeyError: return "" @pipe() def sub(request, response): """Substitute environment information about the server and request into the script. The format is a very limited template language. Substitutions are enclosed by {{ and }}. There are several avaliable substitutions: host A simple string value and represents the primary host from which the tests are being run. domains A dictionary of available domains indexed by subdomain name. ports A dictionary of lists of ports indexed by protocol. location A dictionary of parts of the request URL. Valid keys are 'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'. 'server' is scheme://host:port, 'host' is hostname:port, and query includes the leading '?', but other delimiters are omitted. headers A dictionary of HTTP headers in the request. GET A dictionary of query parameters supplied with the request. uuid() A pesudo-random UUID suitable for usage with stash So for example in a setup running on localhost with a www subdomain and a http server on ports 80 and 81:: {{host}} => localhost {{domains[www]}} => www.localhost {{ports[http][1]}} => 81 It is also possible to assign a value to a variable name, which must start with the $ character, using the ":" syntax e.g. {{$id:uuid()} Later substitutions in the same file may then refer to the variable by name e.g. 
{{$id}} """ content = resolve_content(response) new_content = template(request, content) response.content = new_content return response def template(request, content): #TODO: There basically isn't any error handling here tokenizer = ReplacementTokenizer() variables = {} def config_replacement(match): content, = match.groups() tokens = tokenizer.tokenize(content) if tokens[0][0] == "var": variable = tokens[0][1] tokens = tokens[1:] else: variable = None assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens field = tokens[0][1] if field in variables: value = variables[field] elif field == "headers": value = request.headers elif field == "GET": value = FirstWrapper(request.GET) elif field in request.server.config: value = request.server.config[tokens[0][1]] elif field == "location": value = {"server": "%s://%s:%s" % (request.url_parts.scheme, request.url_parts.hostname, request.url_parts.port), "scheme": request.url_parts.scheme, "host": "%s:%s" % (request.url_parts.hostname, request.url_parts.port), "hostname": request.url_parts.hostname, "port": request.url_parts.port, "path": request.url_parts.path, "query": "?%s" % request.url_parts.query} elif field == "uuid()": value = str(uuid.uuid4()) elif field == "url_base": value = request.url_base else: raise Exception("Undefined template variable %s" % field) for item in tokens[1:]: value = value[item[1]] assert isinstance(value, (int,) + types.StringTypes), tokens if variable is not None: variables[variable] = value #Should possibly support escaping for other contexts e.g. script #TODO: read the encoding of the response return escape(unicode(value), quote=True).encode("utf-8") template_regexp = re.compile(r"{{([^}]*)}}") new_content, count = template_regexp.subn(config_replacement, content) return new_content @pipe() def gzip(request, response): """This pipe gzip-encodes response data. It sets (or overwrites) these HTTP headers: Content-Encoding is set to gzip Content-Length is set to the length of the compressed content """ content = resolve_content(response) response.headers.set("Content-Encoding", "gzip") out = StringIO() with gzip_module.GzipFile(fileobj=out, mode="w") as f: f.write(content) response.content = out.getvalue() response.headers.set("Content-Length", len(response.content)) return response
kaksmet/servo
tests/wpt/web-platform-tests/tools/wptserve/wptserve/pipes.py
Python
mpl-2.0
13,913
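A self-contained sketch of the pipe-string syntax parsed by PipeTokenizer above (not part of the original file), assuming wptserve is importable; the token stream shown in the comment follows directly from the tokenizer code.

from wptserve.pipes import PipeTokenizer

pipe_string = "status(404)|header(X-Test,1,True)"
tokens = [t for t in PipeTokenizer().tokenize(pipe_string) if t]
# [('function', 'status'), ('argument', '404'),
#  ('function', 'header'), ('argument', 'X-Test'),
#  ('argument', '1'), ('argument', 'True')]
print(tokens)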
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-25 11:36
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('base', '0016_auto_20160224_1039'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='language',
            field=models.CharField(choices=[('FR', 'Français'), ('EN', 'English')], default='FR', max_length=30, null=True),
        ),
    ]
uclouvain/OSIS-Louvain
base/migrations/0017_person_language.py
Python
agpl-3.0
520
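For context (not part of the original migration), the AddField operation above corresponds to a model declaration along these lines; the model module location is an assumption.

from django.db import models


class Person(models.Model):
    LANGUAGES = [('FR', 'Français'), ('EN', 'English')]

    language = models.CharField(choices=LANGUAGES, default='FR',
                                max_length=30, null=True)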
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the OSL-3.0 license found in the # LICENSE file in the root directory of this source tree. from __future__ import unicode_literals from django import forms from django.conf import settings from django.contrib import messages from django.contrib.auth import get_user_model from django.contrib.auth.models import Group as PermissionGroup from django.forms.models import modelform_factory from django.http.response import HttpResponseRedirect from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from django.views.generic.edit import UpdateView from shuup.admin.forms.fields import Select2MultipleField from shuup.admin.toolbar import get_default_edit_toolbar from shuup.admin.utils.urls import get_model_url class PermissionChangeFormBase(forms.ModelForm): old_password = forms.CharField( label=_("Your Password"), widget=forms.PasswordInput, help_text=_("For security purposes, we need your current password.") ) def __init__(self, changing_user, *args, **kwargs): super(PermissionChangeFormBase, self).__init__(*args, **kwargs) self.changing_user = changing_user if getattr(self.instance, 'is_superuser', False) and not getattr(self.changing_user, 'is_superuser', False): self.fields.pop("is_superuser") if not ( self.changing_user == self.instance or getattr(self.instance, 'is_superuser', False) ): # Only require old password when editing self.fields.pop("old_password") initial_groups = self._get_initial_groups() permission_groups_field = Select2MultipleField( model=PermissionGroup, initial=[group.pk for group in initial_groups], required=False, label=_("Permission Groups"), help_text=_( "The permission groups that this user belongs to. " "Permission groups are configured through Contacts - Permission Groups." ) ) permission_groups_field.widget.choices = [(group.pk, force_text(group)) for group in initial_groups] self.fields["permission_groups"] = permission_groups_field def _get_initial_groups(self): if self.instance.pk and hasattr(self.instance, "groups"): return self.instance.groups.all() else: return [] def clean_old_password(self): """ Validates that the old_password field is correct. """ old_password = self.cleaned_data["old_password"] if not self.changing_user.check_password(old_password): raise forms.ValidationError( _("Your old password was entered incorrectly. 
Please enter it again."), code='password_incorrect', ) return old_password def clean_members(self): members = self.cleaned_data.get("members", []) return get_user_model().objects.filter(pk__in=members).all() def clean_permission_groups(self): permission_groups = self.cleaned_data.get("permission_groups", []) return PermissionGroup.objects.filter(pk__in=permission_groups) def clean(self): for field in ("is_staff", "is_superuser"): flag = self.cleaned_data[field] if self.changing_user == self.instance and not flag: self.add_error(field, _("You can't unset this status for yourself.")) return self.cleaned_data def save(self): obj = super(PermissionChangeFormBase, self).save() obj.groups.clear() obj.groups = self.cleaned_data["permission_groups"] class UserChangePermissionsView(UpdateView): template_name = "shuup/admin/users/change_permissions.jinja" model = settings.AUTH_USER_MODEL title = _("Change User Permissions") def get_form_class(self): return modelform_factory( model=get_user_model(), form=PermissionChangeFormBase, fields=("is_staff", "is_superuser") ) def get_queryset(self): return get_user_model().objects.all() def get_toolbar(self): toolbar = get_default_edit_toolbar( self, "permissions_form", discard_url=get_model_url(self.object), with_split_save=False ) return toolbar def get_form_kwargs(self): kwargs = super(UserChangePermissionsView, self).get_form_kwargs() kwargs["changing_user"] = self.request.user return kwargs def get_context_data(self, **kwargs): context = super(UserChangePermissionsView, self).get_context_data(**kwargs) context["toolbar"] = self.get_toolbar() context["title"] = _("Change Permissions: %s") % self.object return context def form_valid(self, form): form.save() messages.success(self.request, _("Permissions changed for %s.") % self.object) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return get_model_url(self.object)
suutari-ai/shoop
shuup/admin/modules/users/views/permissions.py
Python
agpl-3.0
5,257
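An illustrative sketch (not part of the original file) of the modelform_factory pattern used by UserChangePermissionsView.get_form_class(); the user lookups are placeholders and the form is left unbound.

from django.contrib.auth import get_user_model
from django.forms.models import modelform_factory

from shuup.admin.modules.users.views.permissions import PermissionChangeFormBase

User = get_user_model()
PermissionForm = modelform_factory(
    model=User,
    form=PermissionChangeFormBase,
    fields=("is_staff", "is_superuser"),
)

admin = User.objects.get(username="admin")   # placeholder: the editing admin
target = User.objects.get(username="jdoe")   # placeholder: the user being edited

# Unbound form, e.g. for rendering; "old_password" becomes required once bound.
form = PermissionForm(changing_user=admin, instance=target)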
""" Utility library for working with the edx-milestones app """ from django.conf import settings from django.utils.translation import gettext as _ from edx_toggles.toggles import SettingDictToggle from milestones import api as milestones_api from milestones.exceptions import InvalidMilestoneRelationshipTypeException, InvalidUserException from milestones.models import MilestoneRelationshipType from milestones.services import MilestonesService from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.lib.cache_utils import get_cache from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order NAMESPACE_CHOICES = { 'ENTRANCE_EXAM': 'entrance_exams' } REQUEST_CACHE_NAME = "milestones" # TODO this should be moved to edx/edx-milestones # .. toggle_name: FEATURES['MILESTONES_APP'] # .. toggle_implementation: SettingDictToggle # .. toggle_default: False # .. toggle_description: Enable the milestones application, which manages significant Course and/or Student events in # the Open edX platform. (see https://github.com/edx/edx-milestones) Note that this feature is required to enable # course pre-requisites. # .. toggle_use_cases: open_edx # .. toggle_creation_date: 2014-11-21 ENABLE_MILESTONES_APP = SettingDictToggle("FEATURES", "MILESTONES_APP", default=False, module_name=__name__) def get_namespace_choices(): """ Return the enum to the caller """ return NAMESPACE_CHOICES def is_prerequisite_courses_enabled(): """ Returns boolean indicating prerequisite courses enabled system wide or not. """ return settings.FEATURES.get('ENABLE_PREREQUISITE_COURSES') and ENABLE_MILESTONES_APP.is_enabled() def add_prerequisite_course(course_key, prerequisite_course_key): """ It would create a milestone, then it would set newly created milestones as requirement for course referred by `course_key` and it would set newly created milestone as fulfillment milestone for course referred by `prerequisite_course_key`. """ if not is_prerequisite_courses_enabled(): return None milestone_name = _('Course {course_id} requires {prerequisite_course_id}').format( course_id=str(course_key), prerequisite_course_id=str(prerequisite_course_key) ) milestone = milestones_api.add_milestone({ 'name': milestone_name, 'namespace': str(prerequisite_course_key), 'description': _('System defined milestone'), }) # add requirement course milestone milestones_api.add_course_milestone(course_key, 'requires', milestone) # add fulfillment course milestone milestones_api.add_course_milestone(prerequisite_course_key, 'fulfills', milestone) def remove_prerequisite_course(course_key, milestone): """ It would remove pre-requisite course milestone for course referred by `course_key`. """ if not is_prerequisite_courses_enabled(): return None milestones_api.remove_course_milestone( course_key, milestone, ) def set_prerequisite_courses(course_key, prerequisite_course_keys): """ It would remove any existing requirement milestones for the given `course_key` and create new milestones for each pre-requisite course in `prerequisite_course_keys`. To only remove course milestones pass `course_key` and empty list or None as `prerequisite_course_keys` . 
""" if not is_prerequisite_courses_enabled(): return None #remove any existing requirement milestones with this pre-requisite course as requirement course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="requires") if course_milestones: for milestone in course_milestones: remove_prerequisite_course(course_key, milestone) # add milestones if pre-requisite course is selected if prerequisite_course_keys: for prerequisite_course_key_string in prerequisite_course_keys: prerequisite_course_key = CourseKey.from_string(prerequisite_course_key_string) add_prerequisite_course(course_key, prerequisite_course_key) def get_pre_requisite_courses_not_completed(user, enrolled_courses): """ Makes a dict mapping courses to their unfulfilled milestones using the fulfillment API of the milestones app. Arguments: user (User): the user for whom we are checking prerequisites. enrolled_courses (CourseKey): a list of keys for the courses to be checked. The given user must be enrolled in all of these courses. Returns: dict[CourseKey: dict[ 'courses': list[dict['key': CourseKey, 'display': str]] ]] If a course has no incomplete prerequisites, it will be excluded from the dictionary. """ if not is_prerequisite_courses_enabled(): return {} pre_requisite_courses = {} for course_key in enrolled_courses: required_courses = [] fulfillment_paths = milestones_api.get_course_milestones_fulfillment_paths(course_key, {'id': user.id}) for __, milestone_value in fulfillment_paths.items(): for key, value in milestone_value.items(): if key == 'courses' and value: for required_course in value: required_course_key = CourseKey.from_string(required_course) required_course_overview = CourseOverview.get_from_id(required_course_key) required_courses.append({ 'key': required_course_key, 'display': get_course_display_string(required_course_overview) }) # If there are required courses, add them to the result dict. if required_courses: pre_requisite_courses[course_key] = {'courses': required_courses} return pre_requisite_courses def get_prerequisite_courses_display(course_descriptor): """ It would retrieve pre-requisite courses, make display strings and return list of dictionary with course key as 'key' field and course display name as `display` field. """ pre_requisite_courses = [] if is_prerequisite_courses_enabled() and course_descriptor.pre_requisite_courses: for course_id in course_descriptor.pre_requisite_courses: course_key = CourseKey.from_string(course_id) required_course_descriptor = modulestore().get_course(course_key) prc = { 'key': course_key, 'display': get_course_display_string(required_course_descriptor) } pre_requisite_courses.append(prc) return pre_requisite_courses def get_course_display_string(descriptor): """ Returns a string to display for a course or course overview. Arguments: descriptor (CourseBlock|CourseOverview): a course or course overview. """ return ' '.join([ descriptor.display_org_with_default, descriptor.display_number_with_default ]) def fulfill_course_milestone(course_key, user): """ Marks the course specified by the given course_key as complete for the given user. If any other courses require this course as a prerequisite, their milestones will be appropriately updated. 
""" if not ENABLE_MILESTONES_APP.is_enabled(): return None try: course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills") except InvalidMilestoneRelationshipTypeException: # we have not seeded milestone relationship types seed_milestone_relationship_types() course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills") for milestone in course_milestones: milestones_api.add_user_milestone({'id': user.id}, milestone) def remove_course_milestones(course_key, user, relationship): """ Remove all user milestones for the course specified by course_key. """ if not ENABLE_MILESTONES_APP.is_enabled(): return None course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship=relationship) for milestone in course_milestones: milestones_api.remove_user_milestone({'id': user.id}, milestone) def get_required_content(course_key, user): """ Queries milestones subsystem to see if the specified course is gated on one or more milestones, and if those milestones can be fulfilled via completion of a particular course content module """ required_content = [] if ENABLE_MILESTONES_APP.is_enabled(): course_run_id = str(course_key) if user.is_authenticated: # Get all of the outstanding milestones for this course, for this user try: milestone_paths = get_course_milestones_fulfillment_paths( course_run_id, serialize_user(user) ) except InvalidMilestoneRelationshipTypeException: return required_content # For each outstanding milestone, see if this content is one of its fulfillment paths for path_key in milestone_paths: milestone_path = milestone_paths[path_key] if milestone_path.get('content') and len(milestone_path['content']): # lint-amnesty, pylint: disable=len-as-condition for content in milestone_path['content']: required_content.append(content) else: if get_course_milestones(course_run_id): # NOTE (CCB): The initial version of anonymous courseware access is very simple. We avoid accidentally # exposing locked content by simply avoiding anonymous access altogether for courses runs with # milestones. raise InvalidUserException('Anonymous access is not allowed for course runs with milestones set.') return required_content def milestones_achieved_by_user(user, namespace): """ It would fetch list of milestones completed by user """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.get_user_milestones({'id': user.id}, namespace) def is_valid_course_key(key): """ validates course key. returns True if valid else False. 
""" try: course_key = CourseKey.from_string(key) except InvalidKeyError: course_key = key return isinstance(course_key, CourseKey) def seed_milestone_relationship_types(): """ Helper method to pre-populate MRTs so the tests can run """ if not ENABLE_MILESTONES_APP.is_enabled(): return None MilestoneRelationshipType.objects.create(name='requires') MilestoneRelationshipType.objects.create(name='fulfills') def generate_milestone_namespace(namespace, course_key=None): """ Returns a specifically-formatted namespace string for the specified type """ if namespace in list(NAMESPACE_CHOICES.values()): if namespace == 'entrance_exams': return '{}.{}'.format(str(course_key), NAMESPACE_CHOICES['ENTRANCE_EXAM']) def serialize_user(user): """ Returns a milestones-friendly representation of a user object """ return { 'id': user.id, } def add_milestone(milestone_data): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.add_milestone(milestone_data) def get_milestones(namespace): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return [] return milestones_api.get_milestones(namespace) def get_milestone_relationship_types(): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return {} return milestones_api.get_milestone_relationship_types() def add_course_milestone(course_id, relationship, milestone): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.add_course_milestone(course_id, relationship, milestone) def get_course_milestones(course_id): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return [] return milestones_api.get_course_milestones(course_id) def add_course_content_milestone(course_id, content_id, relationship, milestone): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.add_course_content_milestone(course_id, content_id, relationship, milestone) def get_course_content_milestones(course_id, content_id=None, relationship='requires', user_id=None): """ Client API operation adapter/wrapper Uses the request cache to store all of a user's milestones Returns all content blocks in a course if content_id is None, otherwise it just returns that specific content block. """ if not ENABLE_MILESTONES_APP.is_enabled(): return [] if user_id is None: return milestones_api.get_course_content_milestones(course_id, content_id, relationship) request_cache_dict = get_cache(REQUEST_CACHE_NAME) if user_id not in request_cache_dict: request_cache_dict[user_id] = {} if relationship not in request_cache_dict[user_id]: request_cache_dict[user_id][relationship] = milestones_api.get_course_content_milestones( course_key=course_id, relationship=relationship, user={"id": user_id} ) if content_id is None: return request_cache_dict[user_id][relationship] return [m for m in request_cache_dict[user_id][relationship] if m['content_id'] == str(content_id)] def remove_course_content_user_milestones(course_key, content_key, user, relationship): """ Removes the specified User-Milestone link from the system for the specified course content module. 
""" if not ENABLE_MILESTONES_APP.is_enabled(): return [] course_content_milestones = milestones_api.get_course_content_milestones(course_key, content_key, relationship) for milestone in course_content_milestones: milestones_api.remove_user_milestone({'id': user.id}, milestone) def remove_content_references(content_id): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.remove_content_references(content_id) def any_unfulfilled_milestones(course_id, user_id): """ Returns a boolean if user has any unfulfilled milestones """ if not ENABLE_MILESTONES_APP.is_enabled(): return False user_id = None if user_id is None else int(user_id) fulfillment_paths = milestones_api.get_course_milestones_fulfillment_paths(course_id, {'id': user_id}) # Returns True if any of the milestones is unfulfilled. False if # values is empty or all values are. return any(fulfillment_paths.values()) def get_course_milestones_fulfillment_paths(course_id, user_id): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.get_course_milestones_fulfillment_paths( course_id, user_id ) def add_user_milestone(user, milestone): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.add_user_milestone(user, milestone) def remove_user_milestone(user, milestone): """ Client API operation adapter/wrapper """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return milestones_api.remove_user_milestone(user, milestone) def get_service(): """ Returns MilestonesService instance if feature flag enabled; else returns None. Note: MilestonesService only has access to the functions explicitly requested in the MilestonesServices class """ if not ENABLE_MILESTONES_APP.is_enabled(): return None return MilestonesService()
eduNEXT/edx-platform
common/djangoapps/util/milestones_helpers.py
Python
agpl-3.0
16,458
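A short, hypothetical call sequence against the helpers defined above, assuming the MILESTONES_APP and ENABLE_PREREQUISITE_COURSES feature flags are enabled and the platform environment is set up; the course ids are made up.

# Hypothetical usage of the milestones helpers above; course ids are invented.
from opaque_keys.edx.keys import CourseKey

from common.djangoapps.util import milestones_helpers as mh

course_key = CourseKey.from_string("course-v1:OrgX+Intro101+2024")
prereq_key = CourseKey.from_string("course-v1:OrgX+Basics100+2024")

# Gate Intro101 on completion of Basics100 (a no-op when the flags are off).
mh.add_prerequisite_course(course_key, prereq_key)

# When a learner completes Basics100, unlock anything that required it:
#   mh.fulfill_course_milestone(prereq_key, user)

# And for display purposes, e.g. in Studio's course settings:
#   mh.get_prerequisite_courses_display(course_descriptor)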
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy from rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction from consts import EDIT, CONFIRM, EDIT_MULTIPLE, VIEW from lib.shortcuts import render_to_response, render_to_xml_response, render_to_context_response from gf.gas.forms.base import SingleUserForm from django.forms.formsets import formset_factory from lib.formsets import BaseFormSetWithRequest from flexi_auth.models import ObjectWithContext from gf.base.models import Person #------------------------------------------------------------------------------# # # #------------------------------------------------------------------------------# class Block(BlockSSDataTables): BLOCK_NAME = "users" #FIXME minor: BLOCK_DESCRIPTION = _lazy("Users") #FIXME minor: _lazy is appropriate, but there is probably some bug elsewhere...now use ugettext it is safe in our case BLOCK_DESCRIPTION = _("Users") BLOCK_VALID_RESOURCE_TYPES = [] #KO: because we NEED subclasses COLUMN_INDEX_NAME_MAP = { 0: 'pk', 1: 'username', 2: 'first_name', 3: 'last_name', 4: 'email', 5: 'last_login', 6: 'date_joined', 7: 'is_active', 8: 'person' } def _get_user_actions(self, request): user_actions = [] if request.user.has_perm(EDIT, obj=ObjectWithContext(request.resource)): user_actions += [ ResourceBlockAction( block_name = self.BLOCK_NAME, resource = request.resource, name=VIEW, verbose_name=_("Show"), popup_form=False, method="get", ), ResourceBlockAction( block_name = self.BLOCK_NAME, resource = request.resource, name=EDIT_MULTIPLE, verbose_name=_("Edit"), popup_form=False, method="get", ), ] return user_actions def _get_resource_list(self, request): """Rather than adding a 'users' method to the resource, we compute users list here, because users may be not still bound to the correspondent Person. This block is in fact used only for Admin purposes during a specific stage of the registration process. """ raise ProgrammingError("You must use a subclass to retrieve users list") def _get_edit_multiple_form_class(self): qs = self._get_resource_list(self.request) return formset_factory( form=SingleUserForm, formset=BaseFormSetWithRequest, extra=qs.count() #0 ) def _get_records(self, request, querySet): """Return records of rendered table fields.""" data = {} i = 0 c = querySet.count() map_info = { } av = True for i,el in enumerate(querySet): key_prefix = 'form-%d' % i try: el._cached_p = el.person except Person.DoesNotExist as e: el._cached_p = None data.update({ '%s-id' % key_prefix : el.pk, '%s-pk' % key_prefix : el.pk, '%s-is_active' % key_prefix : bool(el.is_active), '%s-person' % key_prefix : el._cached_p, }) map_info[el.pk] = {'formset_index' : i} data['form-TOTAL_FORMS'] = c data['form-INITIAL_FORMS'] = c data['form-MAX_NUM_FORMS'] = 0 formset = self._get_edit_multiple_form_class()(request, data) records = [] for i, el in enumerate(querySet): form = formset[map_info[el.pk]['formset_index']] if el._cached_p: person = el._cached_p person_urn = el._cached_p.urn else: person = form['person'] person_urn = None records.append({ 'id' : "%s %s" % (form['pk'], form['id']), 'username' : el.username, 'first_name' : el.first_name, 'last_name' : el.last_name, 'email' : el.email, 'last_login' : el.last_login, 'date_joined' : el.date_joined, 'is_active' : form['is_active'], 'person' : person, 'person_urn': person_urn, }) return formset, records, {}
michelesr/gasistafelice
gasistafelice/rest/views/blocks/users.py
Python
agpl-3.0
4,621
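Because _get_resource_list above deliberately raises, concrete blocks have to subclass it and decide which users they manage. The snippet below is a hypothetical subclass; the block name, resource types and queryset are assumptions, not code from gasistafelice.

# Hypothetical subclass of the users Block above; queryset and names are invented.
from django.contrib.auth.models import User

from rest.views.blocks import users as users_block


class Block(users_block.Block):

    BLOCK_NAME = "gas_users"
    BLOCK_VALID_RESOURCE_TYPES = ["gas"]

    def _get_resource_list(self, request):
        # Accounts that are not yet bound to a Person, i.e. still mid-registration.
        return User.objects.filter(person__isnull=True)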
import os

import pytest

skipiftravis = pytest.mark.skipif(
    os.environ.get('TRAVIS') == 'true', reason='skip on Travis-CI')
SANDAG/pandana
pandana/testing.py
Python
agpl-3.0
129
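The marker defined above is meant to decorate individual tests; a minimal usage sketch (the test name and body are placeholders):

# Example use of the skipiftravis marker defined above.
from pandana.testing import skipiftravis


@skipiftravis
def test_heavy_network_computation():
    # Runs everywhere except on Travis-CI, where TRAVIS == 'true'.
    assert 1 + 1 == 2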
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2020, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import copy from torch.optim.lr_scheduler import OneCycleLR from nupic.research.frameworks.vernon.experiment_utils import create_lr_scheduler class MultiCycleLR: """ Composes a sequence of OneCycleLR regimes, allowing different configurations for each cycle. This infers args like total_batches, epochs, and also the div_factor for subsequent cycles. """ def setup_experiment(self, config): """ :param config: - multi_cycle_lr_args: A list of (epoch, dict) pairs. The dicts don't need to include epoch counts, this is inferred from the config. """ config = copy.deepcopy(config) ignored_class = config.pop("lr_scheduler_class", None) ignored_args = config.pop("lr_scheduler_args", None) config["lr_scheduler_step_every_batch"] = True super().setup_experiment(config) if ignored_class is not None and ignored_class != OneCycleLR: self.logger.warning("Ignoring lr_scheduler_class, using OneCycleLR") if ignored_args is not None and len(ignored_args) > 0: self.logger.warning("Ignoring lr_scheduler_args, using " "multi_cycle_lr_args") # Insert epoch counts and div_factors improved_args = {} multi_cycle_lr_args = sorted(config["multi_cycle_lr_args"], key=lambda x: x[0]) for i, (start_epoch, cycle_config) in enumerate(multi_cycle_lr_args): if i + 1 < len(multi_cycle_lr_args): end_epoch = multi_cycle_lr_args[i + 1][0] else: end_epoch = config["epochs"] cycle_config = copy.deepcopy(cycle_config) cycle_config["epochs"] = end_epoch - start_epoch # Default behavior: no sudden change in learning rate between # cycles. if "div_factor" not in cycle_config and i > 0: prev_cycle_config = multi_cycle_lr_args[i - 1][1] if "final_div_factor" in prev_cycle_config: cycle_config["div_factor"] = \ prev_cycle_config["final_div_factor"] improved_args[start_epoch] = cycle_config self.multi_cycle_args_by_epoch = improved_args self.logger.info("MultiCycleLR regime: " f"{self.multi_cycle_args_by_epoch}") # Set it immediately, rather than waiting for the pre_epoch, in case a # restore is occurring. 
args = self.multi_cycle_args_by_epoch[0] self.lr_scheduler = create_lr_scheduler( optimizer=self.optimizer, lr_scheduler_class=OneCycleLR, lr_scheduler_args=args, steps_per_epoch=self.total_batches) def pre_epoch(self): super().pre_epoch() if self.current_epoch != 0 and \ self.current_epoch in self.multi_cycle_args_by_epoch: args = self.multi_cycle_args_by_epoch[self.current_epoch] self.lr_scheduler = create_lr_scheduler( optimizer=self.optimizer, lr_scheduler_class=OneCycleLR, lr_scheduler_args=args, steps_per_epoch=self.total_batches) @classmethod def get_execution_order(cls): eo = super().get_execution_order() eo["setup_experiment"].insert( 0, "MultiCycleLR: Prevent LR scheduler from being constructed") eo["setup_experiment"].append("MultiCycleLR: Initialize") eo["pre_epoch"].append("MultiCycleLR: Maybe initialize lr_scheduler") return eo
numenta/nupic.research
src/nupic/research/frameworks/vernon/mixins/multi_cycle_lr.py
Python
agpl-3.0
4,655
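A hypothetical experiment config for the mixin above. The epoch boundaries and OneCycleLR keyword arguments are illustrative, and the experiment class the mixin is combined with is assumed to exist elsewhere.

# Illustrative config for MultiCycleLR; all numbers are made up.
config = dict(
    epochs=60,
    # (start_epoch, OneCycleLR kwargs) pairs; per-cycle epoch counts are inferred.
    multi_cycle_lr_args=[
        (0,  dict(max_lr=1.0,  pct_start=0.3, final_div_factor=1000)),
        (30, dict(max_lr=0.1,  pct_start=0.1, final_div_factor=1000)),
        (50, dict(max_lr=0.01, pct_start=0.1)),
    ],
)
# Cycles after the first inherit div_factor from the previous cycle's
# final_div_factor, so the learning rate does not jump at cycle boundaries.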
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RPng(RPackage):
    """This package provides an easy and simple way to read, write and
    display bitmap images stored in the PNG format. It can read and write
    both files and in-memory raw vectors."""

    homepage = "http://www.rforge.net/png/"
    url      = "https://cloud.r-project.org/src/contrib/png_0.1-7.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/png"

    version('0.1-7', sha256='e269ff968f04384fc9421d17cfc7c10cf7756b11c2d6d126e9776f5aca65553c')

    depends_on('r@2.9.0:', type=('build', 'run'))
    depends_on('libpng')
iulian787/spack
var/spack/repos/builtin/packages/r-png/package.py
Python
lgpl-2.1
793
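The recipe above follows Spack's standard RPackage template, so adding a new CRAN package mostly means swapping the metadata. A placeholder sibling recipe is sketched below; the package name, URLs and checksum are invented.

# Hypothetical RPackage recipe in the same style; every value is a placeholder.
from spack import *


class RExamplepkg(RPackage):
    """Placeholder description of an imaginary CRAN package."""

    homepage = "https://cran.r-project.org/package=examplepkg"
    url      = "https://cloud.r-project.org/src/contrib/examplepkg_1.0.tar.gz"

    version('1.0', sha256='0' * 64)  # placeholder checksum

    depends_on('r@3.0.0:', type=('build', 'run'))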
# -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*- # vi:si:et:sw=4:sts=4:ts=4 # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L. # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # # This file may be distributed and/or modified under the terms of # the GNU Lesser General Public License version 2.1 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.LGPL" in the source distribution for more information. # # Headers in this file shall remain intact. import gst from twisted.internet import defer, reactor from twisted.trial import unittest from flumotion.common import testsuite from flumotion.component import padmonitor attr = testsuite.attr class TestPadMonitor(testsuite.TestCase): slow = True def _run_pipeline(self, pipeline): pipeline.set_state(gst.STATE_PLAYING) pipeline.get_bus().poll(gst.MESSAGE_EOS, -1) pipeline.set_state(gst.STATE_NULL) def testPadMonitorActivation(self): pipeline = gst.parse_launch( 'fakesrc num-buffers=1 ! identity name=id ! fakesink') identity = pipeline.get_by_name('id') srcpad = identity.get_pad('src') monitor = padmonitor.PadMonitor(srcpad, "identity-source", lambda name: None, lambda name: None) self.assertEquals(monitor.isActive(), False) self._run_pipeline(pipeline) # Now give the reactor a chance to process the callFromThread() d = defer.Deferred() def finishTest(): self.assertEquals(monitor.isActive(), True) monitor.detach() d.callback(True) reactor.callLater(0.1, finishTest) return d def testPadMonitorTimeout(self): padmonitor.PadMonitor.PAD_MONITOR_PROBE_INTERVAL = 0.2 padmonitor.PadMonitor.PAD_MONITOR_CHECK_INTERVAL = 0.5 pipeline = gst.parse_launch( 'fakesrc num-buffers=1 ! identity name=id ! fakesink') identity = pipeline.get_by_name('id') srcpad = identity.get_pad('src') # Now give the reactor a chance to process the callFromThread() def finished(): monitor.detach() d.callback(True) def hasInactivated(name): # We can't detach the monitor from this callback safely, so do # it from a reactor.callLater() reactor.callLater(0, finished) def hasActivated(): self.assertEquals(monitor.isActive(), True) # Now, we don't send any more data, and after our 0.5 second # timeout we should go inactive. Pass our test if that happens. # Otherwise trial will time out. monitor = padmonitor.PadMonitor(srcpad, "identity-source", lambda name: None, hasInactivated) self.assertEquals(monitor.isActive(), False) self._run_pipeline(pipeline) d = defer.Deferred() reactor.callLater(0.2, hasActivated) return d if __name__ == '__main__': unittest.main()
timvideos/flumotion
flumotion/test/test_component_padmonitor.py
Python
lgpl-2.1
3,361
#!/usr/bin/env python3
#
# test_codecmaps_cn.py
#   Codec mapping tests for PRC encodings
#

from test import support
from test import test_multibytecodec_support
import unittest

class TestGB2312Map(test_multibytecodec_support.TestBase_Mapping,
                    unittest.TestCase):
    encoding = 'gb2312'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-CN.TXT'

class TestGBKMap(test_multibytecodec_support.TestBase_Mapping,
                 unittest.TestCase):
    encoding = 'gbk'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/' \
                 'MICSFT/WINDOWS/CP936.TXT'

class TestGB18030Map(test_multibytecodec_support.TestBase_Mapping,
                     unittest.TestCase):
    encoding = 'gb18030'
    mapfileurl = 'http://source.icu-project.org/repos/icu/data/' \
                 'trunk/charset/data/xml/gb-18030-2000.xml'


def test_main():
    support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
harmy/kbengine
kbe/res/scripts/common/Lib/test/test_codecmaps_cn.py
Python
lgpl-3.0
1,006
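The mapping tests above exercise CPython's PRC codecs; a quick stand-alone illustration of the codecs they validate:

# Round-trip a small string through the PRC codecs covered by the tests above.
text = "中文编码"

for enc in ("gb2312", "gbk", "gb18030"):
    data = text.encode(enc)
    assert data.decode(enc) == text
    print(enc, data)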
"""Implementation of magic functions for interaction with the OS. Note: this module is named 'osm' instead of 'os' to avoid a collision with the builtin. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 The IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Stdlib import io import os import re import sys from pprint import pformat # Our own packages from IPython.core import magic_arguments from IPython.core import oinspect from IPython.core import page from IPython.core.error import UsageError, StdinNotImplementedError from IPython.core.magic import ( Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic ) from IPython.testing.skipdoctest import skip_doctest from IPython.utils.io import file_read, nlprint from IPython.utils.path import get_py_filename, unquote_filename from IPython.utils.process import abbrev_cwd from IPython.utils.terminal import set_term_title #----------------------------------------------------------------------------- # Magic implementation classes #----------------------------------------------------------------------------- @magics_class class OSMagics(Magics): """Magics to interact with the underlying OS (shell-type functionality). """ @skip_doctest @line_magic def alias(self, parameter_s=''): """Define an alias for a system command. '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd' Then, typing 'alias_name params' will execute the system command 'cmd params' (from your underlying operating system). Aliases have lower precedence than magic functions and Python normal variables, so if 'foo' is both a Python variable and an alias, the alias can not be executed until 'del foo' removes the Python variable. You can use the %l specifier in an alias definition to represent the whole line when the alias is called. For example:: In [2]: alias bracket echo "Input in brackets: <%l>" In [3]: bracket hello world Input in brackets: <hello world> You can also define aliases with parameters using %s specifiers (one per parameter):: In [1]: alias parts echo first %s second %s In [2]: %parts A B first A second B In [3]: %parts A Incorrect number of arguments: 2 expected. parts is an alias to: 'echo first %s second %s' Note that %l and %s are mutually exclusive. You can only use one or the other in your aliases. Aliases expand Python variables just like system calls using ! or !! do: all expressions prefixed with '$' get expanded. For details of the semantic rules, see PEP-215: http://www.python.org/peps/pep-0215.html. This is the library used by IPython for variable expansion. If you want to access a true shell variable, an extra $ is necessary to prevent its expansion by IPython:: In [6]: alias show echo In [7]: PATH='A Python string' In [8]: show $PATH A Python string In [9]: show $$PATH /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:... You can use the alias facility to acess all of $PATH. See the %rehash and %rehashx functions, which automatically create aliases for the contents of your $PATH. 
If called with no parameters, %alias prints the current alias table.""" par = parameter_s.strip() if not par: aliases = sorted(self.shell.alias_manager.aliases) # stored = self.shell.db.get('stored_aliases', {} ) # for k, v in stored: # atab.append(k, v[0]) print "Total number of aliases:", len(aliases) sys.stdout.flush() return aliases # Now try to define a new one try: alias,cmd = par.split(None, 1) except: print oinspect.getdoc(self.alias) else: self.shell.alias_manager.soft_define_alias(alias, cmd) # end magic_alias @line_magic def unalias(self, parameter_s=''): """Remove an alias""" aname = parameter_s.strip() self.shell.alias_manager.undefine_alias(aname) stored = self.shell.db.get('stored_aliases', {} ) if aname in stored: print "Removing %stored alias",aname del stored[aname] self.shell.db['stored_aliases'] = stored @line_magic def rehashx(self, parameter_s=''): """Update the alias table with all executable files in $PATH. This version explicitly checks that every entry in $PATH is a file with execute access (os.X_OK), so it is much slower than %rehash. Under Windows, it checks executability as a match against a '|'-separated string of extensions, stored in the IPython config variable win_exec_ext. This defaults to 'exe|com|bat'. This function also resets the root module cache of module completer, used on slow filesystems. """ from IPython.core.alias import InvalidAliasError # for the benefit of module completer in ipy_completers.py del self.shell.db['rootmodules'] path = [os.path.abspath(os.path.expanduser(p)) for p in os.environ.get('PATH','').split(os.pathsep)] path = filter(os.path.isdir,path) syscmdlist = [] # Now define isexec in a cross platform manner. if os.name == 'posix': isexec = lambda fname:os.path.isfile(fname) and \ os.access(fname,os.X_OK) else: try: winext = os.environ['pathext'].replace(';','|').replace('.','') except KeyError: winext = 'exe|com|bat|py' if 'py' not in winext: winext += '|py' execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE) isexec = lambda fname:os.path.isfile(fname) and execre.match(fname) savedir = os.getcwdu() # Now walk the paths looking for executables to alias. try: # write the whole loop for posix/Windows so we don't have an if in # the innermost part if os.name == 'posix': for pdir in path: os.chdir(pdir) for ff in os.listdir(pdir): if isexec(ff): try: # Removes dots from the name since ipython # will assume names with dots to be python. self.shell.alias_manager.define_alias( ff.replace('.',''), ff) except InvalidAliasError: pass else: syscmdlist.append(ff) else: no_alias = self.shell.alias_manager.no_alias for pdir in path: os.chdir(pdir) for ff in os.listdir(pdir): base, ext = os.path.splitext(ff) if isexec(ff) and base.lower() not in no_alias: if ext.lower() == '.exe': ff = base try: # Removes dots from the name since ipython # will assume names with dots to be python. self.shell.alias_manager.define_alias( base.lower().replace('.',''), ff) except InvalidAliasError: pass syscmdlist.append(ff) self.shell.db['syscmdlist'] = syscmdlist finally: os.chdir(savedir) @skip_doctest @line_magic def pwd(self, parameter_s=''): """Return the current working directory path. Examples -------- :: In [9]: pwd Out[9]: '/home/tsuser/sprint/ipython' """ return os.getcwdu() @skip_doctest @line_magic def cd(self, parameter_s=''): """Change the current working directory. This command automatically maintains an internal list of directories you visit during your IPython session, in the variable _dh. 
The command %dhist shows this history nicely formatted. You can also do 'cd -<tab>' to see directory history conveniently. Usage: cd 'dir': changes to directory 'dir'. cd -: changes to the last visited directory. cd -<n>: changes to the n-th directory in the directory history. cd --foo: change to directory that matches 'foo' in history cd -b <bookmark_name>: jump to a bookmark set by %bookmark (note: cd <bookmark_name> is enough if there is no directory <bookmark_name>, but a bookmark with the name exists.) 'cd -b <tab>' allows you to tab-complete bookmark names. Options: -q: quiet. Do not print the working directory after the cd command is executed. By default IPython's cd command does print this directory, since the default prompts do not display path information. Note that !cd doesn't work for this purpose because the shell where !command runs is immediately discarded after executing 'command'. Examples -------- :: In [10]: cd parent/child /home/tsuser/parent/child """ oldcwd = os.getcwdu() numcd = re.match(r'(-)(\d+)$',parameter_s) # jump in directory history by number if numcd: nn = int(numcd.group(2)) try: ps = self.shell.user_ns['_dh'][nn] except IndexError: print 'The requested directory does not exist in history.' return else: opts = {} elif parameter_s.startswith('--'): ps = None fallback = None pat = parameter_s[2:] dh = self.shell.user_ns['_dh'] # first search only by basename (last component) for ent in reversed(dh): if pat in os.path.basename(ent) and os.path.isdir(ent): ps = ent break if fallback is None and pat in ent and os.path.isdir(ent): fallback = ent # if we have no last part match, pick the first full path match if ps is None: ps = fallback if ps is None: print "No matching entry in directory history" return else: opts = {} else: #turn all non-space-escaping backslashes to slashes, # for c:\windows\directory\names\ parameter_s = re.sub(r'\\(?! )','/', parameter_s) opts,ps = self.parse_options(parameter_s,'qb',mode='string') # jump to previous if ps == '-': try: ps = self.shell.user_ns['_dh'][-2] except IndexError: raise UsageError('%cd -: No previous directory to change to.') # jump to bookmark if needed else: if not os.path.isdir(ps) or 'b' in opts: bkms = self.shell.db.get('bookmarks', {}) if ps in bkms: target = bkms[ps] print '(bookmark:%s) -> %s' % (ps, target) ps = target else: if 'b' in opts: raise UsageError("Bookmark '%s' not found. " "Use '%%bookmark -l' to see your bookmarks." % ps) # strip extra quotes on Windows, because os.chdir doesn't like them ps = unquote_filename(ps) # at this point ps should point to the target dir if ps: try: os.chdir(os.path.expanduser(ps)) if hasattr(self.shell, 'term_title') and self.shell.term_title: set_term_title('IPython: ' + abbrev_cwd()) except OSError: print sys.exc_info()[1] else: cwd = os.getcwdu() dhist = self.shell.user_ns['_dh'] if oldcwd != cwd: dhist.append(cwd) self.shell.db['dhist'] = compress_dhist(dhist)[-100:] else: os.chdir(self.shell.home_dir) if hasattr(self.shell, 'term_title') and self.shell.term_title: set_term_title('IPython: ' + '~') cwd = os.getcwdu() dhist = self.shell.user_ns['_dh'] if oldcwd != cwd: dhist.append(cwd) self.shell.db['dhist'] = compress_dhist(dhist)[-100:] if not 'q' in opts and self.shell.user_ns['_dh']: print self.shell.user_ns['_dh'][-1] @line_magic def env(self, parameter_s=''): """List environment variables.""" return dict(os.environ) @line_magic def pushd(self, parameter_s=''): """Place the current dir on stack and change directory. 
Usage:\\ %pushd ['dirname'] """ dir_s = self.shell.dir_stack tgt = os.path.expanduser(unquote_filename(parameter_s)) cwd = os.getcwdu().replace(self.shell.home_dir,'~') if tgt: self.cd(parameter_s) dir_s.insert(0,cwd) return self.shell.magic('dirs') @line_magic def popd(self, parameter_s=''): """Change to directory popped off the top of the stack. """ if not self.shell.dir_stack: raise UsageError("%popd on empty stack") top = self.shell.dir_stack.pop(0) self.cd(top) print "popd ->",top @line_magic def dirs(self, parameter_s=''): """Return the current directory stack.""" return self.shell.dir_stack @line_magic def dhist(self, parameter_s=''): """Print your history of visited directories. %dhist -> print full history\\ %dhist n -> print last n entries only\\ %dhist n1 n2 -> print entries between n1 and n2 (n1 not included)\\ This history is automatically maintained by the %cd command, and always available as the global list variable _dh. You can use %cd -<n> to go to directory number <n>. Note that most of time, you should view directory history by entering cd -<TAB>. """ dh = self.shell.user_ns['_dh'] if parameter_s: try: args = map(int,parameter_s.split()) except: self.arg_err(self.dhist) return if len(args) == 1: ini,fin = max(len(dh)-(args[0]),0),len(dh) elif len(args) == 2: ini,fin = args else: self.arg_err(self.dhist) return else: ini,fin = 0,len(dh) nlprint(dh, header = 'Directory history (kept in _dh)', start=ini,stop=fin) @skip_doctest @line_magic def sc(self, parameter_s=''): """Shell capture - run shell command and capture output (DEPRECATED use !). DEPRECATED. Suboptimal, retained for backwards compatibility. You should use the form 'var = !command' instead. Example: "%sc -l myfiles = ls ~" should now be written as "myfiles = !ls ~" myfiles.s, myfiles.l and myfiles.n still apply as documented below. -- %sc [options] varname=command IPython will run the given command using commands.getoutput(), and will then update the user's interactive namespace with a variable called varname, containing the value of the call. Your command can contain shell wildcards, pipes, etc. The '=' sign in the syntax is mandatory, and the variable name you supply must follow Python's standard conventions for valid names. (A special format without variable name exists for internal use) Options: -l: list output. Split the output on newlines into a list before assigning it to the given variable. By default the output is stored as a single string. -v: verbose. Print the contents of the variable. In most cases you should not need to split as a list, because the returned value is a special type of string which can automatically provide its contents either as a list (split on newlines) or as a space-separated string. These are convenient, respectively, either for sequential processing or to be passed to a shell command. 
For example:: # Capture into variable a In [1]: sc a=ls *py # a is a string with embedded newlines In [2]: a Out[2]: 'setup.py\\nwin32_manual_post_install.py' # which can be seen as a list: In [3]: a.l Out[3]: ['setup.py', 'win32_manual_post_install.py'] # or as a whitespace-separated string: In [4]: a.s Out[4]: 'setup.py win32_manual_post_install.py' # a.s is useful to pass as a single command line: In [5]: !wc -l $a.s 146 setup.py 130 win32_manual_post_install.py 276 total # while the list form is useful to loop over: In [6]: for f in a.l: ...: !wc -l $f ...: 146 setup.py 130 win32_manual_post_install.py Similarly, the lists returned by the -l option are also special, in the sense that you can equally invoke the .s attribute on them to automatically get a whitespace-separated string from their contents:: In [7]: sc -l b=ls *py In [8]: b Out[8]: ['setup.py', 'win32_manual_post_install.py'] In [9]: b.s Out[9]: 'setup.py win32_manual_post_install.py' In summary, both the lists and strings used for output capture have the following special attributes:: .l (or .list) : value as list. .n (or .nlstr): value as newline-separated string. .s (or .spstr): value as space-separated string. """ opts,args = self.parse_options(parameter_s, 'lv') # Try to get a variable name and command to run try: # the variable name must be obtained from the parse_options # output, which uses shlex.split to strip options out. var,_ = args.split('=', 1) var = var.strip() # But the command has to be extracted from the original input # parameter_s, not on what parse_options returns, to avoid the # quote stripping which shlex.split performs on it. _,cmd = parameter_s.split('=', 1) except ValueError: var,cmd = '','' # If all looks ok, proceed split = 'l' in opts out = self.shell.getoutput(cmd, split=split) if 'v' in opts: print '%s ==\n%s' % (var, pformat(out)) if var: self.shell.user_ns.update({var:out}) else: return out @line_cell_magic def sx(self, line='', cell=None): """Shell execute - run shell command and capture output (!! is short-hand). %sx command IPython will run the given command using commands.getoutput(), and return the result formatted as a list (split on '\\n'). Since the output is _returned_, it will be stored in ipython's regular output cache Out[N] and in the '_N' automatic variables. Notes: 1) If an input line begins with '!!', then %sx is automatically invoked. That is, while:: !ls causes ipython to simply issue system('ls'), typing:: !!ls is a shorthand equivalent to:: %sx ls 2) %sx differs from %sc in that %sx automatically splits into a list, like '%sc -l'. The reason for this is to make it as easy as possible to process line-oriented shell output via further python commands. %sc is meant to provide much finer control, but requires more typing. 3) Just like %sc -l, this is a list with special attributes: :: .l (or .list) : value as list. .n (or .nlstr): value as newline-separated string. .s (or .spstr): value as whitespace-separated string. This is very useful when trying to use such lists as arguments to system commands.""" if cell is None: # line magic return self.shell.getoutput(line) else: opts,args = self.parse_options(line, '', 'out=') output = self.shell.getoutput(cell) out_name = opts.get('out', opts.get('o')) if out_name: self.shell.user_ns[out_name] = output else: return output system = line_cell_magic('system')(sx) bang = cell_magic('!')(sx) @line_magic def bookmark(self, parameter_s=''): """Manage IPython's bookmark system. 
%bookmark <name> - set bookmark to current dir %bookmark <name> <dir> - set bookmark to <dir> %bookmark -l - list all bookmarks %bookmark -d <name> - remove bookmark %bookmark -r - remove all bookmarks You can later on access a bookmarked folder with:: %cd -b <name> or simply '%cd <name>' if there is no directory called <name> AND there is such a bookmark defined. Your bookmarks persist through IPython sessions, but they are associated with each profile.""" opts,args = self.parse_options(parameter_s,'drl',mode='list') if len(args) > 2: raise UsageError("%bookmark: too many arguments") bkms = self.shell.db.get('bookmarks',{}) if 'd' in opts: try: todel = args[0] except IndexError: raise UsageError( "%bookmark -d: must provide a bookmark to delete") else: try: del bkms[todel] except KeyError: raise UsageError( "%%bookmark -d: Can't delete bookmark '%s'" % todel) elif 'r' in opts: bkms = {} elif 'l' in opts: bks = bkms.keys() bks.sort() if bks: size = max(map(len, bks)) else: size = 0 fmt = '%-'+str(size)+'s -> %s' print 'Current bookmarks:' for bk in bks: print fmt % (bk, bkms[bk]) else: if not args: raise UsageError("%bookmark: You must specify the bookmark name") elif len(args)==1: bkms[args[0]] = os.getcwdu() elif len(args)==2: bkms[args[0]] = args[1] self.shell.db['bookmarks'] = bkms @line_magic def pycat(self, parameter_s=''): """Show a syntax-highlighted file through a pager. This magic is similar to the cat utility, but it will assume the file to be Python source and will show it with syntax highlighting. This magic command can either take a local filename, an url, an history range (see %history) or a macro as argument :: %pycat myscript.py %pycat 7-27 %pycat myMacro %pycat http://www.example.com/myscript.py """ try : cont = self.shell.find_user_code(parameter_s) except (ValueError, IOError): print "Error: no such file, variable, URL, history range or macro" return page.page(self.shell.pycolorize(cont)) @magic_arguments.magic_arguments() @magic_arguments.argument( '-a', '--amend', action='store_true', default=False, help='Open file for amending if it exists' ) @magic_arguments.argument( 'filename', type=unicode, help='file to write' ) @cell_magic def file(self, line, cell): """Write the contents of the cell to a file. For frontends that do not support stdin (Notebook), -f is implied. """ args = magic_arguments.parse_argstring(self.file, line) filename = unquote_filename(args.filename) if os.path.exists(filename): if args.amend: print "Amending to %s" % filename else: print "Overwriting %s" % filename else: print "Writing %s" % filename mode = 'a' if args.amend else 'w' with io.open(filename, mode, encoding='utf-8') as f: f.write(cell)
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/magics/osm.py
Python
lgpl-3.0
25,771
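A brief, hypothetical interactive sketch of the OS magics defined above, driven through the same InteractiveShell.magic() call the module itself uses; the directories and bookmark name are made up, and this only works inside a running IPython session.

# Hypothetical driver for the OS magics above; paths and names are invented.
ip = get_ipython()            # builtin inside an IPython session

ip.magic("cd /tmp")           # change directory; recorded in the _dh history
ip.magic("bookmark scratch")  # remember the current directory
ip.magic("cd -")              # jump back to the previous directory
ip.magic("cd -b scratch")     # ...and return via the bookmark

files = ip.magic("sx ls")     # capture `ls` output as a list, like !!ls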
# vim: set et sw=4 ts=4 ai:

import unittest
import utils

from testbin import TestBin


class TestBinSlam(TestBin, unittest.TestCase):

    def setUp(self):
        self.bin = 'slam'

    def tearDown(self):
        pass
compatibleone/accords-platform
testsuite/basic/slam.py
Python
apache-2.0
220
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensor utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import inspect import re from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import decorator_utils def _add_deprecated_function_notice_to_docstring(doc, date, instructions): """Adds a deprecation notice to a docstring for deprecated functions.""" return decorator_utils.add_notice_to_docstring( doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', [ 'THIS FUNCTION IS DEPRECATED. It will be removed after %s.' % date, 'Instructions for updating:']) def _add_deprecated_arg_notice_to_docstring(doc, date, instructions): """Adds a deprecation notice to a docstring for deprecated arguments.""" return decorator_utils.add_notice_to_docstring( doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS', '(deprecated arguments)', [ 'SOME ARGUMENTS ARE DEPRECATED. ' 'They will be removed after %s.' % date, 'Instructions for updating:']) def _validate_deprecation_args(date, instructions): if not date: raise ValueError('Tell us what date this will be deprecated!') if not re.match(r'20\d\d-[01]\d-[0123]\d', date): raise ValueError('Date must be YYYY-MM-DD.') if not instructions: raise ValueError('Don\'t deprecate things without conversion instructions!') def _call_location(level=2): """Returns call location given level up from current call.""" stack = inspect.stack() # Check that stack has enough elements. if len(stack) > level: location = stack[level] return '%s:%d in %s.' % (location[1], location[2], location[3]) return '<unknown>' def deprecated(date, instructions): """Decorator for marking functions or methods deprecated. This decorator logs a deprecation warning whenever the decorated function is called. It has the following format: <function> (from <module>) is deprecated and will be removed after <date>. Instructions for updating: <instructions> <function> will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD). instructions: String. Instructions on how to update code using the deprecated function. Returns: Decorated function or method. Raises: ValueError: If date is not in ISO 8601 format, or instructions are empty. 
""" _validate_deprecation_args(date, instructions) def deprecated_wrapper(func): """Deprecation wrapper.""" decorator_utils.validate_callable(func, 'deprecated') @functools.wraps(func) def new_func(*args, **kwargs): logging.warning( 'From %s: %s (from %s) is deprecated and will be removed ' 'after %s.\n' 'Instructions for updating:\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, date, instructions) return func(*args, **kwargs) new_func.__doc__ = _add_deprecated_function_notice_to_docstring( func.__doc__, date, instructions) return new_func return deprecated_wrapper DeprecatedArgSpec = collections.namedtuple( 'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value']) def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples): """Decorator for marking specific function arguments as deprecated. This decorator logs a deprecation warning whenever the decorated function is called with the deprecated argument. It has the following format: Calling <function> (from <module>) with <arg> is deprecated and will be removed after <date>. Instructions for updating: <instructions> <function> will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated arguments)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD). instructions: String. Instructions on how to update code using the deprecated function. *deprecated_arg_names_or_tuples: String. or 2-Tuple(String, [ok_vals]). The string is the deprecated argument name. Optionally, an ok-value may be provided. If the user provided argument equals this value, the warning is suppressed. Returns: Decorated function or method. Raises: ValueError: If date is not in ISO 8601 format, instructions are empty, the deprecated arguments are not present in the function signature, or the second element of a deprecated_tuple is not a list. """ _validate_deprecation_args(date, instructions) if not deprecated_arg_names_or_tuples: raise ValueError('Specify which argument is deprecated.') def _get_arg_names_to_ok_vals(): """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position.""" d = {} for name_or_tuple in deprecated_arg_names_or_tuples: if isinstance(name_or_tuple, tuple): d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1]) else: d[name_or_tuple] = DeprecatedArgSpec(-1, False, None) return d def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec): """Builds a dictionary from deprecated arguments to thier spec. Returned dict is keyed by argument name. Each value is a DeprecatedArgSpec with the following fields: position: The zero-based argument position of the argument within the signature. None if the argument isn't found in the signature. ok_values: Values of this argument for which warning will be suppressed. Args: names_to_ok_vals: dict from string arg_name to a list of values, possibly empty, which should not elicit a warning. arg_spec: Output from inspect.getargspec on the called function. Returns: Dictionary from arg_name to DeprecatedArgSpec. 
""" arg_name_to_pos = dict( (name, pos) for (pos, name) in enumerate(arg_spec.args)) deprecated_positional_args = {} for arg_name, spec in iter(names_to_ok_vals.items()): if arg_name in arg_name_to_pos: pos = arg_name_to_pos[arg_name] deprecated_positional_args[arg_name] = DeprecatedArgSpec( pos, spec.has_ok_value, spec.ok_value) return deprecated_positional_args def deprecated_wrapper(func): """Deprecation decorator.""" decorator_utils.validate_callable(func, 'deprecated_args') deprecated_arg_names = _get_arg_names_to_ok_vals() arg_spec = inspect.getargspec(func) deprecated_positions = _get_deprecated_positional_arguments( deprecated_arg_names, arg_spec) is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names is_kwargs_deprecated = arg_spec.keywords in deprecated_arg_names if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated != len(deprecated_arg_names_or_tuples)): known_args = arg_spec.args + [arg_spec.varargs, arg_spec.keywords] missing_args = [arg_name for arg_name in deprecated_arg_names if arg_name not in known_args] raise ValueError('The following deprecated arguments are not present ' 'in the function signature: %s. ' 'Found next arguments: %s.' % (missing_args, known_args)) @functools.wraps(func) def new_func(*args, **kwargs): """Deprecation wrapper.""" invalid_args = [] named_args = inspect.getcallargs(func, *args, **kwargs) for arg_name, spec in iter(deprecated_positions.items()): if (spec.position < len(args) and not (spec.has_ok_value and named_args[arg_name] == spec.ok_value)): invalid_args.append(arg_name) if is_varargs_deprecated and len(args) > len(arg_spec.args): invalid_args.append(arg_spec.varargs) if is_kwargs_deprecated and kwargs: invalid_args.append(arg_spec.keywords) for arg_name in deprecated_arg_names: if (arg_name in kwargs and not (deprecated_positions[arg_name].has_ok_value and (named_args[arg_name] == deprecated_positions[arg_name].ok_value))): invalid_args.append(arg_name) for arg_name in invalid_args: logging.warning( 'From %s: calling %s (from %s) with %s is deprecated and will ' 'be removed after %s.\nInstructions for updating:\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, arg_name, date, instructions) return func(*args, **kwargs) new_func.__doc__ = _add_deprecated_arg_notice_to_docstring( func.__doc__, date, instructions) return new_func return deprecated_wrapper def deprecated_arg_values(date, instructions, **deprecated_kwargs): """Decorator for marking specific function argument values as deprecated. This decorator logs a deprecation warning whenever the decorated function is called with the deprecated argument values. It has the following format: Calling <function> (from <module>) with <arg>=<value> is deprecated and will be removed after <date>. Instructions for updating: <instructions> <function> will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated arguments)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD). instructions: String. Instructions on how to update code using the deprecated function. **deprecated_kwargs: The deprecated argument values. Returns: Decorated function or method. Raises: ValueError: If date is not in ISO 8601 format, or instructions are empty. 
""" _validate_deprecation_args(date, instructions) if not deprecated_kwargs: raise ValueError('Specify which argument values are deprecated.') def deprecated_wrapper(func): """Deprecation decorator.""" decorator_utils.validate_callable(func, 'deprecated_arg_values') @functools.wraps(func) def new_func(*args, **kwargs): """Deprecation wrapper.""" named_args = inspect.getcallargs(func, *args, **kwargs) for arg_name, arg_value in deprecated_kwargs.items(): if arg_name in named_args and named_args[arg_name] == arg_value: logging.warning( 'From %s: calling %s (from %s) with %s=%s is deprecated and will ' 'be removed after %s.\nInstructions for updating:\n%s', _call_location(), decorator_utils.get_qualified_name(func), func.__module__, arg_name, arg_value, date, instructions) return func(*args, **kwargs) new_func.__doc__ = _add_deprecated_arg_notice_to_docstring( func.__doc__, date, instructions) return new_func return deprecated_wrapper
laosiaudi/tensorflow
tensorflow/python/util/deprecation.py
Python
apache-2.0
12,098
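A small, self-contained sketch of how the three decorators above are applied; the function names, argument names and dates are invented for illustration.

# Illustrative use of the deprecation decorators above; names and dates are made up.
from tensorflow.python.util.deprecation import (
    deprecated, deprecated_args, deprecated_arg_values)


@deprecated("2017-01-01", "Use `new_sum` instead.")
def old_sum(xs):
    return sum(xs)


@deprecated_args("2017-01-01", "Pass `axis` instead.", "dim")
def reduce_thing(x, axis=None, dim=None):
    return x


@deprecated_arg_values("2017-01-01", "Stop passing validate=False.", validate=False)
def check_thing(x, validate=True):
    return x


old_sum([1, 2, 3])                   # warns: the whole function is deprecated
reduce_thing([1, 2], dim=0)          # warns: the deprecated `dim` argument was used
check_thing([1, 2], validate=False)  # warns: the deprecated value was passed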
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest import boto3 from airflow import configuration from airflow.contrib.sensors.aws_redshift_cluster_sensor import AwsRedshiftClusterSensor try: from moto import mock_redshift except ImportError: mock_redshift = None class TestAwsRedshiftClusterSensor(unittest.TestCase): def setUp(self): configuration.load_test_config() def _create_cluster(self): client = boto3.client('redshift', region_name='us-east-1') client.create_cluster( ClusterIdentifier='test_cluster', NodeType='dc1.large', MasterUsername='admin', MasterUserPassword='mock_password' ) if len(client.describe_clusters()['Clusters']) == 0: raise ValueError('AWS not properly mocked') @unittest.skipIf(mock_redshift is None, 'mock_redshift package not present') @mock_redshift def test_poke(self): self._create_cluster() op = AwsRedshiftClusterSensor(task_id='test_cluster_sensor', poke_interval=1, timeout=5, aws_conn_id='aws_default', cluster_identifier='test_cluster', target_status='available') self.assertTrue(op.poke(None)) @unittest.skipIf(mock_redshift is None, 'mock_redshift package not present') @mock_redshift def test_poke_false(self): self._create_cluster() op = AwsRedshiftClusterSensor(task_id='test_cluster_sensor', poke_interval=1, timeout=5, aws_conn_id='aws_default', cluster_identifier='test_cluster_not_found', target_status='available') self.assertFalse(op.poke(None)) @unittest.skipIf(mock_redshift is None, 'mock_redshift package not present') @mock_redshift def test_poke_cluster_not_found(self): self._create_cluster() op = AwsRedshiftClusterSensor(task_id='test_cluster_sensor', poke_interval=1, timeout=5, aws_conn_id='aws_default', cluster_identifier='test_cluster_not_found', target_status='cluster_not_found') self.assertTrue(op.poke(None)) if __name__ == '__main__': unittest.main()
danielvdende/incubator-airflow
tests/contrib/sensors/test_aws_redshift_cluster_sensor.py
Python
apache-2.0
3,415
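For reference, a hypothetical DAG snippet using the sensor exercised by the tests above; the DAG id, dates, cluster name and timeouts are placeholders.

# Hypothetical DAG wiring for AwsRedshiftClusterSensor; all values are placeholders.
from datetime import datetime

from airflow import DAG
from airflow.contrib.sensors.aws_redshift_cluster_sensor import AwsRedshiftClusterSensor

with DAG(dag_id="wait_for_redshift",
         start_date=datetime(2018, 1, 1),
         schedule_interval=None) as dag:

    wait_for_cluster = AwsRedshiftClusterSensor(
        task_id="wait_for_cluster_available",
        cluster_identifier="analytics-cluster",
        target_status="available",
        aws_conn_id="aws_default",
        poke_interval=60,
        timeout=60 * 30,
    )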
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # IkaLog # ====== # Copyright (C) 2015 Takeshi HASEGAWA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import time import threading import cv2 from ikalog.utils import * from ikalog.inputs.win.videoinput_wrapper import VideoInputWrapper from ikalog.inputs import VideoInput class DirectShow(VideoInput): # override def _enumerate_sources_func(self): return self._videoinput_wrapper.get_device_list() def read_raw(self): if self._device_id is None: return None frame = self._videoinput_wrapper.get_pixels( self._device_id, parameters=( self._videoinput_wrapper.VI_BGR + self._videoinput_wrapper.VI_VERTICAL_FLIP ) ) return frame # override def _read_frame_func(self): frame = self.read_raw() return frame # override def _initialize_driver_func(self): pass # override def _cleanup_driver_func(self): pass # override def _is_active_func(self): return (self._device_id is not None) # override def _select_device_by_index_func(self, source, width=1280, height=720, framerate=59.94): device_id = int(source) vi = self._videoinput_wrapper self.lock.acquire() try: if self._device_id is not None: raise Exception('Need to deinit the device') formats = [ {'width': width, 'height': height, 'framerate': None}, {'width': width, 'height': height, 'framerate': framerate}, ] for fmt in formats: if fmt['framerate']: vi.set_framerate(device_id, fmt['framerate']) retval = vi.init_device( device_id, flags=self._videoinput_wrapper.DS_RESOLUTION, width=fmt['width'], height=fmt['height'], ) if retval: self._source_width = vi.get_frame_width(device_id) self._source_height = vi.get_frame_height(device_id) success = \ (width == self._source_width) and ( height == self._source_height) if success or (not self.cap_optimal_input_resolution): self._device_id = device_id break vi.deinit_device(device_id) # end of for loop if self._device_id is None: IkaUtils.dprint( '%s: Failed to init the capture device %d' % (self, device_id) ) finally: self.lock.release() # override def _select_device_by_name_func(self, source): IkaUtils.dprint('%s: Select device by name "%s"' % (self, source)) try: index = self.enumerate_sources().index(source) except ValueError: IkaUtils.dprint('%s: Input "%s" not found' % (self, source)) return False IkaUtils.dprint('%s: "%s" -> %d' % (self, source, index)) self._select_device_by_index_func(index) def __init__(self): self.strict_check = False self._device_id = None self._warned_resolution = False self._videoinput_wrapper = VideoInputWrapper() super(DirectShow, self).__init__() if __name__ == "__main__": obj = DirectShow() list = obj.enumerate_sources() for n in range(len(list)): IkaUtils.dprint("%d: %s" % (n, list[n])) dev = input("Please input number (or name) of capture device: ") obj.select_source(dev) k = 0 while k != 27: frame = obj.read_frame() if frame is not None: cv2.imshow(obj.__class__.__name__, frame) k = cv2.waitKey(1) if k == ord('s'): import time cv2.imwrite('screenshot_%d.png' % int(time.time()), frame)
hasegaw/IkaLog
ikalog/inputs/win/directshow.py
Python
apache-2.0
4,592
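The DirectShow class above wraps videoInput behind IkaLog's VideoInput interface and is driven through enumerate_sources(), select_source() and read_frame(). A minimal sketch of a separate caller (hypothetical script; it assumes the same package layout used by the file's own __main__ block and that OpenCV is installed):

import cv2
from ikalog.inputs.win.directshow import DirectShow

capture = DirectShow()
# List the DirectShow devices the wrapper can see.
for n, name in enumerate(capture.enumerate_sources()):
    print('%d: %s' % (n, name))

capture.select_source('0')        # device index (or name), as in the __main__ block
frame = capture.read_frame()      # BGR frame, or None if no device is initialised
if frame is not None:
    cv2.imwrite('first_frame.png', frame)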
from django.conf.urls import patterns, include, url
from testapp.api import PersonResource
from django.contrib import admin

admin.autodiscover()

person_resource = PersonResource()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'testapp.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    (r'^api/', include(person_resource.urls))
)
satish-suradkar/pyresttest
pyresttest/testapp/testapp/urls.py
Python
apache-2.0
535
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test code for QNN operators.""" import numpy as np import tvm from tvm import topi, relay, te from tvm.contrib import graph_executor import tvm.topi.testing def verify_simulated_quantize(data_shape, out_dtype, channels, axis): # Create placeholder variables for all qnn inputs. A = te.placeholder(data_shape, name="value", dtype="float32") D = te.placeholder([], name="dtype", dtype="int32") S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32") Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32") SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis) # Create random numpy values to assign to inputs. a_np = np.random.uniform(size=data_shape).astype("float32") d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype]) s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32") z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32") q_np = np.zeros(shape=data_shape, dtype="float32") def check_target(target, dev): # Wrap the numpy arrays in nd arrays. a = tvm.nd.array(a_np, dev) d = tvm.nd.array(d_np, dev) s = tvm.nd.array(s_np, dev) z = tvm.nd.array(z_np, dev) q = tvm.nd.array(q_np, dev) # Construct equivalent relay graph. per_channel = channels[0] != 1 a_var = relay.var("a", shape=data_shape, dtype="float32") if per_channel: s_var = relay.const(s_np) z_var = relay.const(z_np) else: s_var = relay.const(s_np[0]) z_var = relay.const(z_np[0]) real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype) with tvm.transform.PassContext(opt_level=3): lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target) # Get real qnn quantize output. m = graph_executor.GraphModule(lib["default"](dev)) m.set_input("a", a_np) m.run() real_q_out = m.get_output(0) # Compile the simulated quantize function. with tvm.target.Target(target): sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q) func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize") func(a, d, s, z, q) # Check correctness against the true qnn output. mismatch = q.numpy() != real_q_out.numpy().astype("float32") # Allow some rounding errors due to GPU fp32 arithmetic. assert np.sum(mismatch) <= 3 for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) def test_simulated_quantize(): verify_simulated_quantize([1], "int8", [1], -1) verify_simulated_quantize([2, 5], "int8", [5], 1) verify_simulated_quantize([1, 32, 32, 32], "int8", [32], -1) verify_simulated_quantize([1, 32, 32, 32], "uint8", [32], -2) verify_simulated_quantize([2, 5], "int32", [5], 1) def verify_simulated_dequantize(data_shape, in_dtype, channels, axis): # Create placeholder variables for all qnn inputs. 
A = te.placeholder(data_shape, name="value", dtype="float32") D = te.placeholder([], name="dtype", dtype="int32") S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32") Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32") SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis) # Create random numpy values to assign to inputs. a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype) a_np_f = a_np.astype("float32") d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype]) s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32") z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32") dq_np = np.zeros(shape=data_shape, dtype="float32") def check_target(target, dev): # Wrap the numpy arrays in nd arrays. a = tvm.nd.array(a_np_f, dev) d = tvm.nd.array(d_np, dev) s = tvm.nd.array(s_np, dev) z = tvm.nd.array(z_np, dev) dq = tvm.nd.array(dq_np, dev) # Construct equivalent relay graph. per_channel = channels[0] != 1 a_var = relay.var("a", shape=data_shape, dtype=in_dtype) if per_channel: s_var = relay.const(s_np) z_var = relay.const(z_np) else: s_var = relay.const(s_np[0]) z_var = relay.const(z_np[0]) real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis) with tvm.transform.PassContext(opt_level=3): lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target) # Get real qnn quantize output. m = graph_executor.GraphModule(lib["default"](dev)) m.set_input("a", a_np) m.run() real_dq_out = m.get_output(0) # Compile the simulated quantize function. with tvm.target.Target(target): sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ) func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize") func(a, d, s, z, dq) # Check correctness against the true qnn output. tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) def test_simulated_dequantize(): verify_simulated_dequantize([1], "int8", [1], -1) verify_simulated_dequantize([2, 5], "int8", [5], 1) verify_simulated_dequantize([2, 5], "int8", [2], 0) verify_simulated_dequantize([1, 32, 32, 32], "int8", [32], -1) verify_simulated_dequantize([1, 32, 32, 32], "uint8", [32], -2) verify_simulated_dequantize([2, 5], "int32", [5], 1) if __name__ == "__main__": test_simulated_quantize() test_simulated_dequantize()
dmlc/tvm
tests/python/topi/python/test_topi_qnn.py
Python
apache-2.0
6,744
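The test above checks topi.nn.simulated_quantize against the real relay.qnn.op.quantize graph. A stripped-down sketch of just the reference path, assuming a local LLVM-enabled TVM build (target, shape and quantization parameters are illustrative):

import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor

shape = (2, 5)
a = relay.var("a", shape=shape, dtype="float32")
# Scalar scale/zero point, as in the non-per-channel branch of check_target.
q = relay.qnn.op.quantize(a, relay.const(0.05), relay.const(0), axis=-1, out_dtype="int8")

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(tvm.IRModule.from_expr(q), target="llvm")

dev = tvm.cpu(0)
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", np.random.uniform(size=shape).astype("float32"))
m.run()
print(m.get_output(0).numpy())    # quantized int8 values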
import os import json import arcpy import types import general from .._abstract import abstract ######################################################################## class SpatialReference(abstract.AbstractGeometry): """ creates a spatial reference instance """ _wkid = None #---------------------------------------------------------------------- def __init__(self, wkid): """Constructor""" self._wkid = wkid #---------------------------------------------------------------------- @property def wkid(self): """ get/set the wkid """ return self._wkid @wkid.setter def wkid(self, wkid): """ get/set the wkid """ self._wkid = wkid @property def asDictionary(self): """returns the wkid id for use in json calls""" return {"wkid": self._wkid} #---------------------------------------------------------------------- @property def value(self): """returns the wkid id for use in json calls""" return {"wkid": self._wkid} ######################################################################## class Point(abstract.AbstractGeometry): """ Point Geometry Inputs: coord - list of [X,Y] pair or arcpy.Point Object wkid - well know id of spatial references z - is the Z coordinate value m - m value """ _x = None _y = None _z = None _m = None _wkid = None _json = None _geom = None _dict = None #---------------------------------------------------------------------- def __init__(self, coord, wkid, z=None, m=None): """Constructor""" if isinstance(coord, list): self._x = float(coord[0]) self._y = float(coord[1]) elif isinstance(coord, arcpy.Geometry): self._x = coord.centroid.X self._y = coord.centroid.Y self._z = coord.centroid.Z self._m = coord.centroid.M self._geom = coord.centroid self._wkid = wkid if not z is None: self._z = float(z) if not m is None: self._m = m #---------------------------------------------------------------------- def __str__(self): """ returns the object as a string """ return json.dumps(self.asDictionary, default=general._date_handler) #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryPoint" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Point as an ESRI arcpy.Point object """ return arcpy.AsShape(self.asDictionary, True) #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the object as a python dictionary """ # template = {"x" : self._x, "y" : self._y, "spatialReference" : {"wkid" : self._wkid} } if not self._z is None: template['z'] = self._z if not self._m is None: template['z'] = self._m return template #---------------------------------------------------------------------- @property def asList(self): """ returns a Point value as a list of [x,y,<z>,<m>] """ base = [self._x, self._y] if not self._z is None: base.append(self._z) elif not self._m is None: base.append(self._m) return base #---------------------------------------------------------------------- @property def X(self): """ gets the X 
coordinate """ return self._x #---------------------------------------------------------------------- @X.setter def X(self, value): """sets the X coordinate""" if isinstance(value, (int, float, long, types.NoneType)): self._x = value #---------------------------------------------------------------------- @property def Y(self): """ gets the Y Coordinate """ return self._y #---------------------------------------------------------------------- @Y.setter def Y(self, value): """ sets the Y coordinate """ if isinstance(value, (int, float, long, types.NoneType)): self._y = value #---------------------------------------------------------------------- @property def Z(self): """ gets the Z Coordinate """ return self._z #---------------------------------------------------------------------- @Z.setter def Z(self, value): """ sets the Z coordinate """ if isinstance(value, (int, float, long, types.NoneType)): self._z = value #---------------------------------------------------------------------- @property def wkid(self): """ gets the wkid """ return self._wkid #---------------------------------------------------------------------- @wkid.setter def wkid(self, value): """ sets the wkid """ if isinstance(value, (int, long)): self._wkid = value ######################################################################## class MultiPoint(abstract.AbstractGeometry): """ Implements the ArcGIS JSON MultiPoint Geometry Object """ _geom = None _json = None _dict = None _wkid = None _points = None _hasZ = False _hasM = False #---------------------------------------------------------------------- def __init__(self, points, wkid, hasZ=False, hasM=False): """Constructor""" if isinstance(points, list): self._points = points elif isinstance(points, arcpy.Geometry): self._points = self.__geomToPointList(points) self._wkid = wkid self._hasZ = hasZ self._hasM = hasM #---------------------------------------------------------------------- def __geomToPointList(self, geom): """ converts a geometry object to a common.Geometry object """ if isinstance(geom, arcpy.Multipoint): feature_geom = [] fPart = [] for part in geom: fPart = [] for pnt in part: fPart.append(Point(coord=[pnt.X, pnt.Y], wkid=geom.spatialReference.factoryCode, z=pnt.Z, m=pnt.M)) feature_geom.append(fPart) return feature_geom #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryMultipoint" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Point as an ESRI arcpy.MultiPoint object """ return arcpy.AsShape(self.asDictionary, True) #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the object as a python dictionary """ # value = self._dict if value is None: template = { "hasM" : self._hasM, "hasZ" : self._hasZ, "points" : [], "spatialReference" : {"wkid" : self._wkid} } for pt in self._points: template['points'].append(pt.asList) self._dict = template return self._dict 
######################################################################## class Polyline(abstract.AbstractGeometry): """ Implements the ArcGIS REST API Polyline Object Inputs: paths - list - list of lists of Point objects wkid - integer - well know spatial reference id hasZ - boolean - hasM - boolean - """ _paths = None _wkid = None _json = None _dict = None _geom = None _hasZ = None _hasM = None #---------------------------------------------------------------------- def __init__(self, paths, wkid, hasZ=False, hasM=False): """Constructor""" if isinstance(paths, list): self._paths = paths elif isinstance(paths, arcpy.Geometry): self._paths = self.__geomToPointList(paths) self._wkid = wkid self._hasM = hasM self._hasZ = hasZ #---------------------------------------------------------------------- def __geomToPointList(self, geom): """ converts a geometry object to a common.Geometry object """ if isinstance(geom, arcpy.Polyline): feature_geom = [] fPart = [] for part in geom: fPart = [] for pnt in part: if geom.spatialReference is None: wkid = self._wkid else: wkid = geom.spatialReference.factoryCode fPart.append(Point(coord=[pnt.X, pnt.Y], wkid=wkid, z=pnt.Z, m=pnt.M)) feature_geom.append(fPart) return feature_geom #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryPolyline" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Polyline as an ESRI arcpy.Polyline object """ return arcpy.AsShape(self.asDictionary, True) #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the object as a python dictionary """ value = self._dict if value is None: template = { "hasM" : self._hasM, "hasZ" : self._hasZ, "paths" : [], "spatialReference" : {"wkid" : self._wkid} } for part in self._paths: lpart = [] for pt in part: lpart.append(pt.asList) template['paths'].append(lpart) del lpart self._dict = template return self._dict ######################################################################## class Polygon(abstract.AbstractGeometry): """ Implements the ArcGIS REST JSON for Polygon Object """ _rings = None _wkid = None _json = None _dict = None _geom = None _hasZ = None _hasM = None #---------------------------------------------------------------------- def __init__(self, rings, wkid, hasZ=False, hasM=False): """Constructor""" if isinstance(rings, list): self._rings = rings elif isinstance(rings, arcpy.Geometry): self._rings = self.__geomToPointList(rings) ## self._json = rings.JSON ## self._dict = _unicode_convert(json.loads(self._json)) self._wkid = wkid self._hasM = hasM self._hasZ = hasZ #---------------------------------------------------------------------- def __geomToPointList(self, geom): """ converts a geometry object to a common.Geometry object """ sr = geom.spatialReference if sr is None: wkid = self._wkid else: wkid = sr.factoryCode g = json.loads(geom.JSON) top = [] for gring in g['rings']: ring = [] for g in 
gring: ring.append(Point(coord=g, wkid=wkid, z=None, m=None)) top.append(ring) return top #if isinstance(geom, arcpy.Polygon): #feature_geom = [] #fPart = [] #for part in geom: #fPart = [] #for pnt in part: #if geom.spatialReference is None: #wkid = self._wkid #else: #wkid = geom.spatialReference.factoryCode #fPart.append(Point(coord=[pnt.X, pnt.Y], #wkid=wkid, #z=pnt.Z, m=pnt.M)) #feature_geom.append(fPart) #return feature_geom #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryPolygon" #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Polyline as an ESRI arcpy.Polyline object """ return arcpy.AsShape(self.asDictionary, True) #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the object as a python dictionary """ value = self._dict if value is None: template = { "hasM" : self._hasM, "hasZ" : self._hasZ, "rings" : [], "spatialReference" : {"wkid" : self._wkid} } for part in self._rings: lpart = [] for pt in part: if isinstance(pt, list): lpart.append(pt) elif isinstance(pt, Point): lpart.append(pt.asList) template['rings'].append(lpart) del lpart self._dict = template return self._dict ######################################################################## class Envelope(abstract.AbstractGeometry): """ An envelope is a rectangle defined by a range of values for each coordinate and attribute. It also has a spatialReference field. The fields for the z and m ranges are optional. 
""" _json = None _dict = None _geom = None _xmin = None _ymin = None _zmin = None _mmin = None _xmax = None _ymax = None _zmax = None _mmax = None _wkid = None #---------------------------------------------------------------------- def __init__(self, xmin, ymin, xmax, ymax, wkid, zmin=None, zmax=None, mmin=None, mmax=None): """Constructor""" self._xmin = xmin self._ymin = ymin self._zmin = zmin self._mmin = mmin self._xmax = xmax self._ymax = ymax self._zmax = zmax self._mmax = mmax self._wkid = wkid #---------------------------------------------------------------------- @property def spatialReference(self): """returns the geometry spatial reference""" return {'wkid' : self._wkid} #---------------------------------------------------------------------- @property def type(self): """ returns the geometry type """ return "esriGeometryEnvelope" #---------------------------------------------------------------------- @property def asDictionary(self): """ returns the envelope as a dictionary """ template = { "xmin" : self._xmin, "ymin" : self._ymin, "xmax" : self._xmax, "ymax" : self._ymax, "spatialReference" : {"wkid" : self._wkid} } if self._zmax is not None and \ self._zmin is not None: template['zmin'] = self._zmin template['zmax'] = self._zmax if self._mmin is not None and \ self._mmax is not None: template['mmax'] = self._mmax template['mmin'] = self._mmin return template #---------------------------------------------------------------------- @property def value(self): """ returns the envelope as a dictionary """ template = { "xmin" : self._xmin, "ymin" : self._ymin, "xmax" : self._xmax, "ymax" : self._ymax, "spatialReference" : {"wkid" : self._wkid} } if self._zmax is not None and \ self._zmin is not None: template['zmin'] = self._zmin template['zmax'] = self._zmax if self._mmin is not None and \ self._mmax is not None: template['mmax'] = self._mmax template['mmin'] = self._mmin return template #---------------------------------------------------------------------- def __str__(self): """returns object as string""" return self.asJSON #---------------------------------------------------------------------- @property def asJSON(self): """ returns a geometry as JSON """ value = self._json if value is None: value = json.dumps(self.asDictionary, default=general._date_handler) self._json = value return self._json #---------------------------------------------------------------------- @property def asArcPyObject(self): """ returns the Envelope as an ESRI arcpy.Polygon object """ env = self.asDictionary ring = [[ Point(env['xmin'], env['ymin'], self._wkid), Point(env['xmax'], env['ymin'], self._wkid), Point(env['xmax'], env['ymax'], self._wkid), Point(env['xmin'], env['ymax'], self._wkid) ]] return Polygon(ring, self._wkid).asArcPyObject
achapkowski/ArcREST
src/arcrest/common/geometry.py
Python
apache-2.0
20,189
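The geometry module above builds ArcGIS REST JSON geometry objects on top of arcpy. A short, hypothetical usage sketch (the import path follows the file's location under src/arcrest/common/, and arcpy must be importable since the module imports it at the top):

from arcrest.common.geometry import Point, Envelope

pt = Point(coord=[-122.4194, 37.7749], wkid=4326)
print(pt.asDictionary)   # {'x': -122.4194, 'y': 37.7749, 'spatialReference': {'wkid': 4326}}

env = Envelope(xmin=-123.0, ymin=37.0, xmax=-122.0, ymax=38.0, wkid=4326)
print(env.asJSON)        # rectangle as ArcGIS REST JSON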
# Copyright 2013 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the Python DB API 2.0 (PEP 249) for Impala""" from __future__ import absolute_import import six import time import datetime from impala.error import ( # noqa Error, Warning, InterfaceError, DatabaseError, InternalError, OperationalError, ProgrammingError, IntegrityError, DataError, NotSupportedError) from impala.util import warn_deprecate, warn_protocol_param import impala.hiveserver2 as hs2 AUTH_MECHANISMS = ['NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'] # PEP 249 module globals apilevel = '2.0' threadsafety = 1 # Threads may share the module, but not connections paramstyle = 'pyformat' def connect(host='localhost', port=21050, database=None, timeout=None, use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None, password=None, kerberos_service_name='impala', use_ldap=None, ldap_user=None, ldap_password=None, use_kerberos=None, protocol=None): """Get a connection to HiveServer2 (HS2). These options are largely compatible with the impala-shell command line arguments. See those docs for more information. Parameters ---------- host : str The hostname for HS2. For Impala, this can be any of the `impalad`s. port : int, optional The port number for HS2. The Impala default is 21050. The Hive port is likely different. database : str, optional The default database. If `None`, the result is implementation-dependent. timeout : int, optional Connection timeout in seconds. Default is no timeout. use_ssl : bool, optional Enable SSL. ca_cert : str, optional Local path to the the third-party CA certificate. If SSL is enabled but the certificate is not specified, the server certificate will not be validated. auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'} Specify the authentication mechanism. `'NOSASL'` for unsecured Impala. `'PLAIN'` for unsecured Hive (because Hive requires the SASL transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with LDAP. user : str, optional LDAP user, if applicable. password : str, optional LDAP password, if applicable. kerberos_service_name : str, optional Authenticate to a particular `impalad` service principal. Uses `'impala'` by default. use_ldap : bool, optional Specify `auth_mechanism='LDAP'` instead. .. deprecated:: 0.11.0 ldap_user : str, optional Use `user` parameter instead. .. deprecated:: 0.11.0 ldap_password : str, optional Use `password` parameter instead. .. deprecated:: 0.11.0 use_kerberos : bool, optional Specify `auth_mechanism='GSSAPI'` instead. .. deprecated:: 0.11.0 protocol : str, optional Do not use. HiveServer2 is the only protocol currently supported. .. deprecated:: 0.11.0 Returns ------- HiveServer2Connection A `Connection` object (DB API 2.0-compliant). 
""" # pylint: disable=too-many-locals if use_kerberos is not None: warn_deprecate('use_kerberos', 'auth_mechanism="GSSAPI"') if use_kerberos: auth_mechanism = 'GSSAPI' if use_ldap is not None: warn_deprecate('use_ldap', 'auth_mechanism="LDAP"') if use_ldap: auth_mechanism = 'LDAP' if auth_mechanism: auth_mechanism = auth_mechanism.upper() else: auth_mechanism = 'NOSASL' if auth_mechanism not in AUTH_MECHANISMS: raise NotSupportedError( 'Unsupported authentication mechanism: {0}'.format(auth_mechanism)) if ldap_user is not None: warn_deprecate('ldap_user', 'user') user = ldap_user if ldap_password is not None: warn_deprecate('ldap_password', 'password') password = ldap_password if protocol is not None: if protocol.lower() == 'hiveserver2': warn_protocol_param() else: raise NotSupportedError( "'{0}' is not a supported protocol; only HiveServer2 is " "supported".format(protocol)) service = hs2.connect(host=host, port=port, timeout=timeout, use_ssl=use_ssl, ca_cert=ca_cert, user=user, password=password, kerberos_service_name=kerberos_service_name, auth_mechanism=auth_mechanism) return hs2.HiveServer2Connection(service, default_db=database) class _DBAPITypeObject(object): # Compliance with Type Objects of PEP 249. def __init__(self, *values): self.values = values def __cmp__(self, other): if other in self.values: return 0 else: return -1 def __eq__(self, other): # py3 ignores __cmp__ return other in self.values STRING = _DBAPITypeObject('STRING') BINARY = _DBAPITypeObject('BINARY') NUMBER = _DBAPITypeObject('BOOLEAN', 'TINYINT', 'SMALLINT', 'INT', 'BIGINT', 'FLOAT', 'DOUBLE', 'DECIMAL') DATETIME = _DBAPITypeObject('TIMESTAMP') ROWID = _DBAPITypeObject() Date = datetime.date Time = datetime.time Timestamp = datetime.datetime def DateFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) if six.PY3: buffer = memoryview Binary = buffer
wesm/impyla
impala/dbapi.py
Python
apache-2.0
6,149
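connect() above returns a PEP 249 style connection, so the rest of the DB API applies. A minimal sketch (hostname and database are placeholders; assumes a reachable unsecured Impala daemon, i.e. the NOSASL default):

from impala.dbapi import connect

conn = connect(host='impalad.example.com', port=21050, database='default')
cur = conn.cursor()          # standard PEP 249 cursor
cur.execute('SELECT 1')
print(cur.fetchall())
cur.close()
conn.close()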
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from errors import OperationError, TimeoutExpiredError, MissingCapabilityError
from rpc import RPC, RPCReply, RPCError, RaiseMode

# rfc4741 ops
from retrieve import Get, GetConfig, GetReply
from edit import EditConfig, CopyConfig, DeleteConfig, Validate, Commit, DiscardChanges
from session import CloseSession, KillSession
from lock import Lock, Unlock, LockContext

# others...
from flowmon import PoweroffMachine, RebootMachine

__all__ = [
    'RPC',
    'RPCReply',
    'RPCError',
    'RaiseMode',
    'Get',
    'GetConfig',
    'GetReply',
    'EditConfig',
    'CopyConfig',
    'Validate',
    'Commit',
    'DiscardChanges',
    'DeleteConfig',
    'Lock',
    'Unlock',
    'PoweroffMachine',
    'RebootMachine',
    'LockContext',
    'CloseSession',
    'KillSession',
    'OperationError',
    'TimeoutExpiredError',
    'MissingCapabilityError'
]
mbaukes/ncclient
ncclient/operations/__init__.py
Python
apache-2.0
1,443
# Tests for Pmw color handling.

import Tkinter
import Test
import Pmw

Test.initialise()
testData = ()

defaultPalette = Pmw.Color.getdefaultpalette(Test.root)

c = Tkinter.Button

colors = ('red', 'orange', 'yellow', 'green', 'blue', 'purple', 'white')
normalcolors = map(Pmw.Color.changebrightness,
    (Test.root,) * len(colors), colors, (0.85,) * len(colors))

kw = {}
tests = (
    (Pmw.Color.setscheme, (Test.root, normalcolors[0]), {'foreground' : 'white'}),
)
testData = testData + ((c, ((tests, kw),)),)

for color in normalcolors[1:]:
    kw = {'text' : color}
    tests = (
        (c.pack, ()),
        ('state', 'active'),
    )
    testData = testData + ((c, ((tests, kw),)),)

    kw = {}
    tests = (
        (Pmw.Color.setscheme, (Test.root, color), {'foreground' : 'red'}),
    )
    testData = testData + ((c, ((tests, kw),)),)

# Restore the default colors.
kw = {}
tests = (
    (Pmw.Color.setscheme, (Test.root,), defaultPalette),
)
testData = testData + ((c, ((tests, kw),)),)

if __name__ == '__main__':
    Test.runTests(testData)
mhcrnl/PmwTkEx
src/Pmw/Pmw_1_3/tests/Colors_test.py
Python
apache-2.0
1,045
# Copyright (c) 2016-2021, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
swiftstack/container-crawler
test/unit/__init__.py
Python
apache-2.0
85
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('server', '0003_auto_20150612_1123'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='apikey',
            name='read_only',
        ),
        migrations.AlterField(
            model_name='condition',
            name='machine',
            field=models.ForeignKey(related_name='conditions', to='server.Machine'),
        ),
        migrations.AlterField(
            model_name='fact',
            name='machine',
            field=models.ForeignKey(related_name='facts', to='server.Machine'),
        ),
        migrations.AlterField(
            model_name='historicalfact',
            name='machine',
            field=models.ForeignKey(related_name='historical_facts', to='server.Machine'),
        ),
        migrations.AlterField(
            model_name='pendingappleupdate',
            name='machine',
            field=models.ForeignKey(related_name='pending_apple_updates', to='server.Machine'),
        ),
        migrations.AlterField(
            model_name='pendingupdate',
            name='machine',
            field=models.ForeignKey(related_name='pending_updates', to='server.Machine'),
        ),
        migrations.AlterField(
            model_name='plugin',
            name='name',
            field=models.CharField(unique=True, max_length=512),
        ),
    ]
chasetb/sal
server/migrations/0004_auto_20150623_1623.py
Python
apache-2.0
1,504
config = {
    "interfaces": {
        "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService": {
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": []
            },
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000
                }
            },
            "methods": {
                "ReportErrorEvent": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default"
                }
            }
        }
    }
}
jonparrott/google-cloud-python
error_reporting/google/cloud/errorreporting_v1beta1/gapic/report_errors_service_client_config.py
Python
apache-2.0
987
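The retry_params block above is the usual GAX-style exponential-backoff description. A small sketch of how those numbers expand into a delay schedule (this illustrates the semantics only; it is not code from the client library):

def backoff_schedule(initial_ms=100, multiplier=1.3, max_ms=60000, attempts=10):
    # Delay grows geometrically and is capped at max_retry_delay_millis.
    delay = initial_ms
    for _ in range(attempts):
        yield min(delay, max_ms)
        delay *= multiplier

print([round(d, 1) for d in backoff_schedule()])
# [100, 130.0, 169.0, 219.7, 285.6, 371.3, 482.7, 627.5, 815.7, 1060.4]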
from distutils.core import setup

setup(
    name="kafka-python",
    version="0.1-alpha",
    author="David Arthur",
    author_email="mumrah@gmail.com",
    url="https://github.com/mumrah/kafka-python",
    packages=["kafka"],
    license="Copyright 2012, David Arthur under Apache License, v2.0",
    description="Pure Python client for Apache Kafka",
    long_description=open("README.md").read(),
)
enoex/kafka-python
setup.py
Python
apache-2.0
404
#!/usr/bin/python
#
# mallocstacks  Trace malloc() calls in a process and print the full
#               stack trace for all callsites.
#               For Linux, uses BCC, eBPF. Embedded C.
#
# This script is a basic example of the new Linux 4.6+ BPF_STACK_TRACE
# table API.
#
# Copyright 2016 GitHub, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")

from __future__ import print_function
from bcc import BPF
from bcc.utils import printb
from time import sleep
import sys

if len(sys.argv) < 2:
    print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
    exit()
pid = int(sys.argv[1])
if len(sys.argv) == 3:
    try:
        assert int(sys.argv[2]) > 0, ""
    except (ValueError, AssertionError) as e:
        print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
        print("NUM_STACKS must be a non-zero, positive integer")
        exit()
    stacks = sys.argv[2]
else:
    stacks = "1024"

# load BPF program
b = BPF(text="""
#include <uapi/linux/ptrace.h>

BPF_HASH(calls, int);
BPF_STACK_TRACE(stack_traces, """ + stacks + """);

int alloc_enter(struct pt_regs *ctx, size_t size) {
    int key = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
    if (key < 0)
        return 0;

    // could also use `calls.increment(key, size);`
    u64 zero = 0, *val;
    val = calls.lookup_or_try_init(&key, &zero);
    if (val) {
        (*val) += size;
    }
    return 0;
};
""")

b.attach_uprobe(name="c", sym="malloc", fn_name="alloc_enter", pid=pid)
print("Attaching to malloc in pid %d, Ctrl+C to quit." % pid)

# sleep until Ctrl-C
try:
    sleep(99999999)
except KeyboardInterrupt:
    pass

calls = b.get_table("calls")
stack_traces = b.get_table("stack_traces")

for k, v in reversed(sorted(calls.items(), key=lambda c: c[1].value)):
    print("%d bytes allocated at:" % v.value)
    if k.value > 0:
        for addr in stack_traces.walk(k.value):
            printb(b"\t%s" % b.sym(addr, pid, show_offset=True))
tuxology/bcc
examples/tracing/mallocstacks.py
Python
apache-2.0
1,942
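Per its USAGE string, mallocstacks above takes the target PID and an optional BPF_STACK_TRACE table size on the command line; loading eBPF programs requires root. For example:

sudo ./mallocstacks.py 1234
sudo ./mallocstacks.py 1234 2048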
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import datetime import uuid import mock import testscenarios from oslo.config import cfg from oslo.db import exception as db_exc from oslo.utils import importutils from oslo.utils import timeutils from sqlalchemy.orm import query from neutron.common import constants from neutron.common import topics from neutron import context as q_context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 as db_v2 from neutron.db import l3_agentschedulers_db from neutron.db import l3_db from neutron.db import l3_dvrscheduler_db from neutron.db import l3_hamode_db from neutron.db import l3_hascheduler_db from neutron.extensions import l3agentscheduler as l3agent from neutron import manager from neutron.scheduler import l3_agent_scheduler from neutron.tests import base from neutron.tests.unit import test_db_plugin from neutron.tests.unit import test_l3_plugin from neutron.tests.unit import testlib_api from neutron.tests.unit import testlib_plugin # the below code is required for the following reason # (as documented in testscenarios) """Multiply tests depending on their 'scenarios' attribute. This can be assigned to 'load_tests' in any test module to make this automatically work across tests in the module. 
""" load_tests = testscenarios.load_tests_apply_scenarios HOST_DVR = 'my_l3_host_dvr' DVR_L3_AGENT = { 'binary': 'neutron-l3-agent', 'host': HOST_DVR, 'topic': topics.L3_AGENT, 'configurations': {'agent_mode': 'dvr'}, 'agent_type': constants.AGENT_TYPE_L3, 'start_flag': True } HOST_DVR_SNAT = 'my_l3_host_dvr_snat' DVR_SNAT_L3_AGENT = { 'binary': 'neutron-l3-agent', 'host': HOST_DVR_SNAT, 'topic': topics.L3_AGENT, 'configurations': {'agent_mode': 'dvr_snat'}, 'agent_type': constants.AGENT_TYPE_L3, 'start_flag': True } class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler): def schedule(self): pass def _choose_router_agent(self): pass def _choose_router_agents_for_ha(self): pass class L3SchedulerBaseTestCase(base.BaseTestCase): def setUp(self): super(L3SchedulerBaseTestCase, self).setUp() self.scheduler = FakeL3Scheduler() self.plugin = mock.Mock() def test_auto_schedule_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] with contextlib.nested( mock.patch.object(self.scheduler, 'get_routers_to_schedule'), mock.patch.object(self.scheduler, 'get_routers_can_schedule')) as ( gs, gr): result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertTrue(result) self.assertTrue(gs.called) self.assertTrue(gr.called) def test_auto_schedule_routers_no_agents(self): self.plugin.get_enabled_agent_on_host.return_value = None result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test_auto_schedule_routers_no_unscheduled_routers(self): type(self.plugin).supported_extension_aliases = ( mock.PropertyMock(return_value=[])) with mock.patch.object(self.scheduler, 'get_routers_to_schedule') as mock_routers: mock_routers.return_value = [] result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test_auto_schedule_routers_no_target_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] with contextlib.nested( mock.patch.object(self.scheduler, 'get_routers_to_schedule'), mock.patch.object(self.scheduler, 'get_routers_can_schedule')) as ( mock_unscheduled_routers, mock_target_routers): mock_unscheduled_routers.return_value = mock.ANY mock_target_routers.return_value = None result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test_get_routers_to_schedule_with_router_ids(self): router_ids = ['foo_router_1', 'foo_router_2'] expected_routers = [ {'id': 'foo_router1'}, {'id': 'foo_router_2'} ] self.plugin.get_routers.return_value = expected_routers with mock.patch.object(self.scheduler, 'filter_unscheduled_routers') as mock_filter: mock_filter.return_value = expected_routers unscheduled_routers = self.scheduler.get_routers_to_schedule( mock.ANY, self.plugin, router_ids) mock_filter.assert_called_once_with( mock.ANY, self.plugin, expected_routers) self.assertEqual(expected_routers, unscheduled_routers) def test_get_routers_to_schedule_without_router_ids(self): expected_routers = [ {'id': 'foo_router1'}, {'id': 'foo_router_2'} ] with mock.patch.object(self.scheduler, 'get_unscheduled_routers') as mock_get: mock_get.return_value = expected_routers unscheduled_routers = 
self.scheduler.get_routers_to_schedule( mock.ANY, self.plugin) mock_get.assert_called_once_with(mock.ANY, self.plugin) self.assertEqual(expected_routers, unscheduled_routers) def test_get_routers_to_schedule_exclude_distributed(self): routers = [ {'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'} ] expected_routers = [{'id': 'foo_router_2'}] with mock.patch.object(self.scheduler, 'get_unscheduled_routers') as mock_get: mock_get.return_value = routers unscheduled_routers = self.scheduler.get_routers_to_schedule( mock.ANY, self.plugin, router_ids=None, exclude_distributed=True) mock_get.assert_called_once_with(mock.ANY, self.plugin) self.assertEqual(expected_routers, unscheduled_routers) def _test_get_routers_can_schedule(self, routers, agent, target_routers): self.plugin.get_l3_agent_candidates.return_value = agent result = self.scheduler.get_routers_can_schedule( mock.ANY, self.plugin, routers, mock.ANY) self.assertEqual(target_routers, result) def _test_filter_unscheduled_routers(self, routers, agents, expected): self.plugin.get_l3_agents_hosting_routers.return_value = agents unscheduled_routers = self.scheduler.filter_unscheduled_routers( mock.ANY, self.plugin, routers) self.assertEqual(expected, unscheduled_routers) def test_filter_unscheduled_routers_already_scheduled(self): self._test_filter_unscheduled_routers( [{'id': 'foo_router1'}, {'id': 'foo_router_2'}], [{'id': 'foo_agent_id'}], []) def test_filter_unscheduled_routers_non_scheduled(self): self._test_filter_unscheduled_routers( [{'id': 'foo_router1'}, {'id': 'foo_router_2'}], None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}]) def test_get_routers_can_schedule_with_compat_agent(self): routers = [{'id': 'foo_router'}] self._test_get_routers_can_schedule(routers, mock.ANY, routers) def test_get_routers_can_schedule_with_no_compat_agent(self): routers = [{'id': 'foo_router'}] self._test_get_routers_can_schedule(routers, None, []) def test_bind_routers_centralized(self): routers = [{'id': 'foo_router'}] with mock.patch.object(self.scheduler, 'bind_router') as mock_bind: self.scheduler.bind_routers(mock.ANY, mock.ANY, routers, mock.ANY) mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY) def _test_bind_routers_ha(self, has_binding): routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}] agent = agents_db.Agent(id='foo_agent') with contextlib.nested( mock.patch.object(self.scheduler, 'router_has_binding', return_value=has_binding), mock.patch.object(self.scheduler, 'create_ha_router_binding')) as ( mock_has_binding, mock_bind): self.scheduler.bind_routers(mock.ANY, mock.ANY, routers, agent) mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', 'foo_agent') self.assertEqual(not has_binding, mock_bind.called) def test_bind_routers_ha_has_binding(self): self._test_bind_routers_ha(has_binding=True) def test_bind_routers_ha_no_binding(self): self._test_bind_routers_ha(has_binding=False) class L3SchedulerBaseMixin(object): def _register_l3_agent(self, host, agent_mode='legacy', plugin=None): if not plugin: plugin = self.plugin agent = { 'binary': 'neutron-l3-agent', 'host': host, 'topic': topics.L3_AGENT, 'configurations': {'agent_mode': agent_mode}, 'agent_type': constants.AGENT_TYPE_L3, 'start_flag': True } callback = agents_db.AgentExtRpcCallback() callback.report_state(self.adminContext, agent_state={'agent_state': agent}, time=timeutils.strtime()) agent_db = plugin.get_agents_db(self.adminContext, filters={'host': [agent['host']]}) return agent_db[0] def _register_l3_agents(self, 
plugin=None): self.agent1 = self._register_l3_agent('host_1', plugin=plugin) self.agent_id1 = self.agent1.id self.agent2 = self._register_l3_agent('host_2', plugin=plugin) self.agent_id2 = self.agent2.id def _register_l3_dvr_agents(self): callback = agents_db.AgentExtRpcCallback() callback.report_state(self.adminContext, agent_state={'agent_state': DVR_L3_AGENT}, time=timeutils.strtime()) agent_db = self.plugin.get_agents_db(self.adminContext, filters={'host': [HOST_DVR]}) self.l3_dvr_agent = agent_db[0] self.l3_dvr_agent_id = agent_db[0].id callback.report_state(self.adminContext, agent_state={'agent_state': DVR_SNAT_L3_AGENT}, time=timeutils.strtime()) agent_db = self.plugin.get_agents_db(self.adminContext, filters={'host': [HOST_DVR_SNAT]}) self.l3_dvr_snat_id = agent_db[0].id self.l3_dvr_snat_agent = agent_db[0] def _set_l3_agent_admin_state(self, context, agent_id, state=True): update = {'agent': {'admin_state_up': state}} self.plugin.update_agent(context, agent_id, update) def _set_l3_agent_dead(self, agent_id): update = { 'agent': { 'heartbeat_timestamp': timeutils.utcnow() - datetime.timedelta(hours=1)}} self.plugin.update_agent(self.adminContext, agent_id, update) @contextlib.contextmanager def router_with_ext_gw(self, name='router1', admin_state_up=True, fmt=None, tenant_id=str(uuid.uuid4()), external_gateway_info=None, subnet=None, set_context=False, **kwargs): router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) self._add_external_gateway_to_router( router['router']['id'], subnet['subnet']['network_id']) yield router self._remove_external_gateway_from_router( router['router']['id'], subnet['subnet']['network_id']) self._delete('routers', router['router']['id']) class L3SchedulerTestBaseMixin(object): def _test_add_router_to_l3_agent(self, distributed=False, already_scheduled=False, external_gw=None): agent_id = self.agent_id1 agent = self.agent1 if distributed: self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id agent = self.l3_dvr_snat_agent router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw if already_scheduled: self._test_schedule_bind_router(agent, router) with contextlib.nested( mock.patch.object(self, "validate_agent_router_combination"), mock.patch.object(self, "create_router_to_agent_binding"), mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']) ) as (valid, auto_s, gr): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) self.assertNotEqual(already_scheduled, auto_s.called) def _create_router_for_l3_agent_dvr_test(self, distributed=False, external_gw=None): router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw return router def _prepare_l3_agent_dvr_move_exceptions(self, distributed=False, external_gw=None, agent_id=None, expected_exception=None): router = self._create_router_for_l3_agent_dvr_test( distributed=distributed, external_gw=external_gw) with contextlib.nested( mock.patch.object(self, "create_router_to_agent_binding"), mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router'])): self.assertRaises(expected_exception, self.add_router_to_l3_agent, self.adminContext, agent_id, router['router']['id']) def 
test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self): self._register_l3_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.agent_id1, expected_exception=l3agent.RouterL3AgentMismatch) def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.RouterL3AgentMismatch) def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.DVRL3CannotAssignToDvrAgent) def test_add_router_to_l3_agent_dvr_to_snat(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id agent = self.l3_dvr_snat_agent router = self._create_router_for_l3_agent_dvr_test( distributed=True, external_gw=external_gw_info) with contextlib.nested( mock.patch.object(self, "validate_agent_router_combination"), mock.patch.object(self, "create_router_to_agent_binding"), mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']) ) as (valid_agent_rtr, rtr_agent_binding, get_rtr): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) rtr_agent_binding.assert_called_once_with( self.adminContext, agent, router['router']) def test_add_router_to_l3_agent(self): self._test_add_router_to_l3_agent() def test_add_distributed_router_to_l3_agent(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, external_gw=external_gw_info) def test_add_router_to_l3_agent_already_scheduled(self): self._test_add_router_to_l3_agent(already_scheduled=True) def test_add_distributed_router_to_l3_agent_already_scheduled(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, already_scheduled=True, external_gw=external_gw_info) def _prepare_schedule_dvr_tests(self): scheduler = l3_agent_scheduler.ChanceScheduler() agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() plugin = mock.Mock() plugin.get_l3_agents_hosting_routers.return_value = [] plugin.get_l3_agents.return_value = [agent] plugin.get_l3_agent_candidates.return_value = [agent] return scheduler, agent, plugin def test_schedule_dvr_router_without_snatbinding_and_no_gw(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True } plugin.get_router.return_value = sync_router with contextlib.nested( mock.patch.object(scheduler, 'bind_router'), mock.patch.object( plugin, 'get_snat_bindings', return_value=False) ): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def test_schedule_dvr_router_with_snatbinding_no_gw(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = {'id': 'foo_router_id', 'distributed': True} plugin.get_router.return_value = sync_router with contextlib.nested( 
mock.patch.object(scheduler, 'bind_router'), mock.patch.object(plugin, 'get_snat_bindings', return_value=True)): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def test_schedule_router_distributed(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': str(uuid.uuid4()), 'enable_snat': True } } plugin.get_router.return_value = sync_router with contextlib.nested( mock.patch.object(scheduler, 'bind_router'), mock.patch.object( plugin, 'get_snat_bindings', return_value=False) ): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.schedule_snat_router( mock.ANY, 'foo_router_id', sync_router), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def _test_schedule_bind_router(self, agent, router): ctx = self.adminContext session = ctx.session db = l3_agentschedulers_db.RouterL3AgentBinding scheduler = l3_agent_scheduler.ChanceScheduler() rid = router['router']['id'] scheduler.bind_router(ctx, rid, agent) results = (session.query(db).filter_by(router_id=rid).all()) self.assertTrue(len(results) > 0) self.assertIn(agent.id, [bind.l3_agent_id for bind in results]) def test_bind_new_router(self): router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('is scheduled', args[0]) def test_bind_absent_router(self): scheduler = l3_agent_scheduler.ChanceScheduler() # checking that bind_router() is not throwing # when supplied with router_id of non-existing router scheduler.bind_router(self.adminContext, "dummyID", self.agent1) def test_bind_existing_router(self): router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') self._test_schedule_bind_router(self.agent1, router) with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('has already been scheduled', args[0]) def _check_get_l3_agent_candidates( self, router, agent_list, exp_host, count=1): candidates = self.get_l3_agent_candidates(self.adminContext, router, agent_list) self.assertEqual(len(candidates), count) if count: self.assertEqual(candidates[0]['host'], exp_host) def test_get_l3_agent_candidates_legacy(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] # test legacy agent_mode case: only legacy agent should be candidate router['distributed'] = False exp_host = 'host_1' 
self._check_get_l3_agent_candidates(router, agent_list, exp_host) def test_get_l3_agent_candidates_dvr(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] # test dvr agent_mode case only dvr agent should be candidate router['distributed'] = True exp_host = DVR_L3_AGENT.get('host') self.check_ports_exist_on_l3agent = mock.Mock(return_value=True) self._check_get_l3_agent_candidates(router, agent_list, exp_host) def test_get_l3_agent_candidates_dvr_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] exp_host = DVR_L3_AGENT.get('host') router['distributed'] = True # Test no VMs present case self.check_ports_exist_on_l3agent = mock.Mock(return_value=False) self._check_get_l3_agent_candidates( router, agent_list, exp_host, count=0) def test_get_l3_agent_candidates_dvr_snat(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] exp_host = DVR_SNAT_L3_AGENT.get('host') self.check_ports_exist_on_l3agent = mock.Mock(return_value=True) self._check_get_l3_agent_candidates(router, agent_list, exp_host) def test_get_l3_agent_candidates_dvr_snat_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] exp_host = DVR_SNAT_L3_AGENT.get('host') self.check_ports_exist_on_l3agent = mock.Mock(return_value=False) # Test no VMs present case self.check_ports_exist_on_l3agent.return_value = False self._check_get_l3_agent_candidates( router, agent_list, exp_host, count=0) def test_get_l3_agent_candidates_centralized(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) # check centralized test case router['distributed'] = False exp_host = DVR_SNAT_L3_AGENT.get('host') agent_list = [self.l3_dvr_snat_agent] self._check_get_l3_agent_candidates(router, agent_list, exp_host) def _prepare_check_ports_exist_tests(self): l3_agent = agents_db.Agent() l3_agent.admin_state_up = True l3_agent.host = 'host_1' router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) self.plugin.get_ports = mock.Mock(return_value=[]) self.get_subnet_ids_on_router = mock.Mock(return_value=[]) return l3_agent, router def test_check_ports_exist_on_l3agent_no_subnets(self): l3_agent, router = self._prepare_check_ports_exist_tests() with mock.patch.object(manager.NeutronManager, 'get_plugin') as getp: getp.return_value = self.plugin # no subnets val = self.check_ports_exist_on_l3agent(self.adminContext, l3_agent, router['id']) self.assertFalse(val) def test_check_ports_exist_on_l3agent_no_subnet_match(self): l3_agent, router = self._prepare_check_ports_exist_tests() with mock.patch.object(manager.NeutronManager, 'get_plugin') as getp: getp.return_value = self.plugin # no 
matching subnet self.get_subnet_ids_on_router.return_value = [str(uuid.uuid4())] val = self.check_ports_exist_on_l3agent(self.adminContext, l3_agent, router['id']) self.assertFalse(val) def test_check_ports_exist_on_l3agent_subnet_match(self): l3_agent, router = self._prepare_check_ports_exist_tests() with mock.patch.object(manager.NeutronManager, 'get_plugin') as getp: getp.return_value = self.plugin # matching subnet port = {'subnet_id': str(uuid.uuid4()), 'binding:host_id': 'host_1', 'device_owner': 'compute:', 'id': 1234} self.plugin.get_ports.return_value = [port] self.plugin.get_subnet_ids_on_router = mock.Mock( return_value=[port['subnet_id']]) val = self.check_ports_exist_on_l3agent(self.adminContext, l3_agent, router['id']) self.assertTrue(val) class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin, l3_db.L3_NAT_db_mixin, common_db_mixin.CommonDbMixin, test_db_plugin.NeutronDbPluginV2TestCase, test_l3_plugin.L3NatTestCaseMixin, L3SchedulerBaseMixin, L3SchedulerTestBaseMixin): def setUp(self): self.mock_rescheduling = False ext_mgr = test_l3_plugin.L3TestExtensionManager() plugin_str = ('neutron.tests.unit.test_l3_plugin.' 'TestL3NatIntAgentSchedulingPlugin') super(L3SchedulerTestCase, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr) self.adminContext = q_context.get_admin_context() self.plugin = manager.NeutronManager.get_plugin() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase): def test_random_scheduling(self): random_patch = mock.patch('random.choice') random_mock = random_patch.start() def side_effect(seq): return seq[0] random_mock.side_effect = side_effect with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) self.assertEqual(random_mock.call_count, 1) with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) self.assertEqual(random_mock.call_count, 2) random_patch.stop() def test_scheduler_auto_schedule_when_agent_added(self): self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(0, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, True) self.plugin.auto_schedule_routers(self.adminContext, 'host_1', [r1['router']['id']]) agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual('host_1', agents[0]['host']) class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase): def setUp(self): super(L3AgentLeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): # disable one agent to force the scheduling to the only one. 
self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) agent_id1 = agents[0]['id'] with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) agent_id2 = agents[0]['id'] self.assertEqual(agent_id1, agent_id2) # re-enable the second agent to see whether the next router # spawned will be on this one. self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, True) with self.router_with_ext_gw(name='r3', subnet=subnet) as r3: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r3['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) agent_id3 = agents[0]['id'] self.assertNotEqual(agent_id1, agent_id3) class L3DvrScheduler(l3_db.L3_NAT_db_mixin, l3_dvrscheduler_db.L3_DVRsch_db_mixin): pass class L3DvrSchedulerTestCase(testlib_api.SqlTestCase, testlib_plugin.PluginSetupHelper): def setUp(self): plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(plugin) super(L3DvrSchedulerTestCase, self).setUp() self.adminContext = q_context.get_admin_context() self.dut = L3DvrScheduler() def test_dvr_update_router_addvm(self): port = { 'device_id': 'abcd', 'device_owner': 'compute:nova', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.3' } ] } dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': 'network:router_interface_distributed', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with contextlib.nested( mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]), mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=mock.Mock()), mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=r1), mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' '.L3AgentNotifyAPI')): self.dut.dvr_update_router_addvm(self.adminContext, port) def test_get_dvr_routers_by_portid(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': 'network:router_interface_distributed', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with contextlib.nested( mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port', return_value=dvr_port), mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port])): router_id = self.dut.get_dvr_routers_by_portid(self.adminContext, dvr_port['id']) self.assertEqual(router_id.pop(), r1['id']) def test_get_subnet_ids_on_router(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': 'network:router_interface_distributed', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with contextlib.nested( mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port])): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(sub_ids.pop(), 
dvr_port.get('fixed_ips').pop(0).get('subnet_id')) def test_check_ports_active_on_host_and_subnet(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'status': 'ACTIVE', 'binding:host_id': 'thisHost', 'device_owner': 'compute:nova', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with contextlib.nested( mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]), mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=mock.Mock()), mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=r1), mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' '.L3AgentNotifyAPI')): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) result = self.dut.check_ports_active_on_host_and_subnet( self.adminContext, 'thisHost', 'dvr_port1', sub_ids) self.assertFalse(result) def _test_dvr_serviced_port_exists_on_subnet(self, port): with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' 'get_ports', return_value=[port]): result = self.dut.check_ports_active_on_host_and_subnet( self.adminContext, 'thisHost', 'dvr1-intf-id', 'my-subnet-id') self.assertTrue(result) def test_dvr_serviced_vip_port_exists_on_subnet(self): vip_port = { 'id': 'lbaas-vip-port1', 'device_id': 'vip-pool-id', 'status': 'ACTIVE', 'binding:host_id': 'thisHost', 'device_owner': constants.DEVICE_OWNER_LOADBALANCER, 'fixed_ips': [ { 'subnet_id': 'my-subnet-id', 'ip_address': '10.10.10.1' } ] } self._test_dvr_serviced_port_exists_on_subnet(port=vip_port) def test_dvr_serviced_dhcp_port_exists_on_subnet(self): dhcp_port = { 'id': 'dhcp-port1', 'device_id': 'dhcp-net-id', 'status': 'ACTIVE', 'binding:host_id': 'thisHost', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [ { 'subnet_id': 'my-subnet-id', 'ip_address': '10.10.10.2' } ] } self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port) def _prepare_schedule_snat_tests(self): agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': str(uuid.uuid4()), 'enable_snat': True } } return agent, router def test_schedule_snat_router_duplicate_entry(self): self._prepare_schedule_snat_tests() with contextlib.nested( mock.patch.object(self.dut, 'get_l3_agents'), mock.patch.object(self.dut, 'get_snat_candidates'), mock.patch.object(self.dut, 'bind_snat_servicenode', side_effect=db_exc.DBDuplicateEntry()), mock.patch.object(self.dut, 'bind_dvr_router_servicenode') ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr): self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar') self.assertTrue(mock_bind_snat.called) self.assertFalse(mock_bind_dvr.called) def test_schedule_router_unbind_snat_servicenode_negativetest(self): router = { 'id': 'foo_router_id', 'distributed': True } with contextlib.nested( mock.patch.object(self.dut, 'get_router'), mock.patch.object(self.dut, 'get_snat_bindings'), mock.patch.object(self.dut, 'unbind_snat_servicenode') ) as (mock_rd, mock_snat_bind, mock_unbind): mock_rd.return_value = router mock_snat_bind.return_value = False self.dut.schedule_snat_router( self.adminContext, 'foo_router_id', router) self.assertFalse(mock_unbind.called) def test_schedule_snat_router_with_snat_candidates(self): agent, router = self._prepare_schedule_snat_tests() with contextlib.nested( mock.patch.object(query.Query, 
'first'), mock.patch.object(self.dut, 'get_l3_agents'), mock.patch.object(self.dut, 'get_snat_candidates'), mock.patch.object(self.dut, 'get_router'), mock.patch.object(self.dut, 'bind_dvr_router_servicenode'), mock.patch.object(self.dut, 'bind_snat_servicenode')) as ( mock_query, mock_agents, mock_candidates, mock_rd, mock_dvr, mock_bind): mock_rd.return_value = router mock_query.return_value = [] mock_agents.return_value = [agent] mock_candidates.return_value = [agent] self.dut.schedule_snat_router( self.adminContext, 'foo_router_id', mock.ANY) mock_bind.assert_called_once_with( self.adminContext, 'foo_router_id', [agent]) def test_unbind_snat_servicenode(self): router_id = 'foo_router_id' core_plugin = mock.PropertyMock() type(self.dut)._core_plugin = core_plugin (self.dut._core_plugin.get_ports_on_host_by_subnet. return_value) = [] core_plugin.reset_mock() l3_notifier = mock.PropertyMock() type(self.dut).l3_rpc_notifier = l3_notifier binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding( router_id=router_id, l3_agent_id='foo_l3_agent_id', l3_agent=agents_db.Agent()) with contextlib.nested( mock.patch.object(query.Query, 'one'), mock.patch.object(self.adminContext.session, 'delete'), mock.patch.object(query.Query, 'delete'), mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as ( mock_query, mock_session, mock_delete, mock_get_subnets): mock_query.return_value = binding mock_get_subnets.return_value = ['foo_subnet_id'] self.dut.unbind_snat_servicenode(self.adminContext, router_id) mock_get_subnets.assert_called_with(self.adminContext, router_id) self.assertTrue(mock_session.call_count) self.assertTrue(mock_delete.call_count) core_plugin.assert_called_once_with() l3_notifier.assert_called_once_with() class L3HAPlugin(db_v2.NeutronDbPluginV2, l3_hamode_db.L3_HA_NAT_db_mixin, l3_hascheduler_db.L3_HA_scheduler_db_mixin): supported_extension_aliases = ["l3-ha"] class L3HATestCaseMixin(testlib_api.SqlTestCase, L3SchedulerBaseMixin, testlib_plugin.PluginSetupHelper): def setUp(self): super(L3HATestCaseMixin, self).setUp() self.adminContext = q_context.get_admin_context() self.plugin = L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated').start() cfg.CONF.set_override('max_l3_agents_per_router', 0) self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() def _create_ha_router(self, ha=True, tenant_id='tenant1'): self.adminContext.tenant_id = tenant_id router = {'name': 'router1', 'admin_state_up': True} if ha is not None: router['ha'] = ha return self.plugin.create_router(self.adminContext, {'router': router}) class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3_HA_scheduler_db_mixinTestCase, self)._register_l3_agents(plugin=plugin) self.agent3 = self._register_l3_agent('host_3', plugin=plugin) self.agent_id3 = self.agent3.id self.agent4 = self._register_l3_agent('host_4', plugin=plugin) self.agent_id4 = self.agent4.id def test_get_ha_routers_l3_agents_count(self): router1 = self._create_ha_router() router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) self.plugin.schedule_router(self.adminContext, router1['id']) self.plugin.schedule_router(self.adminContext, router2['id']) self.plugin.schedule_router(self.adminContext, router3['id']) result = self.plugin.get_ha_routers_l3_agents_count( self.adminContext).all() 
self.assertEqual(2, len(result)) self.assertIn((router1['id'], router1['tenant_id'], 4), result) self.assertIn((router2['id'], router2['tenant_id'], 4), result) self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY), result) def test_get_ordered_l3_agents_by_num_routers(self): router1 = self._create_ha_router() router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) router4 = self._create_ha_router(ha=False) # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will # host 2, and agent 4 will host 3. self.plugin.schedule_router(self.adminContext, router1['id'], candidates=[self.agent2, self.agent4]) self.plugin.schedule_router(self.adminContext, router2['id'], candidates=[self.agent3, self.agent4]) self.plugin.schedule_router(self.adminContext, router3['id'], candidates=[self.agent3]) self.plugin.schedule_router(self.adminContext, router4['id'], candidates=[self.agent4]) agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3, self.agent_id4] result = self.plugin.get_l3_agents_ordered_by_num_routers( self.adminContext, agent_ids) self.assertEqual(agent_ids, [record['id'] for record in result]) class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin): def test_reschedule_ha_routers_from_down_agents(self): router = self._create_ha_router() self.plugin.schedule_router(self.adminContext, router['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) self._set_l3_agent_dead(self.agent_id1) with mock.patch.object(self.plugin, 'reschedule_router') as reschedule: self.plugin.reschedule_routers_from_down_agents() self.assertFalse(reschedule.called) class L3HAChanceSchedulerTestCase(L3HATestCaseMixin): def test_scheduler_with_ha_enabled(self): router = self._create_ha_router() self.plugin.schedule_router(self.adminContext, router['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) for agent in agents: sync_data = self.plugin.get_ha_sync_data_for_host( self.adminContext, router_ids=[router['id']], host=agent.host) self.assertEqual(1, len(sync_data)) interface = sync_data[0][constants.HA_INTERFACE_KEY] self.assertIsNotNone(interface) def test_auto_schedule(self): router = self._create_ha_router() self.plugin.auto_schedule_routers( self.adminContext, self.agent1.host, None) self.plugin.auto_schedule_routers( self.adminContext, self.agent2.host, None) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) self.assertEqual(2, len(agents)) def test_auto_schedule_specific_router_when_agent_added(self): self._auto_schedule_when_agent_added(True) def test_auto_schedule_all_routers_when_agent_added(self): self._auto_schedule_when_agent_added(False) def _auto_schedule_when_agent_added(self, specific_router): router = self._create_ha_router() self.plugin.schedule_router(self.adminContext, router['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) agent = self._register_l3_agent('host_3') self.agent_id3 = agent.id routers_to_auto_schedule = [router['id']] if specific_router else [] self.plugin.auto_schedule_routers(self.adminContext, 'host_3', routers_to_auto_schedule) agents = self.plugin.get_l3_agents_hosting_routers( 
self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(3, len(agents)) # Simulate agent restart to make sure we don't try to re-bind self.plugin.auto_schedule_routers(self.adminContext, 'host_3', routers_to_auto_schedule) def test_scheduler_with_ha_enabled_not_enough_agent(self): r1 = self._create_ha_router() self.plugin.schedule_router(self.adminContext, r1['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']], admin_state_up=True) self.assertEqual(2, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) r2 = self._create_ha_router() self.plugin.schedule_router(self.adminContext, r2['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['id']], admin_state_up=True) self.assertEqual(0, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, True) class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3HALeastRoutersSchedulerTestCase, self)._register_l3_agents(plugin=plugin) agent = self._register_l3_agent('host_3', plugin=plugin) self.agent_id3 = agent.id agent = self._register_l3_agent('host_4', plugin=plugin) self.agent_id4 = agent.id def setUp(self): super(L3HALeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) # disable the third agent to be sure that the router will # be scheduled of the two firsts self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, False) r1 = self._create_ha_router() self.plugin.schedule_router(self.adminContext, r1['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, True) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, True) r2 = self._create_ha_router() self.plugin.schedule_router(self.adminContext, r2['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id3, agent_ids) self.assertIn(self.agent_id4, agent_ids) class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase, testlib_plugin.PluginSetupHelper, L3SchedulerBaseMixin): """Test cases to test get_l3_agents. This class tests the L3AgentSchedulerDbMixin.get_l3_agents() for the 'agent_mode' filter with various values. 
5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode and legacy """ scenarios = [ ('no filter', dict(agent_modes=[], expected_agent_modes=['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'])), ('legacy', dict(agent_modes=['legacy'], expected_agent_modes=['legacy', 'legacy'])), ('dvr_snat', dict(agent_modes=['dvr_snat'], expected_agent_modes=['dvr_snat'])), ('dvr ', dict(agent_modes=['dvr'], expected_agent_modes=['dvr'])), ('legacy and dvr snat', dict(agent_modes=['legacy', 'dvr_snat', 'legacy'], expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])), ('legacy and dvr', dict(agent_modes=['legacy', 'dvr'], expected_agent_modes=['legacy', 'dvr', 'legacy'])), ('dvr_snat and dvr', dict(agent_modes=['dvr_snat', 'dvr'], expected_agent_modes=['dvr_snat', 'dvr'])), ('legacy, dvr_snat and dvr', dict(agent_modes=['legacy', 'dvr_snat', 'dvr'], expected_agent_modes=['legacy', 'dvr_snat', 'dvr', 'legacy'])), ('invalid', dict(agent_modes=['invalid'], expected_agent_modes=[])), ] def setUp(self): super(TestGetL3AgentsWithAgentModeFilter, self).setUp() self.plugin = L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') self.adminContext = q_context.get_admin_context() hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5'] agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'] for host, agent_mode in zip(hosts, agent_modes): self._register_l3_agent(host, agent_mode, self.plugin) def _get_agent_mode(self, agent): agent_conf = self.plugin.get_configuration_dict(agent) return agent_conf.get('agent_mode', 'None') def test_get_l3_agents(self): l3_agents = self.plugin.get_l3_agents( self.adminContext, filters={'agent_modes': self.agent_modes}) self.assertEqual(len(self.expected_agent_modes), len(l3_agents)) returned_agent_modes = [self._get_agent_mode(agent) for agent in l3_agents] self.assertEqual(self.expected_agent_modes, returned_agent_modes)
projectcalico/calico-neutron
neutron/tests/unit/test_l3_schedulers.py
Python
apache-2.0
61,087
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import sys
import subprocess
import threading
from workloadPatch.LogbackupPatch import LogBackupPatch
from time import sleep
from datetime import datetime


# Example of Parameter File Content:
# *.db_name='CDB1'
def parameterFileParser():
    regX = re.compile(r"\*\..+=.+")
    parameterFile = open(logbackup.parameterFilePath, 'r')
    contents = parameterFile.read()
    for match in regX.finditer(contents):
        keyParameter = match.group().split('=')[0].lstrip('*\.')
        valueParameter = [name.strip('\'') for name in match.group().split('=')[1].split(',')]
        logbackup.oracleParameter[keyParameter] = valueParameter


def setLocation():
    nowTimestamp = datetime.now()
    nowTimestamp = nowTimestamp.strftime("%Y%m%d%H%M%S")
    fullPath = logbackup.baseLocation + nowTimestamp
    os.system('mkdir -m777 ' + fullPath)
    return fullPath


def takeBackup():
    print("logbackup: Taking a backup")
    backupPath = setLocation()
    if 'oracle' in logbackup.name.lower():
        backupOracle = logbackup.command + " -s / as sysdba @" + "/var/lib/waagent/Microsoft.Azure.RecoveryServices.VMSnapshotLinux-1.0.9164.0/main/workloadPatch/scripts/logbackup.sql " + backupPath
        argsForControlFile = ["su", "-", logbackup.cred_string, "-c", backupOracle]
        snapshotControlFile = subprocess.Popen(argsForControlFile)
        while snapshotControlFile.poll() == None:
            sleep(1)
        recoveryFileDest = logbackup.oracleParameter['db_recovery_file_dest']
        dbName = logbackup.oracleParameter['db_name']
        print(' logbackup: Archive log backup started at ', datetime.now().strftime("%Y%m%d%H%M%S"))
        os.system('cp -R -f ' + recoveryFileDest[0] + '/' + dbName[0] + '/archivelog ' + backupPath)
        print(' logbackup: Archive log backup complete at ', datetime.now().strftime("%Y%m%d%H%M%S"))
    print("logbackup: Backup Complete")


def main():
    global logbackup
    logbackup = LogBackupPatch()
    parameterFileParser()
    takeBackup()


if __name__ == "__main__":
    main()
Azure/azure-linux-extensions
VMBackup/main/workloadPatch/WorkloadUtils/OracleLogBackup.py
Python
apache-2.0
2,709
import logging

from docker.utils import kwargs_from_env

from cattle import default_value, Config

log = logging.getLogger('docker')

_ENABLED = True


class DockerConfig:
    def __init__(self):
        pass

    @staticmethod
    def docker_enabled():
        return default_value('DOCKER_ENABLED', 'true') == 'true'

    @staticmethod
    def docker_host_ip():
        return default_value('DOCKER_HOST_IP', Config.agent_ip())

    @staticmethod
    def docker_home():
        return default_value('DOCKER_HOME', '/var/lib/docker')

    @staticmethod
    def docker_uuid_file():
        def_value = '{0}/.docker_uuid'.format(Config.state_dir())
        return default_value('DOCKER_UUID_FILE', def_value)

    @staticmethod
    def docker_uuid():
        return Config.get_uuid_from_file('DOCKER_UUID',
                                         DockerConfig.docker_uuid_file())

    @staticmethod
    def url_base():
        return default_value('DOCKER_URL_BASE', None)

    @staticmethod
    def api_version():
        return default_value('DOCKER_API_VERSION', '1.18')

    @staticmethod
    def docker_required():
        return default_value('DOCKER_REQUIRED', 'true') == 'true'

    @staticmethod
    def delegate_timeout():
        return int(default_value('DOCKER_DELEGATE_TIMEOUT', '120'))

    @staticmethod
    def use_boot2docker_connection_env_vars():
        use_b2d = default_value('DOCKER_USE_BOOT2DOCKER', 'false')
        return use_b2d.lower() == 'true'

    @staticmethod
    def is_host_pidns():
        return default_value('AGENT_PIDNS', 'container') == 'host'


def docker_client(version=None, base_url_override=None, tls_config=None):
    if DockerConfig.use_boot2docker_connection_env_vars():
        kwargs = kwargs_from_env(assert_hostname=False)
    else:
        kwargs = {'base_url': DockerConfig.url_base()}

    if base_url_override:
        kwargs['base_url'] = base_url_override

    if tls_config:
        kwargs['tls'] = tls_config

    if version is None:
        version = DockerConfig.api_version()
    kwargs['version'] = version

    log.debug('docker_client=%s', kwargs)
    return Client(**kwargs)


def pull_image(image, progress):
    _DOCKER_POOL.pull_image(image, progress)


def get_compute():
    return _DOCKER_COMPUTE


try:
    from docker import Client
except:
    log.info('Disabling docker, docker-py not found')
    _ENABLED = False

try:
    if _ENABLED:
        docker_client().info()
except Exception, e:
    log.exception('Disabling docker, could not contact docker')
    _ENABLED = False

if _ENABLED and DockerConfig.docker_enabled():
    from .storage import DockerPool
    from .compute import DockerCompute
    from .delegate import DockerDelegate
    from cattle import type_manager

    _DOCKER_POOL = DockerPool()
    _DOCKER_COMPUTE = DockerCompute()
    _DOCKER_DELEGATE = DockerDelegate()
    type_manager.register_type(type_manager.STORAGE_DRIVER, _DOCKER_POOL)
    type_manager.register_type(type_manager.COMPUTE_DRIVER, _DOCKER_COMPUTE)
    type_manager.register_type(type_manager.PRE_REQUEST_HANDLER,
                               _DOCKER_DELEGATE)

if not _ENABLED and DockerConfig.docker_required():
    raise Exception('Failed to initialize Docker')
sonchang/python-agent
cattle/plugins/docker/__init__.py
Python
apache-2.0
3,232
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://www.kamaelia.org/AUTHORS - please extend this file,
#     not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""
====================
DataTx
====================

The DataTx packetises the data: it adds a packet header to each payload
before sending it on.

How it works?
---------------------
The DataTx adds a header to the data received on its inboxes "keyIn" and
"inbox". The packet header contains the packet type and the packet length.
It is necessary to distinguish between encrypted data to be sent and
encrypted session keys because the client needs to be able to distinguish
between the two.
"""

import Axon
import struct


class DataTx(Axon.Component.component):
    """\
    DataTx() -> new DataTx component

    Handles packetizing

    Keyword arguments: None
    """
    Inboxes = {"inbox": "encrypted data",
               "keyIn": "encrypted session key",
               "control": "receive shutdown messages"}
    Outboxes = {"outbox": "add header and send encrypted key and data packets",
                "signal": "pass shutdown messages"}

    def __init__(self):
        super(DataTx, self).__init__()

    def main(self):
        KEY = 0x20
        DATA = 0x30
        while 1:
            # add header - packet type = 4 bytes and packet length = 4 bytes
            while self.dataReady("keyIn"):
                data = self.recv("keyIn")
                header = struct.pack("!2L", KEY, len(data))
                packet = header + data
                self.send(packet, "outbox")
                yield 1

            if self.dataReady("inbox"):
                data = self.recv("inbox")
                header = struct.pack("!2L", DATA, len(data))
                packet = header + data
                self.send(packet, "outbox")
            yield 1
sparkslabs/kamaelia_
Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/KPI/Server/DataTx.py
Python
apache-2.0
2,606
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import constants
from oslo_config import cfg

from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
    constants as agent_consts)
from neutron.services.trunk.drivers.openvswitch import driver
from neutron.tests import base

GEN_TRUNK_BR_NAME_PATCH = (
    'neutron.services.trunk.drivers.openvswitch.utils.gen_trunk_br_name')


class OVSDriverTestCase(base.BaseTestCase):

    def test_driver_creation(self):
        ovs_driver = driver.OVSDriver.create()
        self.assertFalse(ovs_driver.is_loaded)
        self.assertEqual(driver.NAME, ovs_driver.name)
        self.assertEqual(driver.SUPPORTED_INTERFACES, ovs_driver.interfaces)
        self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES,
                         ovs_driver.segmentation_types)
        self.assertEqual(constants.AGENT_TYPE_OVS, ovs_driver.agent_type)
        self.assertFalse(ovs_driver.can_trunk_bound_port)
        self.assertTrue(
            ovs_driver.is_agent_compatible(constants.AGENT_TYPE_OVS))
        self.assertTrue(
            ovs_driver.is_interface_compatible(driver.SUPPORTED_INTERFACES[0]))

    def test_driver_is_loaded(self):
        cfg.CONF.set_override('mechanism_drivers',
                              'openvswitch', group='ml2')
        ovs_driver = driver.OVSDriver.create()
        self.assertTrue(ovs_driver.is_loaded)

    def test_driver_is_not_loaded(self):
        cfg.CONF.set_override('core_plugin', 'my_foo_plugin')
        ovs_driver = driver.OVSDriver.create()
        self.assertFalse(ovs_driver.is_loaded)

    @mock.patch(GEN_TRUNK_BR_NAME_PATCH)
    def test_vif_details_bridge_name_handler_registration(
            self, mock_gen_br_name):
        driver.register()
        mock_gen_br_name.return_value = 'fake-trunk-br-name'
        test_trigger = mock.Mock()
        registry.notify(agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ,
                        test_trigger,
                        **{'port': {'trunk_details': {'trunk_id': 'foo'}}})
        test_trigger.assert_called_once_with('fake-trunk-br-name')
sebrandon1/neutron
neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py
Python
apache-2.0
2,829
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'

import unittest
from unittest import TestCase

import six

from gremlin_python.statics import long
from gremlin_python.structure.graph import Edge
from gremlin_python.structure.graph import Property
from gremlin_python.structure.graph import Vertex
from gremlin_python.structure.graph import VertexProperty
from gremlin_python.structure.graph import Path


class TestGraph(TestCase):
    def test_graph_objects(self):
        vertex = Vertex(1)
        assert "v[1]" == str(vertex)
        assert "vertex" == vertex.label
        assert "person" == Vertex(1, "person").label
        assert vertex == Vertex(1)
        #
        edge = Edge(2, Vertex(1), "said", Vertex("hello", "phrase"))
        assert "e[2][1-said->hello]" == str(edge)
        assert Vertex(1) == edge.outV
        assert Vertex("hello") == edge.inV
        assert "said" == edge.label
        assert "phrase" == edge.inV.label
        assert edge.inV != edge.outV
        #
        vertex_property = VertexProperty(long(24), "name", "marko")
        assert "vp[name->marko]" == str(vertex_property)
        assert "name" == vertex_property.label
        assert "name" == vertex_property.key
        assert "marko" == vertex_property.value
        assert long(24) == vertex_property.id
        assert isinstance(vertex_property.id, long)
        assert vertex_property == VertexProperty(long(24), "name", "marko")
        #
        property = Property("age", 29)
        assert "p[age->29]" == str(property)
        assert "age" == property.key
        assert 29 == property.value
        assert isinstance(property.value, int)
        assert property == Property("age", 29)
        if not six.PY3:
            assert property != Property("age", long(29))
        #
        for i in [vertex, edge, vertex_property, property]:
            for j in [vertex, edge, vertex_property, property]:
                if type(i) != type(j):
                    assert i != j
                else:
                    assert i == j
                    assert i.__hash__() == hash(i)

    def test_path(self):
        path = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
        assert "[1, v[1], 'hello']" == str(path)
        assert 1 == path["a"]
        assert Vertex(1) == path["c"]
        assert [1, Vertex(1)] == path["b"]
        assert path[0] == 1
        assert path[1] == Vertex(1)
        assert path[2] == "hello"
        assert 3 == len(path)
        assert "hello" in path
        assert "goodbye" not in path
        assert Vertex(1) in path
        assert Vertex(123) not in path
        #
        try:
            temp = path[3]
            raise Exception("Accessing beyond the list index should throw an index error")
        except IndexError:
            pass
        #
        try:
            temp = path["zz"]
            raise Exception("Accessing nothing should throw a key error")
        except KeyError:
            pass
        #
        try:
            temp = path[1:2]
            raise Exception("Accessing using slices should throw a type error")
        except TypeError:
            pass
        #
        assert path == path
        assert hash(path) == hash(path)
        path2 = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
        assert path == path2
        assert hash(path) == hash(path2)
        assert path != Path([set(["a"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
        assert path != Path([set(["a", "b"]), set(["c", "b"]), set([])], [3, Vertex(1), "hello"])


if __name__ == '__main__':
    unittest.main()
samiunn/incubator-tinkerpop
gremlin-python/src/main/jython/tests/structure/test_graph.py
Python
apache-2.0
4,417
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## ## @package workspace # Module caffe2.python.workspace from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import contextlib from google.protobuf.message import Message from multiprocessing import Process import os from collections import defaultdict import logging import numpy as np from past.builtins import basestring import shutil import socket import tempfile from caffe2.proto import caffe2_pb2 from caffe2.python import scope, utils import caffe2.python._import_c_extension as C logger = logging.getLogger(__name__) Blobs = C.blobs CreateBlob = C.create_blob CurrentWorkspace = C.current_workspace DeserializeBlob = C.deserialize_blob GlobalInit = C.global_init HasBlob = C.has_blob RegisteredOperators = C.registered_operators SerializeBlob = C.serialize_blob SwitchWorkspace = C.switch_workspace RootFolder = C.root_folder Workspaces = C.workspaces BenchmarkNet = C.benchmark_net GetStats = C.get_stats operator_tracebacks = defaultdict(dict) is_asan = C.is_asan has_gpu_support = C.has_gpu_support if has_gpu_support: NumCudaDevices = C.num_cuda_devices SetDefaultGPUID = C.set_default_gpu_id GetDefaultGPUID = C.get_default_gpu_id GetCUDAVersion = C.get_cuda_version GetCuDNNVersion = C.get_cudnn_version def GetCudaPeerAccessPattern(): return np.asarray(C.get_cuda_peer_access_pattern()) GetDeviceProperties = C.get_device_properties else: NumCudaDevices = lambda: 0 # noqa SetDefaultGPUID = lambda x: None # noqa GetDefaultGPUID = lambda: 0 # noqa GetCuDNNVersion = lambda: 0 # noqa GetCuDNNVersion = lambda: 0 # noqa GetCudaPeerAccessPattern = lambda: np.array([]) # noqa GetDeviceProperties = lambda x: None # noqa def _GetFreeFlaskPort(): """Get a free flask port.""" # We will prefer to use 5000. If not, we will then pick a random port. sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex(('127.0.0.1', 5000)) if result == 0: return 5000 else: s = socket.socket() s.bind(('', 0)) port = s.getsockname()[1] s.close() # Race condition: between the interval we close the socket and actually # start a mint process, another process might have occupied the port. We # don't do much here as this is mostly for convenience in research # rather than 24x7 service. return port def StartMint(root_folder=None, port=None): """Start a mint instance. TODO(Yangqing): this does not work well under ipython yet. According to https://github.com/ipython/ipython/issues/5862 writing up some fix is a todo item. 
""" from caffe2.python.mint import app if root_folder is None: # Get the root folder from the current workspace root_folder = C.root_folder() if port is None: port = _GetFreeFlaskPort() process = Process( target=app.main, args=( ['-p', str(port), '-r', root_folder], ) ) process.start() print('Mint running at http://{}:{}'.format(socket.getfqdn(), port)) return process def StringifyProto(obj): """Stringify a protocol buffer object. Inputs: obj: a protocol buffer object, or a Pycaffe2 object that has a Proto() function. Outputs: string: the output protobuf string. Raises: AttributeError: if the passed in object does not have the right attribute. """ if isinstance(obj, basestring): return obj else: if isinstance(obj, Message): # First, see if this object is a protocol buffer, which we can # simply serialize with the SerializeToString() call. return obj.SerializeToString() elif hasattr(obj, 'Proto'): return obj.Proto().SerializeToString() else: raise ValueError("Unexpected argument to StringifyProto of type " + type(obj).__name__) def ResetWorkspace(root_folder=None): if root_folder is None: # Reset the workspace, but keep the current root folder setting. return C.reset_workspace(C.root_folder()) else: if not os.path.exists(root_folder): os.makedirs(root_folder) return C.reset_workspace(root_folder) def CreateNet(net, overwrite=False, input_blobs=None): if input_blobs is None: input_blobs = [] for input_blob in input_blobs: C.create_blob(input_blob) return CallWithExceptionIntercept( C.create_net, C.Workspace.current._last_failed_op_net_position, GetNetName(net), StringifyProto(net), overwrite, ) def Predictor(init_net, predict_net): return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net)) def GetOperatorCost(operator, blobs): return C.get_operator_cost(StringifyProto(operator), blobs) def RunOperatorOnce(operator): return C.run_operator_once(StringifyProto(operator)) def RunOperatorsOnce(operators): for op in operators: success = RunOperatorOnce(op) if not success: return False return True def CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs): try: return func(*args, **kwargs) except Exception: op_id = op_id_fetcher() net_tracebacks = operator_tracebacks.get(net_name, None) print("Traceback for operator {} in network {}".format(op_id, net_name)) if net_tracebacks and op_id in net_tracebacks: tb = net_tracebacks[op_id] for line in tb: print(':'.join(map(str, line))) raise def RunNetOnce(net): return CallWithExceptionIntercept( C.run_net_once, C.Workspace.current._last_failed_op_net_position, GetNetName(net), StringifyProto(net), ) def RunNet(name, num_iter=1, allow_fail=False): """Runs a given net. Inputs: name: the name of the net, or a reference to the net. num_iter: number of iterations to run allow_fail: if True, does not assert on net exec failure but returns False Returns: True or an exception. """ return CallWithExceptionIntercept( C.run_net, C.Workspace.current._last_failed_op_net_position, GetNetName(name), StringifyNetName(name), num_iter, allow_fail, ) def RunPlan(plan_or_step): # TODO(jiayq): refactor core.py/workspace.py to avoid circular deps import caffe2.python.core as core if isinstance(plan_or_step, core.ExecutionStep): plan_or_step = core.Plan(plan_or_step) return C.run_plan(StringifyProto(plan_or_step)) def InferShapesAndTypes(nets, blob_dimensions=None): """Infers the shapes and types for the specified nets. Inputs: nets: the list of nets blob_dimensions (optional): a dictionary of blobs and their dimensions. 
If not specified, the workspace blobs are used. Returns: A tuple of (shapes, types) dictionaries keyed by blob name. """ net_protos = [StringifyProto(n.Proto()) for n in nets] if blob_dimensions is None: blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos) else: blobdesc_prototxt = C.infer_shapes_and_types_from_map( net_protos, blob_dimensions ) blobdesc_proto = caffe2_pb2.TensorShapes() blobdesc_proto.ParseFromString(blobdesc_prototxt) shapes = {} types = {} for ts in blobdesc_proto.shapes: if not ts.unknown_shape: shapes[ts.name] = list(ts.dims) types[ts.name] = ts.data_type return (shapes, types) def _StringifyName(name, expected_type): if isinstance(name, basestring): return name assert type(name).__name__ == expected_type, \ "Expected a string or %s" % expected_type return str(name) def StringifyBlobName(name): return _StringifyName(name, "BlobReference") def StringifyNetName(name): return _StringifyName(name, "Net") def GetNetName(net): if isinstance(net, basestring): return net if type(net).__name__ == "Net": return net.Name() if isinstance(net, caffe2_pb2.NetDef): return net.name raise Exception("Not a Net object: {}".format(str(net))) def FeedBlob(name, arr, device_option=None): """Feeds a blob into the workspace. Inputs: name: the name of the blob. arr: either a TensorProto object or a numpy array object to be fed into the workspace. device_option (optional): the device option to feed the data with. Returns: True or False, stating whether the feed is successful. """ if type(arr) is caffe2_pb2.TensorProto: arr = utils.Caffe2TensorToNumpyArray(arr) if type(arr) is np.ndarray and arr.dtype.kind in 'SU': # Plain NumPy strings are weird, let's use objects instead arr = arr.astype(np.object) if device_option is None: device_option = scope.CurrentDeviceScope() if device_option and device_option.device_type == caffe2_pb2.CUDA: if arr.dtype == np.dtype('float64'): logger.warning( "CUDA operators do not support 64-bit doubles, " + "please use arr.astype(np.float32) or np.int32 for ints." + " Blob: {}".format(name) + " type: {}".format(str(arr.dtype)) ) name = StringifyBlobName(name) if device_option is not None: return C.feed_blob(name, arr, StringifyProto(device_option)) else: return C.feed_blob(name, arr) def FetchBlobs(names): """Fetches a list of blobs from the workspace. Inputs: names: list of names of blobs - strings or BlobReferences Returns: list of fetched blobs """ return [FetchBlob(name) for name in names] def FetchBlob(name): """Fetches a blob from the workspace. Inputs: name: the name of the blob - a string or a BlobReference Returns: Fetched blob (numpy array or string) if successful """ return C.fetch_blob(StringifyBlobName(name)) def ApplyTransform(transform_key, net): """Apply a Transform to a NetDef protobuf object, and returns the new transformed NetDef. Inputs: transform_key: the name of the transform, as it is stored in the registry net: a NetDef protobuf object Returns: Transformed NetDef protobuf object. """ transformed_net = caffe2_pb2.NetDef() transformed_str = C.apply_transform( str(transform_key).encode('utf-8'), net.SerializeToString(), ) transformed_net.ParseFromString(transformed_str) return transformed_net def ApplyTransformIfFaster(transform_key, net, init_net, **kwargs): """Apply a Transform to a NetDef protobuf object, and returns the new transformed NetDef, only if it runs faster than the original. The runs are performed on the current active workspace (gWorkspace). 
You should initialize that workspace before making a call to this function. Inputs: transform_key: the name of the transform, as it is stored in the registry net: a NetDef protobuf object init_net: The net to initialize the workspace. warmup_runs (optional): Determines how many times the net is run before testing. Will be 5 by default. main_runs (optional): Determines how many times the net is run during testing. Will be 10 by default. improvement_threshold (optional): Determines the factor which the new net needs to be faster in order to replace the old. Will be 1.01 by default. Returns: Either a Transformed NetDef protobuf object, or the original netdef. """ warmup_runs = kwargs['warmup_runs'] if 'warmup_runs' in kwargs else 5 main_runs = kwargs['main_runs'] if 'main_runs' in kwargs else 10 improvement_threshold = kwargs['improvement_threshold'] \ if 'improvement_threshold' in kwargs else 1.01 transformed_net = caffe2_pb2.NetDef() transformed_str = C.apply_transform_if_faster( str(transform_key).encode('utf-8'), net.SerializeToString(), init_net.SerializeToString(), warmup_runs, main_runs, float(improvement_threshold), ) transformed_net.ParseFromString(transformed_str) return transformed_net def GetNameScope(): """Return the current namescope string. To be used to fetch blobs""" return scope.CurrentNameScope() class _BlobDict(object): """Provides python dict compatible way to do fetching and feeding""" def __getitem__(self, key): return FetchBlob(key) def __setitem__(self, key, value): return FeedBlob(key, value) def __len__(self): return len(C.blobs()) def __iter__(self): return C.blobs().__iter__() def __contains__(self, item): return C.has_blob(item) blobs = _BlobDict() ################################################################################ # Utilities for immediate mode # # Caffe2's immediate mode implements the following behavior: between the two # function calls StartImmediate() and StopImmediate(), for any operator that is # called through CreateOperator(), we will also run that operator in a workspace # that is specific to the immediate mode. The user is explicitly expected to # make sure that these ops have proper inputs and outputs, i.e. one should not # run an op where an external input is not created or fed. # # Users can use FeedImmediate() and FetchImmediate() to interact with blobs # in the immediate workspace. # # Once StopImmediate() is called, all contents in the immediate workspace is # freed up so one can continue using normal runs. # # The immediate mode is solely for debugging purposes and support will be very # sparse. ################################################################################ _immediate_mode = False _immediate_workspace_name = "_CAFFE2_IMMEDIATE" _immediate_root_folder = '' def IsImmediate(): return _immediate_mode @contextlib.contextmanager def WorkspaceGuard(workspace_name): current = CurrentWorkspace() SwitchWorkspace(workspace_name, True) yield SwitchWorkspace(current) def StartImmediate(i_know=False): global _immediate_mode global _immediate_root_folder if IsImmediate(): # already in immediate mode. We will kill the previous one # and start from fresh. StopImmediate() _immediate_mode = True with WorkspaceGuard(_immediate_workspace_name): _immediate_root_folder = tempfile.mkdtemp() ResetWorkspace(_immediate_root_folder) if i_know: # if the user doesn't want to see the warning message, sure... return print(""" Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL feature and may very easily go wrong. 
This is because Caffe2 uses a declarative way of defining operators and models, which is essentially not meant to run things in an interactive way. Read the following carefully to make sure that you understand the caveats. (1) You need to make sure that the sequences of operators you create are actually runnable sequentially. For example, if you create an op that takes an input X, somewhere earlier you should have already created X. (2) Caffe2 immediate uses one single workspace, so if the set of operators you run are intended to be under different workspaces, they will not run. To create boundaries between such use cases, you can call FinishImmediate() and StartImmediate() manually to flush out everything no longer needed. (3) Underlying objects held by the immediate mode may interfere with your normal run. For example, if there is a leveldb that you opened in immediate mode and did not close, your main run will fail because leveldb does not support double opening. Immediate mode may also occupy a lot of memory esp. on GPUs. Call FinishImmediate() as soon as possible when you no longer need it. (4) Immediate is designed to be slow. Every immediate call implicitly creates a temp operator object, runs it, and destroys the operator. This slow-speed run is by design to discourage abuse. For most use cases other than debugging, do NOT turn on immediate mode. (5) If there is anything FATAL happening in the underlying C++ code, the immediate mode will immediately (pun intended) cause the runtime to crash. Thus you should use immediate mode with extra care. If you still would like to, have fun [https://xkcd.com/149/]. """) def StopImmediate(): """Stops an immediate mode run.""" # Phew, that was a dangerous ride. global _immediate_mode global _immediate_root_folder if not IsImmediate(): return with WorkspaceGuard(_immediate_workspace_name): ResetWorkspace() shutil.rmtree(_immediate_root_folder) _immediate_root_folder = '' _immediate_mode = False def ImmediateBlobs(): with WorkspaceGuard(_immediate_workspace_name): return Blobs() def RunOperatorImmediate(op): with WorkspaceGuard(_immediate_workspace_name): RunOperatorOnce(op) def FetchImmediate(*args, **kwargs): with WorkspaceGuard(_immediate_workspace_name): return FetchBlob(*args, **kwargs) def FeedImmediate(*args, **kwargs): with WorkspaceGuard(_immediate_workspace_name): return FeedBlob(*args, **kwargs) # CWorkspace utilities def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False): return CallWithExceptionIntercept( ws._create_net, ws._last_failed_op_net_position, GetNetName(net), StringifyProto(net), overwrite, ) C.Workspace.create_net = _Workspace_create_net_with_exception_intercept def _Workspace_run(ws, obj): if hasattr(obj, 'Proto'): obj = obj.Proto() if isinstance(obj, caffe2_pb2.PlanDef): return ws._run_plan(obj.SerializeToString()) if isinstance(obj, caffe2_pb2.NetDef): return CallWithExceptionIntercept( ws._run_net, ws._last_failed_op_net_position, GetNetName(obj), obj.SerializeToString(), ) # return ws._run_net(obj.SerializeToString()) if isinstance(obj, caffe2_pb2.OperatorDef): return ws._run_operator(obj.SerializeToString()) raise ValueError( "Don't know how to do Workspace.run() on {}".format(type(obj))) C.Workspace.run = _Workspace_run def _Blob_feed(blob, arg, device_option=None): if device_option is not None: device_option = StringifyProto(device_option) return blob._feed(arg, device_option) C.Blob.feed = _Blob_feed
davinwang/caffe2
caffe2/python/workspace.py
Python
apache-2.0
19,553