code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from unittest.mock import patch, call
from django.test import TestCase
from .. decorators import cached_method
class TestCachedMethodDecorator(TestCase):
    """Tests for the ``cached_method`` decorator."""

    @classmethod
    def setUpTestData(cls):
        # BUG FIX: the original called ``super(cls, TestCachedMethodDecorator)``,
        # which reverses super()'s (type, obj_or_type) argument order and raises
        # TypeError if this TestCase is ever subclassed (the second argument
        # must be an instance or subclass of the first). Zero-argument super()
        # is the correct, subclass-safe form.
        super().setUpTestData()

        # A Test class with a cached method.
        class Klass:
            @cached_method("{}-test", timeout=99)
            def meth(self, *args, **kwargs):
                return "result"
        cls.Klass = Klass

        # A dummy object that will be used as input to the cached method;
        # its ``id`` attribute fills the "{}" slot of the cache-key template.
        class Obj:
            id = 5
        cls.obj = Obj()

    def test_method_uncached(self):
        """When the method has not been cached, it should set the cache."""
        with patch("utils.decorators.cache") as mock_cache:
            mock_cache.get.return_value = None  # Ensure first call returns None
            k = self.Klass()
            k.meth(self.obj)  # 1st call, cache should get set
            # check for cache calls.
            mock_cache.assert_has_calls([
                call.get("5-test"),
                call.set("5-test", "result", timeout=99),
            ])

    def test_method_cached(self):
        """When a value is already cached, only a cache read should occur."""
        with patch("utils.decorators.cache") as mock_cache:
            mock_cache.get.return_value = "result"  # later calls return a value
            k = self.Klass()
            k.meth(self.obj)  # 2nd call, returns cached result
            mock_cache.assert_has_calls([
                call.get("5-test")
            ])
| tndatacommons/tndata_backend | tndata_backend/utils/tests/test_decorators.py | Python | mit | 1,519 |
#!/usr/bin/env python3
# BUG FIX: ``env`` with an absolute interpreter path defeats env's purpose;
# use the conventional portable form.
import os
import yaml

# Mapping loaded from config.yaml in the current working directory.
datamap = {}
conf_p = os.path.join(os.getcwd(), "config.yaml")
if os.path.isfile(conf_p):
    # Context manager guarantees the handle is closed even if parsing fails;
    # safe_load avoids executing arbitrary YAML tags from the config file.
    with open(conf_p) as conf:
        datamap = yaml.safe_load(conf)
else:
    print(("Configure file not found at %s") % conf_p)
    # BUG FIX: exit non-zero so callers/supervisors can detect the failure
    # (the original exited with status 0 on this error path).
    exit(1)
def get_plgconf(mod_name):
    """Return the configuration section for the named plugin.

    Looks the plugin up under the module-level ``datamap['plugins']``
    mapping; raises KeyError if the plugin has no configured section.
    """
    plugins = datamap["plugins"]
    return plugins[mod_name]
| comword/xmppbot | config.py | Python | gpl-3.0 | 329 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import TYPE_CHECKING, Optional, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LocalFilesystemToWasbOperator(BaseOperator):
    """
    Uploads a file to Azure Blob Storage.

    :param file_path: Path to the file to load. (templated)
    :type file_path: str
    :param container_name: Name of the container. (templated)
    :type container_name: str
    :param blob_name: Name of the blob. (templated)
    :type blob_name: str
    :param wasb_conn_id: Reference to the wasb connection.
    :type wasb_conn_id: str
    :param load_options: Optional keyword arguments that
        `WasbHook.load_file()` takes.
    :type load_options: Optional[dict]
    """

    # Attributes rendered by Airflow's templating engine before execute().
    template_fields: Sequence[str] = ('file_path', 'container_name', 'blob_name')

    def __init__(
        self,
        *,
        file_path: str,
        container_name: str,
        blob_name: str,
        wasb_conn_id: str = 'wasb_default',
        load_options: Optional[dict] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.file_path = file_path
        self.container_name = container_name
        self.blob_name = blob_name
        self.wasb_conn_id = wasb_conn_id
        # Normalise None to a fresh dict (avoids a shared mutable default).
        self.load_options = {} if load_options is None else load_options

    def execute(self, context: "Context") -> None:
        """Upload a file to Azure Blob Storage."""
        hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
        self.log.info(
            'Uploading %s to wasb://%s as %s',
            self.file_path,
            self.container_name,
            self.blob_name,
        )
        hook.load_file(self.file_path, self.container_name, self.blob_name, **self.load_options)
| mistercrunch/airflow | airflow/providers/microsoft/azure/transfers/local_to_wasb.py | Python | apache-2.0 | 2,647 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
# Per-process cache of device configuration text, keyed by the exact CLI
# command used to fetch it; populated by get_config() below.
_DEVICE_CONFIGS = {}
# Connection parameters accepted under the ``provider`` key; credentials
# fall back to the ANSIBLE_NET_* environment variables when not given.
iosxr_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
iosxr_argument_spec = {
    'provider': dict(type='dict', options=iosxr_provider_spec)
}
# The provider options are also accepted at the module top level (deprecated
# form); check_args() warns when they are supplied that way.
iosxr_argument_spec.update(iosxr_provider_spec)
def get_argspec():
    """Return the shared iosxr module argument spec (the module-level dict,
    not a copy)."""
    return iosxr_argument_spec
def check_args(module, warnings):
    """Append deprecation warnings for legacy top-level connection arguments.

    :param module: AnsibleModule whose ``params`` are inspected.
    :param warnings: list that warning messages are appended to in place.
    """
    for key in iosxr_argument_spec:
        if module._name == 'iosxr_user':
            # iosxr_user legitimately takes a top-level ``password``;
            # only warn for the other provider-style arguments.
            if key not in ['password', 'provider'] and module.params[key]:
                # BUG FIX: the original message read "will be in a future
                # version", dropping the word "removed" present in the
                # parallel branch below.
                warnings.append('argument %s has been deprecated and will be removed in a future version' % key)
        else:
            if key != 'provider' and module.params[key]:
                warnings.append('argument %s has been deprecated and will be removed in a future version' % key)
def get_config(module, flags=None):
    """Return the device's running configuration, cached per command.

    :param module: AnsibleModule used to run the CLI command.
    :param flags: optional list of tokens appended to ``show running-config``.
    :returns: configuration text (str).
    """
    # BUG FIX: the original used a mutable default argument (``flags=[]``);
    # normalise None here instead.
    flags = flags or []
    cmd = 'show running-config '
    cmd += ' '.join(flags)
    cmd = cmd.strip()
    try:
        # Serve from the per-process cache when this exact command ran before.
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
        cfg = to_text(out, errors='surrogate_or_strict').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def to_commands(module, commands):
    """Normalise ``commands`` into command/prompt/answer dicts via ComplexList."""
    transform = ComplexList(
        {
            'command': dict(key=True),
            'prompt': dict(),
            'answer': dict(),
        },
        module,
    )
    return transform(commands)
def run_commands(module, commands, check_rc=True):
    """Run each command on the device and return the decoded outputs.

    When ``check_rc`` is true, fail the module on the first non-zero
    return code instead of collecting further responses.
    """
    responses = []
    normalised = to_commands(module, to_list(commands))
    for cmd in to_list(normalised):
        payload = module.jsonify(cmd)
        rc, out, err = exec_command(module, payload)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
        responses.append(to_text(out, errors='surrogate_or_strict'))
    return responses
def load_config(module, commands, warnings, commit=False, replace=False, comment=None, admin=False):
    """Load configuration commands onto the device and return the diff.

    :param module: AnsibleModule used for command execution.
    :param commands: configuration command(s) to apply (string or list).
    :param warnings: list that warning messages are appended to in place.
    :param commit: when True commit the candidate config, otherwise abort.
    :param replace: accepted for interface compatibility; not used here.
    :param comment: optional commit comment (only used when committing).
    :param admin: enter admin configuration mode when True.
    :returns: text diff of the candidate configuration.
    """
    cmd = 'configure terminal'
    if admin:
        cmd = 'admin ' + cmd
    rc, out, err = exec_command(module, cmd)
    if rc != 0:
        module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
    failed = False
    for command in to_list(commands):
        # 'end' would leave configuration mode prematurely; skip it.
        if command == 'end':
            continue
        rc, out, err = exec_command(module, command)
        if rc != 0:
            failed = True
            break
    if failed:
        # Roll back the partial candidate before reporting the failure.
        exec_command(module, 'abort')
        module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), commands=commands, rc=rc)
    rc, diff, err = exec_command(module, 'show commit changes diff')
    if rc != 0:
        # If we failed, maybe we are in an old version so
        # we run show configuration instead
        rc, diff, err = exec_command(module, 'show configuration')
        if module._diff:
            warnings.append('device platform does not support config diff')
    if commit:
        cmd = 'commit'
        if comment:
            cmd += ' comment {0}'.format(comment)
    else:
        cmd = 'abort'
    rc, out, err = exec_command(module, cmd)
    if rc != 0:
        exec_command(module, 'abort')
        # BUG FIX: decode ``err`` like every other failure path in this file
        # instead of passing the raw bytes straight to fail_json.
        module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), commands=commands, rc=rc)
    return to_text(diff, errors='surrogate_or_strict')
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/iosxr.py | Python | bsd-3-clause | 5,611 |
# Copyright (c) 2014-2019 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and/or associated documentation files (the "Materials"),
# to deal in the Materials without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Materials, and to permit persons to whom the
# Materials are furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Materials.
#
# MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
# STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
# HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
# IN THE MATERIALS.
# This header is automatically generated by the same tool that creates
# the Binary Section of the SPIR-V specification.
# Enumeration tokens for SPIR-V, in various styles:
# C, C++, C++11, JSON, Lua, Python, C#, D
#
# - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
# - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
# - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
# - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
# - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
# - C# will use enum classes in the Specification class located in the "Spv" namespace,
# e.g.: Spv.Specification.SourceLanguage.GLSL
# - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL
#
# Some tokens act like mask values, which can be OR'd together,
# while others are mutually exclusive. The mask-like ones have
# "Mask" in their name, and a parallel enum that has the shift
# amount (1 << x) for each corresponding enumerant.
spv = {
'MagicNumber' : 0x07230203,
'Version' : 0x00010300,
'Revision' : 7,
'OpCodeMask' : 0xffff,
'WordCountShift' : 16,
'SourceLanguage' : {
'Unknown' : 0,
'ESSL' : 1,
'GLSL' : 2,
'OpenCL_C' : 3,
'OpenCL_CPP' : 4,
'HLSL' : 5,
},
'ExecutionModel' : {
'Vertex' : 0,
'TessellationControl' : 1,
'TessellationEvaluation' : 2,
'Geometry' : 3,
'Fragment' : 4,
'GLCompute' : 5,
'Kernel' : 6,
'TaskNV' : 5267,
'MeshNV' : 5268,
'RayGenerationNV' : 5313,
'IntersectionNV' : 5314,
'AnyHitNV' : 5315,
'ClosestHitNV' : 5316,
'MissNV' : 5317,
'CallableNV' : 5318,
},
'AddressingModel' : {
'Logical' : 0,
'Physical32' : 1,
'Physical64' : 2,
'PhysicalStorageBuffer64EXT' : 5348,
},
'MemoryModel' : {
'Simple' : 0,
'GLSL450' : 1,
'OpenCL' : 2,
'VulkanKHR' : 3,
},
'ExecutionMode' : {
'Invocations' : 0,
'SpacingEqual' : 1,
'SpacingFractionalEven' : 2,
'SpacingFractionalOdd' : 3,
'VertexOrderCw' : 4,
'VertexOrderCcw' : 5,
'PixelCenterInteger' : 6,
'OriginUpperLeft' : 7,
'OriginLowerLeft' : 8,
'EarlyFragmentTests' : 9,
'PointMode' : 10,
'Xfb' : 11,
'DepthReplacing' : 12,
'DepthGreater' : 14,
'DepthLess' : 15,
'DepthUnchanged' : 16,
'LocalSize' : 17,
'LocalSizeHint' : 18,
'InputPoints' : 19,
'InputLines' : 20,
'InputLinesAdjacency' : 21,
'Triangles' : 22,
'InputTrianglesAdjacency' : 23,
'Quads' : 24,
'Isolines' : 25,
'OutputVertices' : 26,
'OutputPoints' : 27,
'OutputLineStrip' : 28,
'OutputTriangleStrip' : 29,
'VecTypeHint' : 30,
'ContractionOff' : 31,
'Initializer' : 33,
'Finalizer' : 34,
'SubgroupSize' : 35,
'SubgroupsPerWorkgroup' : 36,
'SubgroupsPerWorkgroupId' : 37,
'LocalSizeId' : 38,
'LocalSizeHintId' : 39,
'PostDepthCoverage' : 4446,
'DenormPreserve' : 4459,
'DenormFlushToZero' : 4460,
'SignedZeroInfNanPreserve' : 4461,
'RoundingModeRTE' : 4462,
'RoundingModeRTZ' : 4463,
'StencilRefReplacingEXT' : 5027,
'OutputLinesNV' : 5269,
'OutputPrimitivesNV' : 5270,
'DerivativeGroupQuadsNV' : 5289,
'DerivativeGroupLinearNV' : 5290,
'OutputTrianglesNV' : 5298,
},
'StorageClass' : {
'UniformConstant' : 0,
'Input' : 1,
'Uniform' : 2,
'Output' : 3,
'Workgroup' : 4,
'CrossWorkgroup' : 5,
'Private' : 6,
'Function' : 7,
'Generic' : 8,
'PushConstant' : 9,
'AtomicCounter' : 10,
'Image' : 11,
'StorageBuffer' : 12,
'CallableDataNV' : 5328,
'IncomingCallableDataNV' : 5329,
'RayPayloadNV' : 5338,
'HitAttributeNV' : 5339,
'IncomingRayPayloadNV' : 5342,
'ShaderRecordBufferNV' : 5343,
'PhysicalStorageBufferEXT' : 5349,
},
'Dim' : {
'Dim1D' : 0,
'Dim2D' : 1,
'Dim3D' : 2,
'Cube' : 3,
'Rect' : 4,
'Buffer' : 5,
'SubpassData' : 6,
},
'SamplerAddressingMode' : {
'None' : 0,
'ClampToEdge' : 1,
'Clamp' : 2,
'Repeat' : 3,
'RepeatMirrored' : 4,
},
'SamplerFilterMode' : {
'Nearest' : 0,
'Linear' : 1,
},
'ImageFormat' : {
'Unknown' : 0,
'Rgba32f' : 1,
'Rgba16f' : 2,
'R32f' : 3,
'Rgba8' : 4,
'Rgba8Snorm' : 5,
'Rg32f' : 6,
'Rg16f' : 7,
'R11fG11fB10f' : 8,
'R16f' : 9,
'Rgba16' : 10,
'Rgb10A2' : 11,
'Rg16' : 12,
'Rg8' : 13,
'R16' : 14,
'R8' : 15,
'Rgba16Snorm' : 16,
'Rg16Snorm' : 17,
'Rg8Snorm' : 18,
'R16Snorm' : 19,
'R8Snorm' : 20,
'Rgba32i' : 21,
'Rgba16i' : 22,
'Rgba8i' : 23,
'R32i' : 24,
'Rg32i' : 25,
'Rg16i' : 26,
'Rg8i' : 27,
'R16i' : 28,
'R8i' : 29,
'Rgba32ui' : 30,
'Rgba16ui' : 31,
'Rgba8ui' : 32,
'R32ui' : 33,
'Rgb10a2ui' : 34,
'Rg32ui' : 35,
'Rg16ui' : 36,
'Rg8ui' : 37,
'R16ui' : 38,
'R8ui' : 39,
},
'ImageChannelOrder' : {
'R' : 0,
'A' : 1,
'RG' : 2,
'RA' : 3,
'RGB' : 4,
'RGBA' : 5,
'BGRA' : 6,
'ARGB' : 7,
'Intensity' : 8,
'Luminance' : 9,
'Rx' : 10,
'RGx' : 11,
'RGBx' : 12,
'Depth' : 13,
'DepthStencil' : 14,
'sRGB' : 15,
'sRGBx' : 16,
'sRGBA' : 17,
'sBGRA' : 18,
'ABGR' : 19,
},
'ImageChannelDataType' : {
'SnormInt8' : 0,
'SnormInt16' : 1,
'UnormInt8' : 2,
'UnormInt16' : 3,
'UnormShort565' : 4,
'UnormShort555' : 5,
'UnormInt101010' : 6,
'SignedInt8' : 7,
'SignedInt16' : 8,
'SignedInt32' : 9,
'UnsignedInt8' : 10,
'UnsignedInt16' : 11,
'UnsignedInt32' : 12,
'HalfFloat' : 13,
'Float' : 14,
'UnormInt24' : 15,
'UnormInt101010_2' : 16,
},
'ImageOperandsShift' : {
'Bias' : 0,
'Lod' : 1,
'Grad' : 2,
'ConstOffset' : 3,
'Offset' : 4,
'ConstOffsets' : 5,
'Sample' : 6,
'MinLod' : 7,
'MakeTexelAvailableKHR' : 8,
'MakeTexelVisibleKHR' : 9,
'NonPrivateTexelKHR' : 10,
'VolatileTexelKHR' : 11,
},
'ImageOperandsMask' : {
'MaskNone' : 0,
'Bias' : 0x00000001,
'Lod' : 0x00000002,
'Grad' : 0x00000004,
'ConstOffset' : 0x00000008,
'Offset' : 0x00000010,
'ConstOffsets' : 0x00000020,
'Sample' : 0x00000040,
'MinLod' : 0x00000080,
'MakeTexelAvailableKHR' : 0x00000100,
'MakeTexelVisibleKHR' : 0x00000200,
'NonPrivateTexelKHR' : 0x00000400,
'VolatileTexelKHR' : 0x00000800,
},
'FPFastMathModeShift' : {
'NotNaN' : 0,
'NotInf' : 1,
'NSZ' : 2,
'AllowRecip' : 3,
'Fast' : 4,
},
'FPFastMathModeMask' : {
'MaskNone' : 0,
'NotNaN' : 0x00000001,
'NotInf' : 0x00000002,
'NSZ' : 0x00000004,
'AllowRecip' : 0x00000008,
'Fast' : 0x00000010,
},
'FPRoundingMode' : {
'RTE' : 0,
'RTZ' : 1,
'RTP' : 2,
'RTN' : 3,
},
'LinkageType' : {
'Export' : 0,
'Import' : 1,
},
'AccessQualifier' : {
'ReadOnly' : 0,
'WriteOnly' : 1,
'ReadWrite' : 2,
},
'FunctionParameterAttribute' : {
'Zext' : 0,
'Sext' : 1,
'ByVal' : 2,
'Sret' : 3,
'NoAlias' : 4,
'NoCapture' : 5,
'NoWrite' : 6,
'NoReadWrite' : 7,
},
'Decoration' : {
'RelaxedPrecision' : 0,
'SpecId' : 1,
'Block' : 2,
'BufferBlock' : 3,
'RowMajor' : 4,
'ColMajor' : 5,
'ArrayStride' : 6,
'MatrixStride' : 7,
'GLSLShared' : 8,
'GLSLPacked' : 9,
'CPacked' : 10,
'BuiltIn' : 11,
'NoPerspective' : 13,
'Flat' : 14,
'Patch' : 15,
'Centroid' : 16,
'Sample' : 17,
'Invariant' : 18,
'Restrict' : 19,
'Aliased' : 20,
'Volatile' : 21,
'Constant' : 22,
'Coherent' : 23,
'NonWritable' : 24,
'NonReadable' : 25,
'Uniform' : 26,
'SaturatedConversion' : 28,
'Stream' : 29,
'Location' : 30,
'Component' : 31,
'Index' : 32,
'Binding' : 33,
'DescriptorSet' : 34,
'Offset' : 35,
'XfbBuffer' : 36,
'XfbStride' : 37,
'FuncParamAttr' : 38,
'FPRoundingMode' : 39,
'FPFastMathMode' : 40,
'LinkageAttributes' : 41,
'NoContraction' : 42,
'InputAttachmentIndex' : 43,
'Alignment' : 44,
'MaxByteOffset' : 45,
'AlignmentId' : 46,
'MaxByteOffsetId' : 47,
'NoSignedWrap' : 4469,
'NoUnsignedWrap' : 4470,
'ExplicitInterpAMD' : 4999,
'OverrideCoverageNV' : 5248,
'PassthroughNV' : 5250,
'ViewportRelativeNV' : 5252,
'SecondaryViewportRelativeNV' : 5256,
'PerPrimitiveNV' : 5271,
'PerViewNV' : 5272,
'PerTaskNV' : 5273,
'PerVertexNV' : 5285,
'NonUniformEXT' : 5300,
'RestrictPointerEXT' : 5355,
'AliasedPointerEXT' : 5356,
'HlslCounterBufferGOOGLE' : 5634,
'HlslSemanticGOOGLE' : 5635,
},
'BuiltIn' : {
'Position' : 0,
'PointSize' : 1,
'ClipDistance' : 3,
'CullDistance' : 4,
'VertexId' : 5,
'InstanceId' : 6,
'PrimitiveId' : 7,
'InvocationId' : 8,
'Layer' : 9,
'ViewportIndex' : 10,
'TessLevelOuter' : 11,
'TessLevelInner' : 12,
'TessCoord' : 13,
'PatchVertices' : 14,
'FragCoord' : 15,
'PointCoord' : 16,
'FrontFacing' : 17,
'SampleId' : 18,
'SamplePosition' : 19,
'SampleMask' : 20,
'FragDepth' : 22,
'HelperInvocation' : 23,
'NumWorkgroups' : 24,
'WorkgroupSize' : 25,
'WorkgroupId' : 26,
'LocalInvocationId' : 27,
'GlobalInvocationId' : 28,
'LocalInvocationIndex' : 29,
'WorkDim' : 30,
'GlobalSize' : 31,
'EnqueuedWorkgroupSize' : 32,
'GlobalOffset' : 33,
'GlobalLinearId' : 34,
'SubgroupSize' : 36,
'SubgroupMaxSize' : 37,
'NumSubgroups' : 38,
'NumEnqueuedSubgroups' : 39,
'SubgroupId' : 40,
'SubgroupLocalInvocationId' : 41,
'VertexIndex' : 42,
'InstanceIndex' : 43,
'SubgroupEqMask' : 4416,
'SubgroupEqMaskKHR' : 4416,
'SubgroupGeMask' : 4417,
'SubgroupGeMaskKHR' : 4417,
'SubgroupGtMask' : 4418,
'SubgroupGtMaskKHR' : 4418,
'SubgroupLeMask' : 4419,
'SubgroupLeMaskKHR' : 4419,
'SubgroupLtMask' : 4420,
'SubgroupLtMaskKHR' : 4420,
'BaseVertex' : 4424,
'BaseInstance' : 4425,
'DrawIndex' : 4426,
'DeviceIndex' : 4438,
'ViewIndex' : 4440,
'BaryCoordNoPerspAMD' : 4992,
'BaryCoordNoPerspCentroidAMD' : 4993,
'BaryCoordNoPerspSampleAMD' : 4994,
'BaryCoordSmoothAMD' : 4995,
'BaryCoordSmoothCentroidAMD' : 4996,
'BaryCoordSmoothSampleAMD' : 4997,
'BaryCoordPullModelAMD' : 4998,
'FragStencilRefEXT' : 5014,
'ViewportMaskNV' : 5253,
'SecondaryPositionNV' : 5257,
'SecondaryViewportMaskNV' : 5258,
'PositionPerViewNV' : 5261,
'ViewportMaskPerViewNV' : 5262,
'FullyCoveredEXT' : 5264,
'TaskCountNV' : 5274,
'PrimitiveCountNV' : 5275,
'PrimitiveIndicesNV' : 5276,
'ClipDistancePerViewNV' : 5277,
'CullDistancePerViewNV' : 5278,
'LayerPerViewNV' : 5279,
'MeshViewCountNV' : 5280,
'MeshViewIndicesNV' : 5281,
'BaryCoordNV' : 5286,
'BaryCoordNoPerspNV' : 5287,
'FragSizeEXT' : 5292,
'FragmentSizeNV' : 5292,
'FragInvocationCountEXT' : 5293,
'InvocationsPerPixelNV' : 5293,
'LaunchIdNV' : 5319,
'LaunchSizeNV' : 5320,
'WorldRayOriginNV' : 5321,
'WorldRayDirectionNV' : 5322,
'ObjectRayOriginNV' : 5323,
'ObjectRayDirectionNV' : 5324,
'RayTminNV' : 5325,
'RayTmaxNV' : 5326,
'InstanceCustomIndexNV' : 5327,
'ObjectToWorldNV' : 5330,
'WorldToObjectNV' : 5331,
'HitTNV' : 5332,
'HitKindNV' : 5333,
'IncomingRayFlagsNV' : 5351,
},
'SelectionControlShift' : {
'Flatten' : 0,
'DontFlatten' : 1,
},
'SelectionControlMask' : {
'MaskNone' : 0,
'Flatten' : 0x00000001,
'DontFlatten' : 0x00000002,
},
'LoopControlShift' : {
'Unroll' : 0,
'DontUnroll' : 1,
'DependencyInfinite' : 2,
'DependencyLength' : 3,
},
'LoopControlMask' : {
'MaskNone' : 0,
'Unroll' : 0x00000001,
'DontUnroll' : 0x00000002,
'DependencyInfinite' : 0x00000004,
'DependencyLength' : 0x00000008,
},
'FunctionControlShift' : {
'Inline' : 0,
'DontInline' : 1,
'Pure' : 2,
'Const' : 3,
},
'FunctionControlMask' : {
'MaskNone' : 0,
'Inline' : 0x00000001,
'DontInline' : 0x00000002,
'Pure' : 0x00000004,
'Const' : 0x00000008,
},
'MemorySemanticsShift' : {
'Acquire' : 1,
'Release' : 2,
'AcquireRelease' : 3,
'SequentiallyConsistent' : 4,
'UniformMemory' : 6,
'SubgroupMemory' : 7,
'WorkgroupMemory' : 8,
'CrossWorkgroupMemory' : 9,
'AtomicCounterMemory' : 10,
'ImageMemory' : 11,
'OutputMemoryKHR' : 12,
'MakeAvailableKHR' : 13,
'MakeVisibleKHR' : 14,
},
'MemorySemanticsMask' : {
'MaskNone' : 0,
'Acquire' : 0x00000002,
'Release' : 0x00000004,
'AcquireRelease' : 0x00000008,
'SequentiallyConsistent' : 0x00000010,
'UniformMemory' : 0x00000040,
'SubgroupMemory' : 0x00000080,
'WorkgroupMemory' : 0x00000100,
'CrossWorkgroupMemory' : 0x00000200,
'AtomicCounterMemory' : 0x00000400,
'ImageMemory' : 0x00000800,
'OutputMemoryKHR' : 0x00001000,
'MakeAvailableKHR' : 0x00002000,
'MakeVisibleKHR' : 0x00004000,
},
'MemoryAccessShift' : {
'Volatile' : 0,
'Aligned' : 1,
'Nontemporal' : 2,
'MakePointerAvailableKHR' : 3,
'MakePointerVisibleKHR' : 4,
'NonPrivatePointerKHR' : 5,
},
'MemoryAccessMask' : {
'MaskNone' : 0,
'Volatile' : 0x00000001,
'Aligned' : 0x00000002,
'Nontemporal' : 0x00000004,
'MakePointerAvailableKHR' : 0x00000008,
'MakePointerVisibleKHR' : 0x00000010,
'NonPrivatePointerKHR' : 0x00000020,
},
'Scope' : {
'CrossDevice' : 0,
'Device' : 1,
'Workgroup' : 2,
'Subgroup' : 3,
'Invocation' : 4,
'QueueFamilyKHR' : 5,
},
'GroupOperation' : {
'Reduce' : 0,
'InclusiveScan' : 1,
'ExclusiveScan' : 2,
'ClusteredReduce' : 3,
'PartitionedReduceNV' : 6,
'PartitionedInclusiveScanNV' : 7,
'PartitionedExclusiveScanNV' : 8,
},
'KernelEnqueueFlags' : {
'NoWait' : 0,
'WaitKernel' : 1,
'WaitWorkGroup' : 2,
},
'KernelProfilingInfoShift' : {
'CmdExecTime' : 0,
},
'KernelProfilingInfoMask' : {
'MaskNone' : 0,
'CmdExecTime' : 0x00000001,
},
'Capability' : {
'Matrix' : 0,
'Shader' : 1,
'Geometry' : 2,
'Tessellation' : 3,
'Addresses' : 4,
'Linkage' : 5,
'Kernel' : 6,
'Vector16' : 7,
'Float16Buffer' : 8,
'Float16' : 9,
'Float64' : 10,
'Int64' : 11,
'Int64Atomics' : 12,
'ImageBasic' : 13,
'ImageReadWrite' : 14,
'ImageMipmap' : 15,
'Pipes' : 17,
'Groups' : 18,
'DeviceEnqueue' : 19,
'LiteralSampler' : 20,
'AtomicStorage' : 21,
'Int16' : 22,
'TessellationPointSize' : 23,
'GeometryPointSize' : 24,
'ImageGatherExtended' : 25,
'StorageImageMultisample' : 27,
'UniformBufferArrayDynamicIndexing' : 28,
'SampledImageArrayDynamicIndexing' : 29,
'StorageBufferArrayDynamicIndexing' : 30,
'StorageImageArrayDynamicIndexing' : 31,
'ClipDistance' : 32,
'CullDistance' : 33,
'ImageCubeArray' : 34,
'SampleRateShading' : 35,
'ImageRect' : 36,
'SampledRect' : 37,
'GenericPointer' : 38,
'Int8' : 39,
'InputAttachment' : 40,
'SparseResidency' : 41,
'MinLod' : 42,
'Sampled1D' : 43,
'Image1D' : 44,
'SampledCubeArray' : 45,
'SampledBuffer' : 46,
'ImageBuffer' : 47,
'ImageMSArray' : 48,
'StorageImageExtendedFormats' : 49,
'ImageQuery' : 50,
'DerivativeControl' : 51,
'InterpolationFunction' : 52,
'TransformFeedback' : 53,
'GeometryStreams' : 54,
'StorageImageReadWithoutFormat' : 55,
'StorageImageWriteWithoutFormat' : 56,
'MultiViewport' : 57,
'SubgroupDispatch' : 58,
'NamedBarrier' : 59,
'PipeStorage' : 60,
'GroupNonUniform' : 61,
'GroupNonUniformVote' : 62,
'GroupNonUniformArithmetic' : 63,
'GroupNonUniformBallot' : 64,
'GroupNonUniformShuffle' : 65,
'GroupNonUniformShuffleRelative' : 66,
'GroupNonUniformClustered' : 67,
'GroupNonUniformQuad' : 68,
'SubgroupBallotKHR' : 4423,
'DrawParameters' : 4427,
'SubgroupVoteKHR' : 4431,
'StorageBuffer16BitAccess' : 4433,
'StorageUniformBufferBlock16' : 4433,
'StorageUniform16' : 4434,
'UniformAndStorageBuffer16BitAccess' : 4434,
'StoragePushConstant16' : 4435,
'StorageInputOutput16' : 4436,
'DeviceGroup' : 4437,
'MultiView' : 4439,
'VariablePointersStorageBuffer' : 4441,
'VariablePointers' : 4442,
'AtomicStorageOps' : 4445,
'SampleMaskPostDepthCoverage' : 4447,
'StorageBuffer8BitAccess' : 4448,
'UniformAndStorageBuffer8BitAccess' : 4449,
'StoragePushConstant8' : 4450,
'DenormPreserve' : 4464,
'DenormFlushToZero' : 4465,
'SignedZeroInfNanPreserve' : 4466,
'RoundingModeRTE' : 4467,
'RoundingModeRTZ' : 4468,
'Float16ImageAMD' : 5008,
'ImageGatherBiasLodAMD' : 5009,
'FragmentMaskAMD' : 5010,
'StencilExportEXT' : 5013,
'ImageReadWriteLodAMD' : 5015,
'SampleMaskOverrideCoverageNV' : 5249,
'GeometryShaderPassthroughNV' : 5251,
'ShaderViewportIndexLayerEXT' : 5254,
'ShaderViewportIndexLayerNV' : 5254,
'ShaderViewportMaskNV' : 5255,
'ShaderStereoViewNV' : 5259,
'PerViewAttributesNV' : 5260,
'FragmentFullyCoveredEXT' : 5265,
'MeshShadingNV' : 5266,
'ImageFootprintNV' : 5282,
'FragmentBarycentricNV' : 5284,
'ComputeDerivativeGroupQuadsNV' : 5288,
'FragmentDensityEXT' : 5291,
'ShadingRateNV' : 5291,
'GroupNonUniformPartitionedNV' : 5297,
'ShaderNonUniformEXT' : 5301,
'RuntimeDescriptorArrayEXT' : 5302,
'InputAttachmentArrayDynamicIndexingEXT' : 5303,
'UniformTexelBufferArrayDynamicIndexingEXT' : 5304,
'StorageTexelBufferArrayDynamicIndexingEXT' : 5305,
'UniformBufferArrayNonUniformIndexingEXT' : 5306,
'SampledImageArrayNonUniformIndexingEXT' : 5307,
'StorageBufferArrayNonUniformIndexingEXT' : 5308,
'StorageImageArrayNonUniformIndexingEXT' : 5309,
'InputAttachmentArrayNonUniformIndexingEXT' : 5310,
'UniformTexelBufferArrayNonUniformIndexingEXT' : 5311,
'StorageTexelBufferArrayNonUniformIndexingEXT' : 5312,
'RayTracingNV' : 5340,
'VulkanMemoryModelKHR' : 5345,
'VulkanMemoryModelDeviceScopeKHR' : 5346,
'PhysicalStorageBufferAddressesEXT' : 5347,
'ComputeDerivativeGroupLinearNV' : 5350,
'CooperativeMatrixNV' : 5357,
'SubgroupShuffleINTEL' : 5568,
'SubgroupBufferBlockIOINTEL' : 5569,
'SubgroupImageBlockIOINTEL' : 5570,
'SubgroupImageMediaBlockIOINTEL' : 5579,
'SubgroupAvcMotionEstimationINTEL' : 5696,
'SubgroupAvcMotionEstimationIntraINTEL' : 5697,
'SubgroupAvcMotionEstimationChromaINTEL' : 5698,
},
'Op' : {
'OpNop' : 0,
'OpUndef' : 1,
'OpSourceContinued' : 2,
'OpSource' : 3,
'OpSourceExtension' : 4,
'OpName' : 5,
'OpMemberName' : 6,
'OpString' : 7,
'OpLine' : 8,
'OpExtension' : 10,
'OpExtInstImport' : 11,
'OpExtInst' : 12,
'OpMemoryModel' : 14,
'OpEntryPoint' : 15,
'OpExecutionMode' : 16,
'OpCapability' : 17,
'OpTypeVoid' : 19,
'OpTypeBool' : 20,
'OpTypeInt' : 21,
'OpTypeFloat' : 22,
'OpTypeVector' : 23,
'OpTypeMatrix' : 24,
'OpTypeImage' : 25,
'OpTypeSampler' : 26,
'OpTypeSampledImage' : 27,
'OpTypeArray' : 28,
'OpTypeRuntimeArray' : 29,
'OpTypeStruct' : 30,
'OpTypeOpaque' : 31,
'OpTypePointer' : 32,
'OpTypeFunction' : 33,
'OpTypeEvent' : 34,
'OpTypeDeviceEvent' : 35,
'OpTypeReserveId' : 36,
'OpTypeQueue' : 37,
'OpTypePipe' : 38,
'OpTypeForwardPointer' : 39,
'OpConstantTrue' : 41,
'OpConstantFalse' : 42,
'OpConstant' : 43,
'OpConstantComposite' : 44,
'OpConstantSampler' : 45,
'OpConstantNull' : 46,
'OpSpecConstantTrue' : 48,
'OpSpecConstantFalse' : 49,
'OpSpecConstant' : 50,
'OpSpecConstantComposite' : 51,
'OpSpecConstantOp' : 52,
'OpFunction' : 54,
'OpFunctionParameter' : 55,
'OpFunctionEnd' : 56,
'OpFunctionCall' : 57,
'OpVariable' : 59,
'OpImageTexelPointer' : 60,
'OpLoad' : 61,
'OpStore' : 62,
'OpCopyMemory' : 63,
'OpCopyMemorySized' : 64,
'OpAccessChain' : 65,
'OpInBoundsAccessChain' : 66,
'OpPtrAccessChain' : 67,
'OpArrayLength' : 68,
'OpGenericPtrMemSemantics' : 69,
'OpInBoundsPtrAccessChain' : 70,
'OpDecorate' : 71,
'OpMemberDecorate' : 72,
'OpDecorationGroup' : 73,
'OpGroupDecorate' : 74,
'OpGroupMemberDecorate' : 75,
'OpVectorExtractDynamic' : 77,
'OpVectorInsertDynamic' : 78,
'OpVectorShuffle' : 79,
'OpCompositeConstruct' : 80,
'OpCompositeExtract' : 81,
'OpCompositeInsert' : 82,
'OpCopyObject' : 83,
'OpTranspose' : 84,
'OpSampledImage' : 86,
'OpImageSampleImplicitLod' : 87,
'OpImageSampleExplicitLod' : 88,
'OpImageSampleDrefImplicitLod' : 89,
'OpImageSampleDrefExplicitLod' : 90,
'OpImageSampleProjImplicitLod' : 91,
'OpImageSampleProjExplicitLod' : 92,
'OpImageSampleProjDrefImplicitLod' : 93,
'OpImageSampleProjDrefExplicitLod' : 94,
'OpImageFetch' : 95,
'OpImageGather' : 96,
'OpImageDrefGather' : 97,
'OpImageRead' : 98,
'OpImageWrite' : 99,
'OpImage' : 100,
'OpImageQueryFormat' : 101,
'OpImageQueryOrder' : 102,
'OpImageQuerySizeLod' : 103,
'OpImageQuerySize' : 104,
'OpImageQueryLod' : 105,
'OpImageQueryLevels' : 106,
'OpImageQuerySamples' : 107,
'OpConvertFToU' : 109,
'OpConvertFToS' : 110,
'OpConvertSToF' : 111,
'OpConvertUToF' : 112,
'OpUConvert' : 113,
'OpSConvert' : 114,
'OpFConvert' : 115,
'OpQuantizeToF16' : 116,
'OpConvertPtrToU' : 117,
'OpSatConvertSToU' : 118,
'OpSatConvertUToS' : 119,
'OpConvertUToPtr' : 120,
'OpPtrCastToGeneric' : 121,
'OpGenericCastToPtr' : 122,
'OpGenericCastToPtrExplicit' : 123,
'OpBitcast' : 124,
'OpSNegate' : 126,
'OpFNegate' : 127,
'OpIAdd' : 128,
'OpFAdd' : 129,
'OpISub' : 130,
'OpFSub' : 131,
'OpIMul' : 132,
'OpFMul' : 133,
'OpUDiv' : 134,
'OpSDiv' : 135,
'OpFDiv' : 136,
'OpUMod' : 137,
'OpSRem' : 138,
'OpSMod' : 139,
'OpFRem' : 140,
'OpFMod' : 141,
'OpVectorTimesScalar' : 142,
'OpMatrixTimesScalar' : 143,
'OpVectorTimesMatrix' : 144,
'OpMatrixTimesVector' : 145,
'OpMatrixTimesMatrix' : 146,
'OpOuterProduct' : 147,
'OpDot' : 148,
'OpIAddCarry' : 149,
'OpISubBorrow' : 150,
'OpUMulExtended' : 151,
'OpSMulExtended' : 152,
'OpAny' : 154,
'OpAll' : 155,
'OpIsNan' : 156,
'OpIsInf' : 157,
'OpIsFinite' : 158,
'OpIsNormal' : 159,
'OpSignBitSet' : 160,
'OpLessOrGreater' : 161,
'OpOrdered' : 162,
'OpUnordered' : 163,
'OpLogicalEqual' : 164,
'OpLogicalNotEqual' : 165,
'OpLogicalOr' : 166,
'OpLogicalAnd' : 167,
'OpLogicalNot' : 168,
'OpSelect' : 169,
'OpIEqual' : 170,
'OpINotEqual' : 171,
'OpUGreaterThan' : 172,
'OpSGreaterThan' : 173,
'OpUGreaterThanEqual' : 174,
'OpSGreaterThanEqual' : 175,
'OpULessThan' : 176,
'OpSLessThan' : 177,
'OpULessThanEqual' : 178,
'OpSLessThanEqual' : 179,
'OpFOrdEqual' : 180,
'OpFUnordEqual' : 181,
'OpFOrdNotEqual' : 182,
'OpFUnordNotEqual' : 183,
'OpFOrdLessThan' : 184,
'OpFUnordLessThan' : 185,
'OpFOrdGreaterThan' : 186,
'OpFUnordGreaterThan' : 187,
'OpFOrdLessThanEqual' : 188,
'OpFUnordLessThanEqual' : 189,
'OpFOrdGreaterThanEqual' : 190,
'OpFUnordGreaterThanEqual' : 191,
'OpShiftRightLogical' : 194,
'OpShiftRightArithmetic' : 195,
'OpShiftLeftLogical' : 196,
'OpBitwiseOr' : 197,
'OpBitwiseXor' : 198,
'OpBitwiseAnd' : 199,
'OpNot' : 200,
'OpBitFieldInsert' : 201,
'OpBitFieldSExtract' : 202,
'OpBitFieldUExtract' : 203,
'OpBitReverse' : 204,
'OpBitCount' : 205,
'OpDPdx' : 207,
'OpDPdy' : 208,
'OpFwidth' : 209,
'OpDPdxFine' : 210,
'OpDPdyFine' : 211,
'OpFwidthFine' : 212,
'OpDPdxCoarse' : 213,
'OpDPdyCoarse' : 214,
'OpFwidthCoarse' : 215,
'OpEmitVertex' : 218,
'OpEndPrimitive' : 219,
'OpEmitStreamVertex' : 220,
'OpEndStreamPrimitive' : 221,
'OpControlBarrier' : 224,
'OpMemoryBarrier' : 225,
'OpAtomicLoad' : 227,
'OpAtomicStore' : 228,
'OpAtomicExchange' : 229,
'OpAtomicCompareExchange' : 230,
'OpAtomicCompareExchangeWeak' : 231,
'OpAtomicIIncrement' : 232,
'OpAtomicIDecrement' : 233,
'OpAtomicIAdd' : 234,
'OpAtomicISub' : 235,
'OpAtomicSMin' : 236,
'OpAtomicUMin' : 237,
'OpAtomicSMax' : 238,
'OpAtomicUMax' : 239,
'OpAtomicAnd' : 240,
'OpAtomicOr' : 241,
'OpAtomicXor' : 242,
'OpPhi' : 245,
'OpLoopMerge' : 246,
'OpSelectionMerge' : 247,
'OpLabel' : 248,
'OpBranch' : 249,
'OpBranchConditional' : 250,
'OpSwitch' : 251,
'OpKill' : 252,
'OpReturn' : 253,
'OpReturnValue' : 254,
'OpUnreachable' : 255,
'OpLifetimeStart' : 256,
'OpLifetimeStop' : 257,
'OpGroupAsyncCopy' : 259,
'OpGroupWaitEvents' : 260,
'OpGroupAll' : 261,
'OpGroupAny' : 262,
'OpGroupBroadcast' : 263,
'OpGroupIAdd' : 264,
'OpGroupFAdd' : 265,
'OpGroupFMin' : 266,
'OpGroupUMin' : 267,
'OpGroupSMin' : 268,
'OpGroupFMax' : 269,
'OpGroupUMax' : 270,
'OpGroupSMax' : 271,
'OpReadPipe' : 274,
'OpWritePipe' : 275,
'OpReservedReadPipe' : 276,
'OpReservedWritePipe' : 277,
'OpReserveReadPipePackets' : 278,
'OpReserveWritePipePackets' : 279,
'OpCommitReadPipe' : 280,
'OpCommitWritePipe' : 281,
'OpIsValidReserveId' : 282,
'OpGetNumPipePackets' : 283,
'OpGetMaxPipePackets' : 284,
'OpGroupReserveReadPipePackets' : 285,
'OpGroupReserveWritePipePackets' : 286,
'OpGroupCommitReadPipe' : 287,
'OpGroupCommitWritePipe' : 288,
'OpEnqueueMarker' : 291,
'OpEnqueueKernel' : 292,
'OpGetKernelNDrangeSubGroupCount' : 293,
'OpGetKernelNDrangeMaxSubGroupSize' : 294,
'OpGetKernelWorkGroupSize' : 295,
'OpGetKernelPreferredWorkGroupSizeMultiple' : 296,
'OpRetainEvent' : 297,
'OpReleaseEvent' : 298,
'OpCreateUserEvent' : 299,
'OpIsValidEvent' : 300,
'OpSetUserEventStatus' : 301,
'OpCaptureEventProfilingInfo' : 302,
'OpGetDefaultQueue' : 303,
'OpBuildNDRange' : 304,
'OpImageSparseSampleImplicitLod' : 305,
'OpImageSparseSampleExplicitLod' : 306,
'OpImageSparseSampleDrefImplicitLod' : 307,
'OpImageSparseSampleDrefExplicitLod' : 308,
'OpImageSparseSampleProjImplicitLod' : 309,
'OpImageSparseSampleProjExplicitLod' : 310,
'OpImageSparseSampleProjDrefImplicitLod' : 311,
'OpImageSparseSampleProjDrefExplicitLod' : 312,
'OpImageSparseFetch' : 313,
'OpImageSparseGather' : 314,
'OpImageSparseDrefGather' : 315,
'OpImageSparseTexelsResident' : 316,
'OpNoLine' : 317,
'OpAtomicFlagTestAndSet' : 318,
'OpAtomicFlagClear' : 319,
'OpImageSparseRead' : 320,
'OpSizeOf' : 321,
'OpTypePipeStorage' : 322,
'OpConstantPipeStorage' : 323,
'OpCreatePipeFromPipeStorage' : 324,
'OpGetKernelLocalSizeForSubgroupCount' : 325,
'OpGetKernelMaxNumSubgroups' : 326,
'OpTypeNamedBarrier' : 327,
'OpNamedBarrierInitialize' : 328,
'OpMemoryNamedBarrier' : 329,
'OpModuleProcessed' : 330,
'OpExecutionModeId' : 331,
'OpDecorateId' : 332,
'OpGroupNonUniformElect' : 333,
'OpGroupNonUniformAll' : 334,
'OpGroupNonUniformAny' : 335,
'OpGroupNonUniformAllEqual' : 336,
'OpGroupNonUniformBroadcast' : 337,
'OpGroupNonUniformBroadcastFirst' : 338,
'OpGroupNonUniformBallot' : 339,
'OpGroupNonUniformInverseBallot' : 340,
'OpGroupNonUniformBallotBitExtract' : 341,
'OpGroupNonUniformBallotBitCount' : 342,
'OpGroupNonUniformBallotFindLSB' : 343,
'OpGroupNonUniformBallotFindMSB' : 344,
'OpGroupNonUniformShuffle' : 345,
'OpGroupNonUniformShuffleXor' : 346,
'OpGroupNonUniformShuffleUp' : 347,
'OpGroupNonUniformShuffleDown' : 348,
'OpGroupNonUniformIAdd' : 349,
'OpGroupNonUniformFAdd' : 350,
'OpGroupNonUniformIMul' : 351,
'OpGroupNonUniformFMul' : 352,
'OpGroupNonUniformSMin' : 353,
'OpGroupNonUniformUMin' : 354,
'OpGroupNonUniformFMin' : 355,
'OpGroupNonUniformSMax' : 356,
'OpGroupNonUniformUMax' : 357,
'OpGroupNonUniformFMax' : 358,
'OpGroupNonUniformBitwiseAnd' : 359,
'OpGroupNonUniformBitwiseOr' : 360,
'OpGroupNonUniformBitwiseXor' : 361,
'OpGroupNonUniformLogicalAnd' : 362,
'OpGroupNonUniformLogicalOr' : 363,
'OpGroupNonUniformLogicalXor' : 364,
'OpGroupNonUniformQuadBroadcast' : 365,
'OpGroupNonUniformQuadSwap' : 366,
'OpSubgroupBallotKHR' : 4421,
'OpSubgroupFirstInvocationKHR' : 4422,
'OpSubgroupAllKHR' : 4428,
'OpSubgroupAnyKHR' : 4429,
'OpSubgroupAllEqualKHR' : 4430,
'OpSubgroupReadInvocationKHR' : 4432,
'OpGroupIAddNonUniformAMD' : 5000,
'OpGroupFAddNonUniformAMD' : 5001,
'OpGroupFMinNonUniformAMD' : 5002,
'OpGroupUMinNonUniformAMD' : 5003,
'OpGroupSMinNonUniformAMD' : 5004,
'OpGroupFMaxNonUniformAMD' : 5005,
'OpGroupUMaxNonUniformAMD' : 5006,
'OpGroupSMaxNonUniformAMD' : 5007,
'OpFragmentMaskFetchAMD' : 5011,
'OpFragmentFetchAMD' : 5012,
'OpImageSampleFootprintNV' : 5283,
'OpGroupNonUniformPartitionNV' : 5296,
'OpWritePackedPrimitiveIndices4x8NV' : 5299,
'OpReportIntersectionNV' : 5334,
'OpIgnoreIntersectionNV' : 5335,
'OpTerminateRayNV' : 5336,
'OpTraceNV' : 5337,
'OpTypeAccelerationStructureNV' : 5341,
'OpExecuteCallableNV' : 5344,
'OpTypeCooperativeMatrixNV' : 5358,
'OpCooperativeMatrixLoadNV' : 5359,
'OpCooperativeMatrixStoreNV' : 5360,
'OpCooperativeMatrixMulAddNV' : 5361,
'OpCooperativeMatrixLengthNV' : 5362,
'OpSubgroupShuffleINTEL' : 5571,
'OpSubgroupShuffleDownINTEL' : 5572,
'OpSubgroupShuffleUpINTEL' : 5573,
'OpSubgroupShuffleXorINTEL' : 5574,
'OpSubgroupBlockReadINTEL' : 5575,
'OpSubgroupBlockWriteINTEL' : 5576,
'OpSubgroupImageBlockReadINTEL' : 5577,
'OpSubgroupImageBlockWriteINTEL' : 5578,
'OpSubgroupImageMediaBlockReadINTEL' : 5580,
'OpSubgroupImageMediaBlockWriteINTEL' : 5581,
'OpDecorateStringGOOGLE' : 5632,
'OpMemberDecorateStringGOOGLE' : 5633,
'OpVmeImageINTEL' : 5699,
'OpTypeVmeImageINTEL' : 5700,
'OpTypeAvcImePayloadINTEL' : 5701,
'OpTypeAvcRefPayloadINTEL' : 5702,
'OpTypeAvcSicPayloadINTEL' : 5703,
'OpTypeAvcMcePayloadINTEL' : 5704,
'OpTypeAvcMceResultINTEL' : 5705,
'OpTypeAvcImeResultINTEL' : 5706,
'OpTypeAvcImeResultSingleReferenceStreamoutINTEL' : 5707,
'OpTypeAvcImeResultDualReferenceStreamoutINTEL' : 5708,
'OpTypeAvcImeSingleReferenceStreaminINTEL' : 5709,
'OpTypeAvcImeDualReferenceStreaminINTEL' : 5710,
'OpTypeAvcRefResultINTEL' : 5711,
'OpTypeAvcSicResultINTEL' : 5712,
'OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL' : 5713,
'OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL' : 5714,
'OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL' : 5715,
'OpSubgroupAvcMceSetInterShapePenaltyINTEL' : 5716,
'OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL' : 5717,
'OpSubgroupAvcMceSetInterDirectionPenaltyINTEL' : 5718,
'OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL' : 5719,
'OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL' : 5720,
'OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL' : 5721,
'OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL' : 5722,
'OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL' : 5723,
'OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL' : 5724,
'OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL' : 5725,
'OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL' : 5726,
'OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL' : 5727,
'OpSubgroupAvcMceSetAcOnlyHaarINTEL' : 5728,
'OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL' : 5729,
'OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL' : 5730,
'OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL' : 5731,
'OpSubgroupAvcMceConvertToImePayloadINTEL' : 5732,
'OpSubgroupAvcMceConvertToImeResultINTEL' : 5733,
'OpSubgroupAvcMceConvertToRefPayloadINTEL' : 5734,
'OpSubgroupAvcMceConvertToRefResultINTEL' : 5735,
'OpSubgroupAvcMceConvertToSicPayloadINTEL' : 5736,
'OpSubgroupAvcMceConvertToSicResultINTEL' : 5737,
'OpSubgroupAvcMceGetMotionVectorsINTEL' : 5738,
'OpSubgroupAvcMceGetInterDistortionsINTEL' : 5739,
'OpSubgroupAvcMceGetBestInterDistortionsINTEL' : 5740,
'OpSubgroupAvcMceGetInterMajorShapeINTEL' : 5741,
'OpSubgroupAvcMceGetInterMinorShapeINTEL' : 5742,
'OpSubgroupAvcMceGetInterDirectionsINTEL' : 5743,
'OpSubgroupAvcMceGetInterMotionVectorCountINTEL' : 5744,
'OpSubgroupAvcMceGetInterReferenceIdsINTEL' : 5745,
'OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL' : 5746,
'OpSubgroupAvcImeInitializeINTEL' : 5747,
'OpSubgroupAvcImeSetSingleReferenceINTEL' : 5748,
'OpSubgroupAvcImeSetDualReferenceINTEL' : 5749,
'OpSubgroupAvcImeRefWindowSizeINTEL' : 5750,
'OpSubgroupAvcImeAdjustRefOffsetINTEL' : 5751,
'OpSubgroupAvcImeConvertToMcePayloadINTEL' : 5752,
'OpSubgroupAvcImeSetMaxMotionVectorCountINTEL' : 5753,
'OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL' : 5754,
'OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL' : 5755,
'OpSubgroupAvcImeSetWeightedSadINTEL' : 5756,
'OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL' : 5757,
'OpSubgroupAvcImeEvaluateWithDualReferenceINTEL' : 5758,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL' : 5759,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL' : 5760,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL' : 5761,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL' : 5762,
'OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL' : 5763,
'OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL' : 5764,
'OpSubgroupAvcImeConvertToMceResultINTEL' : 5765,
'OpSubgroupAvcImeGetSingleReferenceStreaminINTEL' : 5766,
'OpSubgroupAvcImeGetDualReferenceStreaminINTEL' : 5767,
'OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL' : 5768,
'OpSubgroupAvcImeStripDualReferenceStreamoutINTEL' : 5769,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL' : 5770,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL' : 5771,
'OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL' : 5772,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL' : 5773,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL' : 5774,
'OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL' : 5775,
'OpSubgroupAvcImeGetBorderReachedINTEL' : 5776,
'OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL' : 5777,
'OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL' : 5778,
'OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL' : 5779,
'OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL' : 5780,
'OpSubgroupAvcFmeInitializeINTEL' : 5781,
'OpSubgroupAvcBmeInitializeINTEL' : 5782,
'OpSubgroupAvcRefConvertToMcePayloadINTEL' : 5783,
'OpSubgroupAvcRefSetBidirectionalMixDisableINTEL' : 5784,
'OpSubgroupAvcRefSetBilinearFilterEnableINTEL' : 5785,
'OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL' : 5786,
'OpSubgroupAvcRefEvaluateWithDualReferenceINTEL' : 5787,
'OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL' : 5788,
'OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL' : 5789,
'OpSubgroupAvcRefConvertToMceResultINTEL' : 5790,
'OpSubgroupAvcSicInitializeINTEL' : 5791,
'OpSubgroupAvcSicConfigureSkcINTEL' : 5792,
'OpSubgroupAvcSicConfigureIpeLumaINTEL' : 5793,
'OpSubgroupAvcSicConfigureIpeLumaChromaINTEL' : 5794,
'OpSubgroupAvcSicGetMotionVectorMaskINTEL' : 5795,
'OpSubgroupAvcSicConvertToMcePayloadINTEL' : 5796,
'OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL' : 5797,
'OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL' : 5798,
'OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL' : 5799,
'OpSubgroupAvcSicSetBilinearFilterEnableINTEL' : 5800,
'OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL' : 5801,
'OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL' : 5802,
'OpSubgroupAvcSicEvaluateIpeINTEL' : 5803,
'OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL' : 5804,
'OpSubgroupAvcSicEvaluateWithDualReferenceINTEL' : 5805,
'OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL' : 5806,
'OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL' : 5807,
'OpSubgroupAvcSicConvertToMceResultINTEL' : 5808,
'OpSubgroupAvcSicGetIpeLumaShapeINTEL' : 5809,
'OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL' : 5810,
'OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL' : 5811,
'OpSubgroupAvcSicGetPackedIpeLumaModesINTEL' : 5812,
'OpSubgroupAvcSicGetIpeChromaModeINTEL' : 5813,
'OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL' : 5814,
'OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL' : 5815,
'OpSubgroupAvcSicGetInterRawSadsINTEL' : 5816,
},
}
| attilaz/bgfx | 3rdparty/spirv-headers/include/spirv/unified1/spirv.py | Python | bsd-2-clause | 43,923 |
from __future__ import absolute_import, unicode_literals

# Values a serialized JSON text field may hold when it is effectively empty.
# `None` and the empty string aren't valid JSON, but it's safer to include
# them as potential empty values alongside the empty array and empty object.
EMPTY_SERIALIZED_JSON_VALUES = (None, '', '[]', '{}')
| gasman/wagtaildraftail | wagtaildraftail/validators.py | Python | mit | 214 |
import math
from datastructures.array import Array
from util import between
def recursive_matrix_chain(p, m, i, j):
    """Plain recursive solution to the matrix-chain problem.

    Corresponds to RECURSIVE-MATRIX-CHAIN from CLRS (Section 15.3).

    Args:
        p: dimensions array; matrix A_k has dimensions p[k - 1] x p[k].
        m: auxiliary table in which m[i, j] records the best cost found for
            the subchain A_i..A_j.
        i: index of the first matrix in the subchain.
        j: index of the last matrix in the subchain.

    Returns:
        The minimum number of scalar multiplications needed to compute the
        product A_i..A_j.
    """
    if i == j:
        return 0
    best = math.inf
    for split in between(i, j - 1):
        left_cost = recursive_matrix_chain(p, m, i, split)
        right_cost = recursive_matrix_chain(p, m, split + 1, j)
        total = left_cost + right_cost + p[i - 1] * p[split] * p[j]
        best = min(best, total)
    m[i, j] = best
    return best
def memoized_matrix_chain(p):
    """Memoized driver for the matrix-chain problem.

    Corresponds to MEMOIZED-MATRIX-CHAIN from CLRS (Section 15.3).

    Args:
        p: dimensions array; matrix A_k has dimensions p[k - 1] x p[k].

    Returns:
        The minimum cost of multiplying out the whole chain A_1..A_n.
    """
    n = p.length - 1
    # Every entry starts at infinity to mark it as "not yet computed".
    memo = Array([Array.indexed(1, n) for _ in between(1, n)])
    for row in between(1, n):
        for col in between(row, n):
            memo[row, col] = math.inf
    return lookup_chain(p, memo, 1, n)
def lookup_chain(p, m, i, j):
    """Memoized lookup for the matrix-chain problem.

    Corresponds to LOOKUP-CHAIN from CLRS (Section 15.3). Consults and fills
    the memo table `m`, where entries equal to infinity are treated as not
    yet computed.

    Args:
        p: dimensions array; matrix A_k has dimensions p[k - 1] x p[k].
        m: the memo table.
        i: index of the first matrix in the subchain.
        j: index of the last matrix in the subchain.

    Returns:
        The minimum cost of multiplying out A_i..A_j.
    """
    # A finite entry means this subproblem was already solved.
    if m[i, j] < math.inf:
        return m[i, j]
    if i == j:
        m[i, j] = 0
    else:
        best = math.inf
        for split in between(i, j - 1):
            cost = (lookup_chain(p, m, i, split)
                    + lookup_chain(p, m, split + 1, j)
                    + p[i - 1] * p[split] * p[j])
            if cost < best:
                best = cost
        m[i, j] = best
    return m[i, j]
| wojtask/CormenPy | src/chapter15/textbook15_3.py | Python | gpl-3.0 | 961 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
import abc
import contextlib
import functools
import warnings
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
# Floating-point and complex dtypes accepted by default.
# NOTE(review): the consumer of this set is outside this chunk; presumably it
# gates which variable/gradient dtypes the optimizer will process -- confirm
# at the use site.
_DEFAULT_VALID_DTYPES = frozenset([
    dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64,
    dtypes.complex64, dtypes.complex128
])
def _deduplicate_indexed_slices(values, indices):
  """Sums `values` slices that share a repeated index.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor` indexing into the first
      dimension of `values` (as in an `IndexedSlices` object).

  Returns:
    A `(summed_values, unique_indices)` tuple, where `unique_indices` holds
    the distinct values of `indices` and `summed_values[k]` is the sum of
    the `values` slices whose index equals `unique_indices[k]`.
  """
  unique_indices, positions = array_ops.unique(indices)
  num_unique = array_ops.shape(unique_indices)[0]
  summed_values = math_ops.unsorted_segment_sum(values, positions, num_unique)
  return (summed_values, unique_indices)
class NullContextmanager(object):
  """A do-nothing context manager.

  Accepts (and ignores) arbitrary constructor arguments so it can stand in
  for a real context manager such as a name scope.
  """

  def __init__(self, *args, **kwargs):
    del args, kwargs  # Unused.

  def __enter__(self):
    return None

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Returning a falsy value lets any in-block exception propagate.
    return False
def name_scope_only_in_function_or_graph(name):
  """Internal-only entry point for `name_scope*`.

  Enters a `compat.v1.name_scope` only when in a function or graph,
  not when running fully eagerly (where it is a no-op).

  Args:
    name: The name argument that is passed to the op function.

  Returns:
    A `name_scope*` context manager, or a `NullContextmanager` when
    executing eagerly.
  """
  if context.executing_eagerly():
    return NullContextmanager()
  return ops.name_scope_v1(name)
@keras_export("keras.optimizers.Optimizer", metaclass=abc.ABCMeta)
class OptimizerV2(trackable.Trackable):
"""Base class for Keras optimizers.
You should not use this class directly, but instead instantiate one of its
subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Usage in custom training loops
In Keras models, sometimes variables are created when the model is first
called, instead of construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass var_list as
callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
# Process the gradients, for example cap them, etc.
# capped_grads = [MyCapper(g) for g in grads]
processed_grads = [process_gradient(g) for g in grads]
# Ask the optimizer to apply the processed gradients.
opt.apply_gradients(zip(processed_grads, var_list))
```
### Use with `tf.distribute.Strategy`
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.
To aggregate gradients yourself, call `apply_gradients` with
`experimental_aggregate_gradients` set to False. This is useful if you need to
process aggregated gradients.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraints
All Keras optimizers respect variable constraints. If constraint function is
passed to any variable, the constraint will be applied to the variable after
the gradient has been applied to the variable.
Important: If gradient is sparse tensor, variable constraint is not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
### Hyperparameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyperparameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Callable learning rate
Optimizer accepts a callable learning rate in two ways. The first way is
through built-in or customized
`tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be
called on each iteration with `schedule(iteration)`, a `tf.Variable`
owned by the optimizer.
Example:
>>> var = tf.Variable(np.random.random(size=(1,)))
>>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
... initial_learning_rate=.01, decay_steps=20, decay_rate=.1)
>>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate)
>>> loss = lambda: 3 * var
>>> opt.minimize(loss, var_list=[var])
<tf.Variable...
The second way is through a callable function that
does not accept any arguments.
Example:
>>> var = tf.Variable(np.random.random(size=(1,)))
>>> def lr_callable():
... return .1
>>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable)
>>> loss = lambda: 3 * var
>>> opt.minimize(loss, var_list=[var])
<tf.Variable...
### Creating a custom optimizer
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- `_resource_apply_dense` (update variable given gradient tensor is a dense
`tf.Tensor`)
- `_resource_apply_sparse` (update variable given gradient tensor is a
sparse `tf.IndexedSlices`. The most common way for this to happen
is if you are taking the gradient through a `tf.gather`.)
- `_create_slots`
(if your optimizer algorithm requires additional variables)
- `get_config`
(serialization of the optimizer, include all hyper parameters)
"""
# Subclasses should set this to True unless they override `apply_gradients`
# with a version that does not have the `experimental_aggregate_gradients`
# argument. Older versions of Keras did not have this argument so custom
# optimizers may have overridden `apply_gradients` without the
# `experimental_aggregate_gradients` argument. Keras only passes
# `experimental_aggregate_gradients` if this attribute is True.
# Note: This attribute will likely be removed in an upcoming release.
_HAS_AGGREGATE_GRAD = False
  def __init__(self,
               name,
               gradient_aggregator=None,
               gradient_transformers=None,
               **kwargs):
    """Create a new Optimizer.

    This must be called by the constructors of subclasses.
    Note that Optimizer instances should not bind to a single graph,
    and so shouldn't keep Tensors as member variables. Generally
    you should be able to use the _set_hyper()/state.get_hyper()
    facility instead.

    This class is stateful and thread-compatible.

    Example of custom gradient transformations:

    ```python
    def my_gradient_transformer(grads_and_vars):
      # Simple example, double the gradients.
      return [(2. * g, v) for g, v in grads_and_vars]

    optimizer = tf.keras.optimizers.SGD(
        1e-3, gradient_transformers=[my_gradient_transformer])
    ```

    Args:
      name: String. The name to use for momentum accumulator weights created
        by the optimizer.
      gradient_aggregator: The function to use to aggregate gradients across
        devices (when using `tf.distribute.Strategy`). If `None`, defaults to
        summing the gradients across devices. The function should accept and
        return a list of `(gradient, variable)` tuples.
      gradient_transformers: Optional. List of functions to use to transform
        gradients before applying updates to Variables. The functions are
        applied after `gradient_aggregator`. The functions should accept and
        return a list of `(gradient, variable)` tuples.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set the gradient of all weights is
        clipped so that their global norm is no higher than this value.

    Raises:
      ValueError: in case of any invalid argument.
    """
    allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay", "global_clipnorm"}
    for k in kwargs:
      if k not in allowed_kwargs:
        raise TypeError("Unexpected keyword argument "
                        "passed to optimizer: " + str(k))
      # checks that all keyword arguments are non-negative.
      if kwargs[k] is not None and kwargs[k] < 0:
        raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
      if k == "lr":
        warnings.warn(
            "The `lr` argument is deprecated, use `learning_rate` instead.")

    self._use_locking = True
    self._init_set_name(name)
    self._hyper = {}
    # dict: {variable name : {slot name : variable}}
    self._slots = {}
    self._slot_names = []
    self._weights = []
    self._iterations = None

    # For implementing Trackable. Stores information about how to restore
    # slot variables which have not yet been created
    # (trackable._CheckpointPosition objects).
    #  {slot_name :
    #      {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
    #   ... }
    self._deferred_slot_restorations = {}

    # `decay` (if not None) was already validated non-negative by the loop
    # above; this re-check also rejects a literal `decay=None`.
    decay = kwargs.pop("decay", 0.0)
    if decay < 0.:
      raise ValueError("decay cannot be less than 0: {}".format(decay))
    self._initial_decay = decay

    self._hypers_created = False
    # Store the distribution strategy object if the optimizer is created inside
    # strategy scope, so it could be used to create variables later.
    if distribute_ctx.has_strategy():
      self._distribution_strategy = distribute_ctx.get_strategy()
    else:
      self._distribution_strategy = None

    # Configure gradient transformations. Note: `gradient_transformers` must
    # be assigned before the clip* attributes below, because the clip*
    # property setters validate against `self.gradient_transformers`.
    if gradient_aggregator is None:
      gradient_aggregator = optimizer_utils.all_reduce_sum_gradients
    self.gradient_aggregator = gradient_aggregator
    if gradient_transformers is None:
      gradient_transformers = []
    self.gradient_transformers = gradient_transformers
    # These assignments go through property setters, which also pre-build the
    # corresponding clipping functions.
    self.clipnorm = kwargs.pop("clipnorm", None)
    self.global_clipnorm = kwargs.pop("global_clipnorm", None)
    if self.clipnorm is not None and self.global_clipnorm is not None:
      raise ValueError("Cannot accept both `clipnorm` and `global_clipnorm`, "
                       "passed `clipnorm` {}, `global_clipnorm` {}".format(
                           self.clipnorm, self.global_clipnorm))
    self.clipvalue = kwargs.pop("clipvalue", None)
  @property
  def clipnorm(self):
    """`float` or `None`. If set, clips gradients to a maximum norm.

    Backed by `_clipnorm`, which is assigned through the `clipnorm` setter.
    """
    return self._clipnorm
  @property
  def global_clipnorm(self):
    """`float` or `None`. If set, clips gradients to a maximum norm.

    Unlike `clipnorm`, the norm is computed over all gradients together
    rather than per weight.
    """
    return self._global_clipnorm
  @clipnorm.setter
  def clipnorm(self, val):
    # Built-in norm clipping is mutually exclusive with user-supplied
    # `gradient_transformers`.
    if val is not None and self.gradient_transformers:
      raise ValueError("`clipnorm` cannot be set when `gradient_transformers` "
                       "is set. Instead, use the `gradient_transformers` to "
                       "specify clipping and other transformations.")
    self._clipnorm = val
    # Pre-build the clipping function applied in `_transform_gradients`.
    self._clipnorm_fn = optimizer_utils.make_gradient_clipnorm_fn(
        self._clipnorm)
@global_clipnorm.setter
def global_clipnorm(self, val):
if val is not None and self.gradient_transformers:
raise ValueError("`clipnorm` cannot be set when `gradient_transformers` "
"is set. Instead, use the `gradient_transformers` to "
"specify clipping and other transformations.")
self._global_clipnorm = val
self._global_clipnorm_fn = optimizer_utils.make_global_gradient_clipnorm_fn(
self._global_clipnorm)
  @property
  def clipvalue(self):
    """`float` or `None`. If set, clips gradients to a maximum value.

    Backed by `_clipvalue`, which is assigned through the `clipvalue` setter.
    """
    return self._clipvalue
  @clipvalue.setter
  def clipvalue(self, val):
    # Built-in value clipping is mutually exclusive with user-supplied
    # `gradient_transformers`.
    if val is not None and self.gradient_transformers:
      raise ValueError("`clipvalue` cannot be set when `gradient_transformers` "
                       "is set. Instead, use the `gradient_transformers` to "
                       "specify clipping and other transformations.")
    self._clipvalue = val
    # Pre-build the clipping function applied in `_transform_gradients`.
    self._clipvalue_fn = optimizer_utils.make_gradient_clipvalue_fn(
        self._clipvalue)
  def _transform_loss(self, loss):
    """Called in `.minimize` to transform loss before computing gradients.

    The base implementation returns `loss` unchanged; this exists as a hook
    for subclasses.
    """
    return loss
def _get_gradients(self, tape, loss, var_list, grad_loss=None):
"""Called in `minimize` to compute gradients from loss."""
grads = tape.gradient(loss, var_list, grad_loss)
return list(zip(grads, var_list))
  def _transform_unaggregated_gradients(self, grads_and_vars):
    """Called in `apply_gradients` before gradient aggregation.

    The base implementation returns `grads_and_vars` unchanged; this exists
    as a hook for subclasses.
    """
    return grads_and_vars
  def _aggregate_gradients(self, grads_and_vars):
    """Called in `apply_gradients` to aggregate gradients across devices.

    Note that user subclasses may override this, so the interface should not be
    changed.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.

    Returns:
      A list of (aggregated_gradient, variable) pairs. By default, this calls
      `self.gradient_aggregator`.
    """
    # `self.gradient_aggregator` defaults to
    # `optimizer_utils.all_reduce_sum_gradients` (see `__init__`).
    return self.gradient_aggregator(grads_and_vars)
def _transform_gradients(self, grads_and_vars):
"""Called in `apply_gradients` after aggregation."""
if self._clipvalue is not None:
grads_and_vars = self._clipvalue_fn(grads_and_vars)
if self._clipnorm is not None:
grads_and_vars = self._clipnorm_fn(grads_and_vars)
if self._global_clipnorm is not None:
grads_and_vars = self._global_clipnorm_fn(grads_and_vars)
for fn in self.gradient_transformers:
grads_and_vars = fn(grads_and_vars)
return grads_and_vars
  def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):
    """Minimize `loss` by updating `var_list`.

    This method simply computes gradient using `tf.GradientTape` and calls
    `apply_gradients()`. If you want to process the gradient before applying
    then call `tf.GradientTape` and `apply_gradients()` explicitly instead
    of using this function.

    Args:
      loss: `Tensor` or callable. If a callable, `loss` should take no arguments
        and return the value to minimize. If a `Tensor`, the `tape` argument
        must be passed.
      var_list: list or tuple of `Variable` objects to update to minimize
        `loss`, or a callable returning the list or tuple of `Variable` objects.
        Use callable when the variable list would otherwise be incomplete before
        `minimize` since the variables are created at the first time `loss` is
        called.
      grad_loss: (Optional). A `Tensor` holding the gradient computed for
        `loss`.
      name: (Optional) str. Name for the returned operation.
      tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
        the tape that computed the `loss` must be provided.

    Returns:
      An `Operation` that updates the variables in `var_list`. The `iterations`
      will be automatically increased by 1.

    Raises:
      ValueError: If some of the variables are not `Variable` objects.
    """
    # Two-phase: compute (gradient, variable) pairs, then apply them.
    grads_and_vars = self._compute_gradients(
        loss, var_list=var_list, grad_loss=grad_loss, tape=tape)
    return self.apply_gradients(grads_and_vars, name=name)
  def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
    """Compute gradients of `loss` for the variables in `var_list`.

    This is the first part of `minimize()`. It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable". Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: `Tensor` or callable. If a callable, `loss` should take no
        arguments and return the value to minimize. If a `Tensor`, the `tape`
        argument must be passed.
      var_list: list or tuple of `Variable` objects to update to minimize
        `loss`, or a callable returning the list or tuple of `Variable` objects.
        Use callable when the variable list would otherwise be incomplete before
        `minimize` and the variables are created at the first time when `loss`
        is called.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
        the tape that computed the `loss` must be provided.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.

    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid, or var_list is None.
    """
    # TODO(josh11b): Test that we handle weight decay in a reasonable way.
    if not callable(loss) and tape is None:
      raise ValueError("`tape` is required when a `Tensor` loss is passed.")
    tape = tape if tape is not None else backprop.GradientTape()
    if callable(loss):
      # Evaluate the loss under the tape so its ops are recorded. When
      # var_list is a callable, the variables don't exist yet, so they cannot
      # be watched up front; they are created (and auto-watched) by loss().
      with tape:
        if not callable(var_list):
          tape.watch(var_list)
        loss = loss()
        if callable(var_list):
          var_list = var_list()
    with tape:
      # Let subclasses (e.g. loss scaling) transform the loss on-tape.
      loss = self._transform_loss(loss)
    var_list = nest.flatten(var_list)
    with ops.name_scope_v2(self._name + "/gradients"):
      grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
    self._assert_valid_dtypes([
        v for g, v in grads_and_vars
        if g is not None and v.dtype != dtypes.resource
    ])
    return grads_and_vars
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
The method sums gradients from all replicas in the presence of
`tf.distribute.Strategy` by default. You can aggregate gradients yourself by
passing `experimental_aggregate_gradients=False`.
Example:
```python
grads = tape.gradient(loss, vars)
grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
# Processing aggregated gradients.
optimizer.apply_gradients(zip(grads, vars),
experimental_aggregate_gradients=False)
```
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name passed
to the `Optimizer` constructor.
experimental_aggregate_gradients: Whether to sum gradients from different
replicas in the presense of `tf.distribute.Strategy`. If False, it's
user responsibility to aggregate the gradients. Default to True.
Returns:
An `Operation` that applies the specified gradients. The `iterations`
will be automatically increased by 1.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If called in a cross-replica context.
"""
grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
with ops.name_scope_v2(self._name):
# Create iteration if necessary.
with ops.init_scope():
self._create_all_weights(var_list)
if not grads_and_vars:
# Distribution strategy does not support reducing an empty list of
# gradients
return control_flow_ops.no_op()
if distribute_ctx.in_cross_replica_context():
raise RuntimeError(
"`apply_gradients() cannot be called in cross-replica context. "
"Use `tf.distribute.Strategy.run` to enter replica "
"context.")
strategy = distribute_ctx.get_strategy()
if (not experimental_aggregate_gradients and strategy and
isinstance(strategy,
(parameter_server_strategy.ParameterServerStrategyV1,
parameter_server_strategy_v2.ParameterServerStrategyV2,
central_storage_strategy.CentralStorageStrategy,
central_storage_strategy.CentralStorageStrategyV1))):
raise NotImplementedError(
"`experimental_aggregate_gradients=False is not supported for "
"ParameterServerStrategy and CentralStorageStrategy")
apply_state = self._prepare(var_list)
if experimental_aggregate_gradients:
grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars)
grads_and_vars = self._aggregate_gradients(grads_and_vars)
grads_and_vars = self._transform_gradients(grads_and_vars)
if optimizer_utils.strategy_supports_no_merge_call():
return self._distributed_apply(strategy, grads_and_vars, name,
apply_state)
else:
return distribute_ctx.get_replica_context().merge_call(
functools.partial(self._distributed_apply, apply_state=apply_state),
args=(grads_and_vars,),
kwargs={
"name": name,
})
  def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):
    """`apply_gradients` using a `DistributionStrategy`."""

    def apply_grad_to_update_var(var, grad):
      """Apply gradient to variable."""
      if isinstance(var, ops.Tensor):
        raise NotImplementedError("Trying to update a Tensor ", var)

      apply_kwargs = {}
      if isinstance(grad, ops.IndexedSlices):
        # Sparse gradient path.
        if var.constraint is not None:
          raise RuntimeError(
              "Cannot use a constraint function on a sparse variable.")
        # Only pass apply_state if the subclass's apply fn accepts it.
        if "apply_state" in self._sparse_apply_args:
          apply_kwargs["apply_state"] = apply_state
        return self._resource_apply_sparse_duplicate_indices(
            grad.values, var, grad.indices, **apply_kwargs)

      # Dense gradient path.
      if "apply_state" in self._dense_apply_args:
        apply_kwargs["apply_state"] = apply_state
      update_op = self._resource_apply_dense(grad, var, **apply_kwargs)
      if var.constraint is not None:
        # Re-project the variable after the update.
        with ops.control_dependencies([update_op]):
          return var.assign(var.constraint(var))
      else:
        return update_op

    eagerly_outside_functions = ops.executing_eagerly_outside_functions()
    update_ops = []
    with name_scope_only_in_function_or_graph(name or self._name):
      for grad, var in grads_and_vars:
        # Colocate the update with variables to avoid unnecessary communication
        # delays. See b/136304694.
        with distribution.extended.colocate_vars_with(var):
          with name_scope_only_in_function_or_graph(
              "update" if eagerly_outside_functions else "update_" +
              var.op.name):
            update_op = distribution.extended.update(
                var, apply_grad_to_update_var, args=(grad,), group=False)
            if distribute_ctx.in_cross_replica_context():
              # In cross-replica context, extended.update returns a list of
              # update ops from all replicas (group=False).
              update_ops.extend(update_op)
            else:
              # In replica context, extended.update return the single update op
              # of current replica.
              update_ops.append(update_op)
      any_symbolic = any(isinstance(i, ops.Operation) or
                         tf_utils.is_symbolic_tensor(i) for i in update_ops)
      if not context.executing_eagerly() or any_symbolic:
        # If the current context is graph mode or any of the update ops are
        # symbolic then the step update should be carried out under a graph
        # context. (eager updates execute immediately)
        with backend._current_graph(update_ops).as_default():  # pylint: disable=protected-access
          with ops.control_dependencies([control_flow_ops.group(update_ops)]):
            return self._iterations.assign_add(1, read_value=False)
      return self._iterations.assign_add(1)
  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Should be used only in legacy v1 graph mode.

    Args:
      loss: Loss tensor.
      params: List of variables.

    Returns:
      List of gradient tensors.

    Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
    """
    params = nest.flatten(params)
    with backend.get_graph().as_default(), backend.name_scope(self._name +
                                                              "/gradients"):
      grads = gradients.gradients(loss, params)
      # Fail loudly on non-differentiable paths instead of silently skipping.
      for grad, param in zip(grads, params):
        if grad is None:
          raise ValueError("Variable {} has `None` for gradient. "
                           "Please make sure that all of your ops have a "
                           "gradient defined (i.e. are differentiable). "
                           "Common ops without gradient: "
                           "K.argmax, K.round, K.eval.".format(param))
    return grads
  def get_updates(self, loss, params):
    """Legacy v1 hook: returns update ops that minimize `loss` over `params`."""
    grads = self.get_gradients(loss, params)
    grads_and_vars = list(zip(grads, params))
    self._assert_valid_dtypes([
        v for g, v in grads_and_vars
        if g is not None and v.dtype != dtypes.resource
    ])
    return [self.apply_gradients(grads_and_vars)]
  def _set_hyper(self, name, value):
    """set hyper `name` to value. value can be callable, tensor, numeric."""
    if isinstance(value, trackable.Trackable):
      # Track schedules/variables for checkpointing.
      self._track_trackable(value, name, overwrite=True)
    if name not in self._hyper:
      self._hyper[name] = value
    else:
      prev_value = self._hyper[name]
      if (callable(prev_value)
          or isinstance(prev_value,
                        (ops.Tensor, int, float,
                         learning_rate_schedule.LearningRateSchedule))
          or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
        # Previous value was never materialized as a variable (or the new
        # value is a schedule), so just replace the Python object.
        self._hyper[name] = value
      else:
        # Previous value is a created hyper variable: assign in place.
        backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
  def _create_slots(self, var_list):
    """Hook for subclasses to create slot variables for `var_list`; no-op here."""
    pass
  def _create_all_weights(self, var_list):
    """Creates all weights, including iterations, hyperparameters and slot vars.

    This will add newly created variables to `optimizer.weights`.

    New variables are only created when this method is called the first time, or
    when called with different variables in the var_list.

    Args:
      var_list: list or tuple of `Variable` objects that will be minimized
        using this optimizer.
    """
    # Accessing the property lazily creates the `iterations` variable.
    _ = self.iterations
    self._create_hypers()
    self._create_slots(var_list)
  def __getattribute__(self, name):
    """Overridden to support hyperparameter access."""
    try:
      return super(OptimizerV2, self).__getattribute__(name)
    except AttributeError as e:
      # Needed to avoid infinite recursion with __setattr__.
      if name == "_hyper":
        raise e
      # Backwards compatibility with Keras optimizers.
      if name == "lr":
        name = "learning_rate"
      # Fall back to dynamically registered hyperparameters.
      if name in self._hyper:
        return self._get_hyper(name)
      raise e
def __dir__(self):
result = set(super(OptimizerV2, self).__dir__())
if "_hyper" in result:
result |= self._hyper.keys()
if "learning_rate" in self._hyper.keys():
result.add("lr")
return list(result)
  def __setattr__(self, name, value):
    """Override setattr to support dynamic hyperparameter setting."""
    # Backwards compatibility with Keras optimizers.
    if name == "lr":
      name = "learning_rate"
    # Route known hyperparameters through _set_hyper; everything else is a
    # normal attribute assignment.
    if hasattr(self, "_hyper") and name in self._hyper:
      self._set_hyper(name, value)
    else:
      super(OptimizerV2, self).__setattr__(name, value)
  def get_slot_names(self):
    """A list of names for this optimizer's slots."""
    return self._slot_names
  def add_slot(self, var, slot_name, initializer="zeros", shape=None):
    """Add a new slot variable for `var`.

    A slot variable is an additional variable associated with `var` to train.
    It is allocated and managed by optimizers, e.g. `Adam`.

    Args:
      var: a `Variable` object.
      slot_name: name of the slot variable.
      initializer: initializer of the slot variable
      shape: (Optional) shape of the slot variable. If not set, it will default
        to the shape of `var`.

    Returns:
      A slot variable.
    """
    if slot_name not in self._slot_names:
      self._slot_names.append(slot_name)
    var_key = _var_key(var)
    slot_dict = self._slots.setdefault(var_key, {})
    weight = slot_dict.get(slot_name, None)
    if weight is None:
      # The slot does not exist yet: create it.
      if isinstance(initializer, str) or callable(initializer):
        initializer = initializers.get(initializer)
        if isinstance(
            initializer,
            trackable.CheckpointInitialValueCallable) or (shape is not None):
          slot_shape = shape
        else:
          slot_shape = var.shape
        initial_value = functools.partial(
            initializer, shape=slot_shape, dtype=var.dtype)
      else:
        initial_value = initializer

      with self._distribution_strategy_scope():
        strategy = distribute_ctx.get_strategy()
        # Slot variables must live under the same strategy scope as `var`.
        if not strategy.extended.variable_created_in_scope(var):
          raise ValueError(
              "Trying to create optimizer slot variable under the scope for "
              "tf.distribute.Strategy ({}), which is different from the scope "
              "used for the original variable ({}). Make sure the slot "
              "variables are created under the same strategy scope. This may "
              "happen if you're restoring from a checkpoint outside the scope"
              .format(strategy, var))

        with strategy.extended.colocate_vars_with(var):
          weight = tf_variables.Variable(
              name="%s/%s" % (var._shared_name, slot_name),  # pylint: disable=protected-access
              dtype=var.dtype,
              trainable=False,
              initial_value=initial_value)
      backend.track_variable(weight)
      slot_dict[slot_name] = weight
      # Apply any checkpoint restore that was deferred for this slot.
      self._restore_slot_variable(
          slot_name=slot_name, variable=var,
          slot_variable=weight)
      self._weights.append(weight)
    return weight
  def get_slot(self, var, slot_name):
    """Returns the slot variable named `slot_name` created for `var`."""
    var_key = _var_key(var)
    slot_dict = self._slots[var_key]
    return slot_dict[slot_name]
  def _prepare(self, var_list):
    """Builds the per-(device, dtype) state dict consumed by the apply ops."""
    keys = set()
    for var in var_list:
      if isinstance(var, ds_values.DistributedValues):
        var_devices = var._devices   # pylint: disable=protected-access
      else:
        var_devices = [var.device]
      var_dtype = var.dtype.base_dtype
      for var_device in var_devices:
        keys.add((var_device, var_dtype))

    apply_state = {}
    for var_device, var_dtype in keys:
      apply_state[(var_device, var_dtype)] = {}
      # Place precomputed tensors (e.g. the decayed lr) on the target device.
      with ops.device(var_device):
        self._prepare_local(var_device, var_dtype, apply_state)

    return apply_state
  def _prepare_local(self, var_device, var_dtype, apply_state):
    """Populates `apply_state[(var_device, var_dtype)]`; here just the lr."""
    if "learning_rate" in self._hyper:
      lr_t = array_ops.identity(self._decayed_lr(var_dtype))
      apply_state[(var_device, var_dtype)]["lr_t"] = lr_t
  def _fallback_apply_state(self, var_device, var_dtype):
    """Compatibility for subclasses that don't pass apply_state through."""
    apply_state = {(var_device, var_dtype): {}}
    self._prepare_local(var_device, var_dtype, apply_state)
    return apply_state[(var_device, var_dtype)]
  def _create_hypers(self):
    """Materializes numeric hyperparameters as variables (idempotent)."""
    if self._hypers_created:
      return
    with self._distribution_strategy_scope():
      # Iterate hyper values deterministically.
      for name, value in sorted(self._hyper.items()):
        if isinstance(value,
                      (ops.Tensor, tf_variables.Variable)) or callable(value):
          # The check for `callable` covers the usage when `value` is a
          # `LearningRateSchedule`, in which case it does not need to create a
          # variable.
          continue
        else:
          self._hyper[name] = self.add_weight(
              name,
              shape=[],
              trainable=False,
              initializer=value,
              aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
    self._hypers_created = True
  @property
  def iterations(self):
    """Variable. The number of training steps this Optimizer has run."""
    if self._iterations is None:
      # Lazily create the step counter on first access.
      with self._distribution_strategy_scope():
        self._iterations = self.add_weight(
            "iter",
            shape=[],
            dtype=dtypes.int64,
            trainable=False,
            aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
      self._weights.append(self._iterations)
    return self._iterations

  @iterations.setter
  def iterations(self, variable):
    # Replacing the counter after creation would orphan the old variable.
    if self._iterations is not None:
      raise RuntimeError("Cannot set `iterations` to a new Variable after "
                         "the Optimizer weights have been created")
    self._iterations = variable
    self._weights.append(self._iterations)
  def _decayed_lr(self, var_dtype):
    """Get decayed learning rate as a Tensor with dtype=var_dtype."""
    lr_t = self._get_hyper("learning_rate", var_dtype)
    if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
      # Evaluate the schedule at the current step.
      local_step = math_ops.cast(self.iterations, var_dtype)
      lr_t = math_ops.cast(lr_t(local_step), var_dtype)
    if self._initial_decay > 0.:
      # Legacy `decay` argument: inverse-time decay on top of the base lr.
      local_step = math_ops.cast(self.iterations, var_dtype)
      decay_t = math_ops.cast(self._initial_decay, var_dtype)
      lr_t = lr_t / (1. + decay_t * local_step)
    return lr_t
  @abc.abstractmethod
  def get_config(self):
    """Returns the config of the optimizer.

    An optimizer config is a Python dictionary (serializable)
    containing the configuration of an optimizer.
    The same optimizer can be reinstantiated later
    (without any saved state) from this configuration.

    Returns:
      Python dictionary.
    """
    # Abstract, but provides the base config; subclasses call
    # super().get_config() and extend the returned dict.
    config = {"name": self._name}
    if self.clipnorm is not None:
      config["clipnorm"] = self.clipnorm
    if self.clipvalue is not None:
      config["clipvalue"] = self.clipvalue
    if self.global_clipnorm is not None:
      config["global_clipnorm"] = self.global_clipnorm
    return config
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Creates an optimizer from its config.

    This method is the reverse of `get_config`,
    capable of instantiating the same optimizer from the config
    dictionary.

    Args:
      config: A Python dictionary, typically the output of get_config.
      custom_objects: A Python dictionary mapping names to additional Python
        objects used to create this optimizer, such as a function used for a
        hyperparameter.

    Returns:
      An optimizer instance.
    """
    # Accept the legacy `lr` alias for `learning_rate`.
    if "lr" in config:
      config["learning_rate"] = config.pop("lr")
    if "learning_rate" in config:
      if isinstance(config["learning_rate"], dict):
        # A serialized LearningRateSchedule: rebuild it.
        config["learning_rate"] = learning_rate_schedule.deserialize(
            config["learning_rate"], custom_objects=custom_objects)
    return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if tensor_util.is_tf_type(value):
return backend.get_value(value)
return value
  def variables(self):
    """Returns variables of this Optimizer based on the order created."""
    return self._weights
  @property
  def weights(self):
    """Returns variables of this Optimizer based on the order created."""
    return self._weights
  def get_weights(self):
    """Returns the current weights of the optimizer.

    The weights of an optimizer are its state (ie, variables).
    This function returns the weight values associated with this
    optimizer as a list of Numpy arrays. The first value is always the
    iterations count of the optimizer, followed by the optimizer's state
    variables in the order they were created. The returned list can in turn
    be used to load state into similarly parameterized optimizers.

    For example, the RMSprop optimizer for this simple model returns a list of
    three values-- the iteration count, followed by the root-mean-square value
    of the kernel and bias of the single Dense layer:

    >>> opt = tf.keras.optimizers.RMSprop()
    >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> m.compile(opt, loss='mse')
    >>> data = np.arange(100).reshape(5, 20)
    >>> labels = np.zeros(5)
    >>> print('Training'); results = m.fit(data, labels)
    Training ...
    >>> len(opt.get_weights())
    3

    Returns:
      Weights values as a list of numpy arrays.
    """
    params = self.weights
    return backend.batch_get_value(params)
  # TODO(tanzheny): Maybe share this logic with base_layer.
  def set_weights(self, weights):
    """Set the weights of the optimizer.

    The weights of an optimizer are its state (ie, variables).
    This function takes the weight values associated with this
    optimizer as a list of Numpy arrays. The first value is always the
    iterations count of the optimizer, followed by the optimizer's state
    variables in the order they are created. The passed values are used to set
    the new state of the optimizer.

    For example, the RMSprop optimizer for this simple model takes a list of
    three values-- the iteration count, followed by the root-mean-square value
    of the kernel and bias of the single Dense layer:

    >>> opt = tf.keras.optimizers.RMSprop()
    >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> m.compile(opt, loss='mse')
    >>> data = np.arange(100).reshape(5, 20)
    >>> labels = np.zeros(5)
    >>> print('Training'); results = m.fit(data, labels)
    Training ...
    >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]
    >>> opt.set_weights(new_weights)
    >>> opt.iterations
    <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10>

    Args:
      weights: weight values as a list of numpy arrays.
    """
    params = self.weights
    # Validate count and shapes before mutating anything.
    if len(params) != len(weights):
      raise ValueError(
          "You called `set_weights(weights)` on optimizer " + self._name +
          " with a  weight list of length " + str(len(weights)) +
          ", but the optimizer was expecting " + str(len(params)) +
          " weights. Provided weights: " + str(weights)[:50] + "...")
    if not params:
      return
    weight_value_tuples = []
    param_values = backend.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError("Optimizer weight shape " + str(pv.shape) +
                         " not compatible with "
                         "provided weight shape " + str(w.shape))
      weight_value_tuples.append((p, w))
    backend.batch_set_value(weight_value_tuples)
  def add_weight(self,
                 name,
                 shape,
                 dtype=None,
                 initializer="zeros",
                 trainable=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE):
    """Creates a tracked optimizer weight (used for hypers and `iterations`)."""
    if dtype is None:
      dtype = dtypes.float32
    if isinstance(initializer, str) or callable(initializer):
      initializer = initializers.get(initializer)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            "Synchronization value can be set to "
            "VariableSynchronization.ON_READ only for non-trainable variables. "
            "You have specified trainable=True and "
            "synchronization=VariableSynchronization.ON_READ.")
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        getter=base_layer_utils.make_variable,
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        trainable=trainable,
        use_resource=True,
        synchronization=synchronization,
        aggregation=aggregation)
    backend.track_variable(variable)

    return variable
  def _init_set_name(self, name, zero_based=True):
    """Sets `self._name`, deriving a unique snake_case name when none given."""
    if not name:
      self._name = backend.unique_object_name(
          generic_utils.to_snake_case(self.__class__.__name__),
          zero_based=zero_based)
    else:
      self._name = name
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
  def _valid_dtypes(self):
    """Valid types for loss, variables and gradients.

    Subclasses should override to allow other float types.

    Returns:
      Valid types for loss, variables and gradients.
    """
    return _DEFAULT_VALID_DTYPES
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
  def _resource_apply_dense(self, grad, handle, apply_state):
    """Add ops to apply dense gradients to the variable `handle`.

    Args:
      grad: a `Tensor` representing the gradient.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      apply_state: A dict which is used across multiple apply calls.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    raise NotImplementedError("Must be implemented in subclasses.")
  def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
                                               **kwargs):
    """Add ops to apply sparse gradients to `handle`, with repeated indices.

    Optimizers which override this method must deal with repeated indices. See
    the docstring of `_apply_sparse_duplicate_indices` for details. By default
    the correct behavior, to sum non-unique indices and their associated
    gradients, is enforced by first pre-processing `grad` and `indices` and
    passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
    with duplicate indices may instead override this method to avoid the
    overhead of summing.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      indices: a `Tensor` of integral type representing the indices for which
        the gradient is nonzero. Indices may be repeated.
      **kwargs: May optionally contain `apply_state`

    Returns:
      An `Operation` which updates the value of the variable.
    """
    summed_grad, unique_indices = _deduplicate_indexed_slices(
        values=grad, indices=indices)
    return self._resource_apply_sparse(summed_grad, handle, unique_indices,
                                       **kwargs)
  def _resource_apply_sparse(self, grad, handle, indices, apply_state):
    """Add ops to apply sparse gradients to the variable `handle`.

    Similar to `_apply_sparse`, the `indices` argument to this method has been
    de-duplicated. Optimizers which deal correctly with non-unique indices may
    instead override `_resource_apply_sparse_duplicate_indices` to avoid this
    overhead.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      indices: a `Tensor` of integral type representing the indices for which
        the gradient is nonzero. Indices are unique.
      apply_state: A dict which is used across multiple apply calls.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    raise NotImplementedError("Must be implemented in subclasses.")
  def _resource_scatter_add(self, x, i, v):
    """Scatter-adds `v` into variable `x` at indices `i`; returns x's value."""
    with ops.control_dependencies([
        gen_resource_variable_ops.ResourceScatterAdd(
            resource=x.handle, indices=i, updates=v)
    ]):
      # The read is sequenced after the scatter via the control dependency.
      return x.value()
  def _resource_scatter_update(self, x, i, v):
    """Scatter-assigns `v` into variable `x` at indices `i`; returns x's value."""
    with ops.control_dependencies(
        [gen_resource_variable_ops.ResourceScatterUpdate(
            resource=x.handle, indices=i, updates=v)]):
      return x.value()
  @property
  @layer_utils.cached_per_instance
  def _dense_apply_args(self):
    # Cached argument names of _resource_apply_dense, used to decide whether
    # `apply_state` can be forwarded to the subclass implementation.
    return tf_inspect.getfullargspec(self._resource_apply_dense).args
  @property
  @layer_utils.cached_per_instance
  def _sparse_apply_args(self):
    # Cached argument names of _resource_apply_sparse (see _dense_apply_args).
    return tf_inspect.getfullargspec(self._resource_apply_sparse).args
  # ---------------
  # For implementing the trackable interface
  # ---------------

  def _restore_slot_variable(self, slot_name, variable, slot_variable):
    """Restore a newly created slot variable's value."""
    variable_key = _var_key(variable)
    deferred_restorations = self._deferred_slot_restorations.get(
        slot_name, {}).pop(variable_key, [])
    # Iterate over restores, highest restore UID first to minimize the number
    # of assignments.
    deferred_restorations.sort(key=lambda position: position.restore_uid,
                               reverse=True)
    for checkpoint_position in deferred_restorations:
      checkpoint_position.restore(slot_variable)
  def _create_or_restore_slot_variable(
      self, slot_variable_position, slot_name, variable):
    """Restore a slot variable's value, possibly creating it.

    Called when a variable which has an associated slot variable is created or
    restored. When executing eagerly, we create the slot variable with a
    restoring initializer.

    No new variables are created when graph building. Instead,
    _restore_slot_variable catches these after normal creation and adds restore
    ops to the graph. This method is nonetheless important when graph building
    for the case when a slot variable has already been created but `variable`
    has just been added to a dependency graph (causing us to realize that the
    slot variable needs to be restored).

    Args:
      slot_variable_position: A `trackable._CheckpointPosition` object
        indicating the slot variable `Trackable` object to be restored.
      slot_name: The name of this `Optimizer`'s slot to restore into.
      variable: The variable object this slot is being created for.
    """
    variable_key = _var_key(variable)
    slot_dict = self._slots.get(variable_key, {})
    slot_variable = slot_dict.get(slot_name, None)
    if (slot_variable is None and context.executing_eagerly() and
        slot_variable_position.is_simple_variable()
        # Defer slot variable creation if there is an active variable creator
        # scope. Generally we'd like to eagerly create/restore slot variables
        # when possible, but this may mean that scopes intended to catch
        # `variable` also catch its eagerly created slot variable
        # unintentionally (specifically make_template would add a dependency on
        # a slot variable if not for this case). Deferring is mostly harmless
        # (aside from double initialization), and makes variable creator scopes
        # behave the same way they do when graph building.
        #
        # One notable case is with distribution strategy, which uses variable
        # creator scope but always desires the `variable` and the slot to use
        # the same scope, thus we can safely eagerly create/restore slot
        # variables.
        and (not ops.get_default_graph()._variable_creator_stack or  # pylint: disable=protected-access
             self._distribution_strategy)):
      # Eager path: create the slot now, initialized from the checkpoint.
      initializer = trackable.CheckpointInitialValueCallable(
          checkpoint_position=slot_variable_position)
      slot_variable = self.add_slot(
          var=variable,
          initializer=initializer,
          slot_name=slot_name,
          shape=slot_variable_position.value_shape())
      # Slot variables are not owned by any one object (because we don't want to
      # save the slot variable if the optimizer is saved without the non-slot
      # variable, or if the non-slot variable is saved without the optimizer;
      # it's a dependency hypergraph with edges of the form (optimizer, non-slot
      # variable, variable)). So we don't _track_ slot variables anywhere, and
      # instead special-case this dependency and otherwise pretend it's a normal
      # graph.
    if slot_variable is not None:
      # If we've either made this slot variable, or if we've pulled out an
      # existing slot variable, we should restore it.
      slot_variable_position.restore(slot_variable)
    else:
      # We didn't make the slot variable. Defer restoring until it gets created
      # normally. We keep a list rather than the one with the highest restore
      # UID in case slot variables have their own dependencies, in which case
      # those could differ between restores.
      self._deferred_slot_restorations.setdefault(
          slot_name, {}).setdefault(variable_key, []).append(
              slot_variable_position)
  @contextlib.contextmanager
  def _distribution_strategy_scope(self):
    """Enters the `tf.distribute.Strategy` scope this optimizer was created
    under, if one exists and no strategy is already active; no-op otherwise."""
    if self._distribution_strategy and not distribute_ctx.has_strategy():
      with self._distribution_strategy.scope():
        yield self._distribution_strategy.scope()
    else:
      yield
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
  """Get the slot key for the variable: var_name/slot_name."""
  return "{}/{}".format(_var_key(var), slot_name)
class RestoredOptimizer(OptimizerV2):
  """A non-functional Optimizer implementation for checkpoint compatibility.

  Holds slot variables and hyperparameters when an optimizer is restored from a
  SavedModel. These variables may be referenced in functions along with ops
  created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
  """
  # TODO(allenl): Make the restored optimizer functional by tracing its apply
  # methods.
  def __init__(self):
    super(RestoredOptimizer, self).__init__("RestoredOptimizer")
    # Hyperparameters come from the restored checkpoint rather than being
    # created here, so mark them as already created.
    self._hypers_created = True
  def get_config(self):
    # TODO(allenl): Save and restore the Optimizer's config
    raise NotImplementedError(
        "Restoring functional Optimizers from SavedModels is not currently "
        "supported. Please file a feature request if this limitation bothers "
        "you.")
# Register optimizers saved into SavedModels ("tf_deprecated_optimizer") so
# they are revived as RestoredOptimizer placeholder objects on load.
revived_types.register_revived_type(
    "tf_deprecated_optimizer",
    lambda obj: isinstance(obj, OptimizerV2),
    versions=[revived_types.VersionedTypeRegistration(
        object_factory=lambda proto: RestoredOptimizer(),
        version=1,
        min_producer_version=1,
        min_consumer_version=1,
        setter=RestoredOptimizer._set_hyper  # pylint: disable=protected-access
    )])
| frreiss/tensorflow-fred | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | Python | apache-2.0 | 58,624 |
'''
Created on 25/1/2015
@author: USUARIO
'''
# Module stub: the entry-point guard below intentionally does nothing when the
# file is executed as a script.
if __name__ == '__main__':
    pass
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-01 21:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the ``weight`` column from MenuLink,
    # reverting the field introduced in migration 0007_menulink_weight.
    dependencies = [
        ('menu', '0007_menulink_weight'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='menulink',
            name='weight',
        ),
    ]
| IVaN4B/maugli | maugli/menu/migrations/0008_remove_menulink_weight.py | Python | gpl-3.0 | 388 |
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
"""Package-wide configuration"""
try:
import __builtin__ as builtins
except ImportError: # pragma: no cover
import builtins
from tag.comment import Comment
from tag.directive import Directive
from tag.feature import Feature
from tag.sequence import Sequence
from tag.range import Range
from tag.reader import GFF3Reader
from tag.writer import GFF3Writer
from tag.score import Score
from tag import bae
from tag import cli
from tag import index
from tag import locus
from tag import select
from tag import transcript
from gzip import open as gzopen
import sys
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
    """Open *filename* for reading or writing, transparently handling gzip.

    A filename of ``'-'`` or ``None`` maps to stdin (mode ``'r'``) or stdout
    (mode ``'w'``); names ending in ``.gz`` are opened in text mode through
    :func:`gzip.open`.  Only modes ``'r'`` and ``'w'`` are accepted.
    """
    if mode not in ('r', 'w'):
        raise ValueError('invalid mode "{}"'.format(mode))
    if filename in ('-', None):  # pragma: no cover
        return sys.stdin if mode == 'r' else sys.stdout
    if filename.endswith('.gz'):
        return gzopen(filename, mode + 't')
    return builtins.open(filename, mode)
| standage/tag | tag/__init__.py | Python | bsd-3-clause | 1,438 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated:
removed_in: "2.5"
why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
alternative: Use M(nclu) instead.
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf,
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to run initial run at 10G.
speed_40g:
description:
- List of ports to run initial run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
# This module was removed in Ansible 2.5 (see the `deprecated` metadata above;
# use the `nclu` module instead).  Running it only reports the removal.
if __name__ == '__main__':
    removed_module()
| hryamzik/ansible | lib/ansible/modules/network/cumulus/_cl_ports.py | Python | gpl-3.0 | 2,580 |
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
from optparse import make_option
from sys import stdout
from csv import writer
# Supported values for the --format option; each entry names a Command method
# below that renders the queryset in that format.  The first entry is the
# default.
FORMATS = [
    'address',
    'google',
    'outlook',
    'linkedin',
    'vcard',
]
def full_name(first_name, last_name, username, **extra):
    """Return "first last" (skipping empty parts), or *username* if both
    name parts are empty.  Extra keyword arguments are ignored so a whole
    values() row can be splatted in."""
    parts = [part for part in (first_name, last_name) if part]
    if parts:
        return u" ".join(parts)
    return username
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--group', '-g', action='store', dest='group', default=None,
help='Limit to users which are part of the supplied group name'),
make_option('--format', '-f', action='store', dest='format', default=FORMATS[0],
help="output format. May be one of '" + "', '".join(FORMATS) + "'."),
)
help = ("Export user email address list in one of a number of formats.")
args = "[output file]"
label = 'filename to save to'
requires_model_validation = True
can_import_settings = True
encoding = 'utf-8' # RED_FLAG: add as an option -DougN
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
group = options['group']
if group and not Group.objects.filter(name=group).count() == 1:
names = u"', '".join(g['name'] for g in Group.objects.values('name')).encode('utf-8')
if names:
names = "'" + names + "'."
raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
if len(args) and args[0] != '-':
outfile = file(args[0], 'w')
else:
outfile = stdout
qs = User.objects.all().order_by('last_name', 'first_name', 'username', 'email')
if group:
qs = qs.filter(group__name=group).distinct()
qs = qs.values('last_name', 'first_name', 'username', 'email')
getattr(self, options['format'])(qs, outfile)
def address(self, qs, out):
"""simple single entry per line in the format of:
"full name" <my@address.com>;
"""
out.write(u"\n".join(u'"%s" <%s>;' % (full_name(**ent), ent['email'])
for ent in qs).encode(self.encoding))
out.write("\n")
def google(self, qs, out):
"""CSV format suitable for importing into google GMail
"""
csvf = writer(out)
csvf.writerow(['Name', 'Email'])
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)])
def outlook(self, qs, out):
"""CSV format suitable for importing into outlook
"""
csvf = writer(out)
columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
csvf.writerow(columns)
empty = [''] * (len(columns) - 2)
for ent in qs:
csvf.writerow([full_name(**ent).encode(self.encoding),
ent['email'].encode(self.encoding)] + empty)
def linkedin(self, qs, out):
"""CSV format suitable for importing into linkedin Groups.
perfect for pre-approving members of a linkedin group.
"""
csvf = writer(out)
csvf.writerow(['First Name', 'Last Name', 'Email'])
for ent in qs:
csvf.writerow([ent['first_name'].encode(self.encoding),
ent['last_name'].encode(self.encoding),
ent['email'].encode(self.encoding)])
def vcard(self, qs, out):
try:
import vobject
except ImportError:
print self.style.ERROR("Please install python-vobject to use the vcard export format.")
import sys
sys.exit(1)
for ent in qs:
card = vobject.vCard()
card.add('fn').value = full_name(**ent)
if not ent['last_name'] and not ent['first_name']:
# fallback to fullname, if both first and lastname are not declared
card.add('n').value = vobject.vcard.Name(full_name(**ent))
else:
card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
emailpart = card.add('email')
emailpart.value = ent['email']
emailpart.type_param = 'INTERNET'
out.write(card.serialize().encode(self.encoding))
| waseem18/oh-mainline | vendor/packages/django-extensions/django_extensions/management/commands/export_emails.py | Python | agpl-3.0 | 4,801 |
"""
Client for communicating with a cmdserver.
"""
import requests
import logging
from common import RpcException
logger = logging.getLogger(__name__)
class CmdClient(object):
    """HTTP client for a remote cmdserver.

    Wraps the server's ``/check``, ``/kill`` and ``/run`` endpoints.
    """
    def __init__(self, user, host, port):
        # user: identity associated with this client; host/port: server address.
        self.user = user
        self.host = host
        self.port = port
    def check(self):
        """Check server health.

        Returns a boolean as to whether the server was reached and reported
        itself healthy.  Unexpected errors (bad JSON, missing "ok" key, ...)
        propagate to the caller.
        """
        # BUGFIX: removed the unreachable status_code check that followed the
        # try/except (every path above it returned or raised), and dropped
        # the ``raise ex`` re-raise that truncated tracebacks.
        try:
            res = requests.get(self._url("check"))
            resj = res.json()
            return resj["ok"]=="ok"
        except requests.exceptions.ConnectionError:
            return False
    def kill(self):
        """Kills the server.

        Returns: Boolean whether the kill command went through.
        """
        try:
            res = requests.post(self._url("kill"))
            return res.status_code == 200
        except requests.exceptions.RequestException as ex:
            logging.debug("server not killed %s", ex)
            return False
    def run_remote(self, file_name, function_name, *args, **kwargs):
        """Invoke ``function_name`` from ``file_name`` on the server.

        Raises:
            RpcException: on transport failure, a non-200 response, or a
                server-reported error.

        Returns:
            None -- the remote return value is intentionally discarded.
        """
        payload = {
            "function_name": function_name,
            "args": args,
            "kwargs" : kwargs,
            "function_file": file_name
        }
        try:
            res = requests.post(self._url("run"), json=payload)
        except Exception as ex:
            raise RpcException("Could not connect to RPC server.", ex)
        if res.status_code != 200:
            raise RpcException("RPC server returned code {}".format(res.status_code))
        resj = res.json()
        if "error" in resj:
            raise RpcException(resj["error"])
        return None
    def _url(self, path):
        """Build the full endpoint URL for *path*."""
        return "http://{}:{}/{}".format(self.host, self.port, path)
| anpere/goaway | goaway/cmdclient.py | Python | mit | 2,032 |
class InvalidToken(Exception):
    """Raised when an authentication token is rejected.

    Attributes:
        message: human-readable description of the failure.
        payload: response payload (or context) associated with the error.
    """
    # NOTE: inheriting from Exception alone suffices; the old explicit
    # ``object`` base was redundant.
    def __init__(self, message, payload):
        # Pass both values to Exception so they appear in ``args`` and in the
        # default string representation.
        super(InvalidToken, self).__init__(message, payload)
        self.message = message
        self.payload = payload
from atlassian import Bitbucket
# Connection settings for a local Bitbucket Server instance.
url = "http://localhost:7990"
username = "admin"
password = "admin"
# Target pull request: project key, repository slug and PR id.
proj = "PROJ"
repo = "test-repo"
pr_id = 123
# advanced_mode=True returns raw response objects -- hence the explicit
# .json() call below (presumably; confirm against the atlassian-python-api docs).
bitbucket = Bitbucket(url=url, username=username, password=password, advanced_mode=True)
diff = bitbucket.get_pull_requests_changes(proj, repo, pr_id).json()
# Print the path of every file changed in the pull request.
for item in diff.get("values", []):
    print(item.get("path", {}).get("toString"))
| AstroTech/atlassian-python-api | examples/bitbucket/bitbucket_pullrequest_get_changed_files.py | Python | apache-2.0 | 391 |
import os
from pkg_resources import resource_filename
import time
import arcpy
import numpy
import nose.tools as nt
import numpy.testing as nptest
import tidegates.testing as tgtest
import mock
import tidegates
from tidegates import utils
@nt.nottest
class MockResult(object):
    """Minimal stand-in for ``arcpy.Result``: ``getOutput`` always returns
    the path supplied at construction time."""
    def __init__(self, path):
        self.path = path
    def getOutput(self, *args, **kwargs):
        # BUGFIX: ``self`` was missing from the signature, so any bound call
        # raised a NameError instead of returning the stored path.
        return self.path
def test_RasterTemplate():
    # Direct construction: cell size and lower-left corner are stored as-is.
    size, x, y = 8, 1, 2
    template = utils.RasterTemplate(size, x, y)
    nt.assert_equal(template.meanCellWidth, size)
    nt.assert_equal(template.meanCellHeight, size)
    nt.assert_equal(template.extent.lowerLeft.X, x)
    nt.assert_equal(template.extent.lowerLeft.Y, y)
def test_RasterTemplate_from_raster():
    # Alternate constructor: geometry is copied from an existing raster.
    _raster = resource_filename('tidegates.testing._Template', 'dem.tif')
    raster = utils.load_data(_raster, 'raster')
    template = utils.RasterTemplate.from_raster(raster)
    nt.assert_equal(template.meanCellWidth, raster.meanCellWidth)
    nt.assert_equal(template.meanCellHeight, raster.meanCellHeight)
    nt.assert_equal(template.extent.lowerLeft.X, raster.extent.lowerLeft.X)
    nt.assert_equal(template.extent.lowerLeft.Y, raster.extent.lowerLeft.Y)
class Test_EasyMapDoc(object):
    """Tests for ``utils.EasyMapDoc`` against a known .mxd fixture."""
    def setup(self):
        self.mxd = resource_filename("tidegates.testing.EasyMapDoc", "test.mxd")
        self.ezmd = utils.EasyMapDoc(self.mxd)
        self.knownlayer_names = ['ZOI', 'wetlands', 'ZOI_first_few', 'wetlands_first_few']
        self.knowndataframe_names = ['Main', 'Subset']
        self.add_layer_path = resource_filename("tidegates.testing.EasyMapDoc", "ZOI.shp")
    def test_layers(self):
        # ``layers`` exposes every layer in the map document, in order.
        nt.assert_true(hasattr(self.ezmd, 'layers'))
        layers_names = [layer.name for layer in self.ezmd.layers]
        nt.assert_list_equal(layers_names, self.knownlayer_names)
    def test_dataframes(self):
        # ``dataframes`` exposes every data frame in the map document.
        nt.assert_true(hasattr(self.ezmd, 'dataframes'))
        df_names = [df.name for df in self.ezmd.dataframes]
        nt.assert_list_equal(df_names, self.knowndataframe_names)
    def test_findLayerByName(self):
        # Lookup by name returns the matching arcpy Layer object.
        name = 'ZOI_first_few'
        lyr = self.ezmd.findLayerByName(name)
        nt.assert_true(isinstance(lyr, arcpy.mapping.Layer))
        nt.assert_equal(lyr.name, name)
    def test_add_layer_with_path(self):
        # Adding by file path appends a new layer.
        nt.assert_equal(len(self.ezmd.layers), 4)
        self.ezmd.add_layer(self.add_layer_path)
        nt.assert_equal(len(self.ezmd.layers), 5)
    def test_add_layer_with_layer_and_other_options(self):
        # Adding an existing Layer object with explicit position/data frame.
        layer = arcpy.mapping.Layer(self.add_layer_path)
        nt.assert_equal(len(self.ezmd.layers), 4)
        self.ezmd.add_layer(layer, position='bottom', df=self.ezmd.dataframes[1])
        nt.assert_equal(len(self.ezmd.layers), 5)
    @nt.raises(ValueError)
    def test_bad_layer(self):
        # Anything that is neither a path nor a Layer is rejected.
        self.ezmd.add_layer(123456)
    @nt.raises(ValueError)
    def test_bad_position(self):
        # Only known position keywords are accepted.
        self.ezmd.add_layer(self.add_layer_path, position='junk')
class Test_Extension(object):
    """Tests for the ``utils.Extension`` license check-out context manager."""
    def setup(self):
        # 'spatial' is licensed in the test environment; 'Datareviewer' is not.
        self.known_available = 'spatial'
        self.known_unavailable = 'Datareviewer'
    @nt.raises(RuntimeError)
    def test_unlicensed_extension(self):
        with utils.Extension(self.known_unavailable):
            pass
    def test_licensed_extension(self):
        # The extension is checked out inside the block and released after.
        nt.assert_equal(arcpy.CheckExtension(self.known_available), u'Available')
        with utils.Extension(self.known_available) as ext:
            nt.assert_equal(ext, 'CheckedOut')
        nt.assert_equal(arcpy.CheckExtension(self.known_available), u'Available')
    def teardown(self):
        arcpy.CheckExtension(self.known_available)
class Test_OverwriteState(object):
    """Tests that ``utils.OverwriteState`` sets ``arcpy.env.overwriteOutput``
    inside the block and restores the prior value afterwards (all four
    before/inside combinations)."""
    def test_true_true(self):
        arcpy.env.overwriteOutput = True
        nt.assert_true(arcpy.env.overwriteOutput)
        with utils.OverwriteState(True):
            nt.assert_true(arcpy.env.overwriteOutput)
        nt.assert_true(arcpy.env.overwriteOutput)
    def test_false_false(self):
        arcpy.env.overwriteOutput = False
        nt.assert_false(arcpy.env.overwriteOutput)
        with utils.OverwriteState(False):
            nt.assert_false(arcpy.env.overwriteOutput)
        nt.assert_false(arcpy.env.overwriteOutput)
    def test_true_false(self):
        arcpy.env.overwriteOutput = True
        nt.assert_true(arcpy.env.overwriteOutput)
        with utils.OverwriteState(False):
            nt.assert_false(arcpy.env.overwriteOutput)
        nt.assert_true(arcpy.env.overwriteOutput)
    def test_false_true(self):
        arcpy.env.overwriteOutput = False
        nt.assert_false(arcpy.env.overwriteOutput)
        with utils.OverwriteState(True):
            nt.assert_true(arcpy.env.overwriteOutput)
        nt.assert_false(arcpy.env.overwriteOutput)
class Test_WorkSpace(object):
    """Tests that ``utils.WorkSpace`` swaps ``arcpy.env.workspace`` inside the
    block and restores the original workspace afterwards."""
    def setup(self):
        self.baseline = os.getcwd()
        self.new_ws = u'C:/Users'
        arcpy.env.workspace = self.baseline
    def test_workspace(self):
        nt.assert_equal(arcpy.env.workspace, self.baseline)
        with utils.WorkSpace(self.new_ws):
            nt.assert_equal(arcpy.env.workspace, self.new_ws)
        nt.assert_equal(arcpy.env.workspace, self.baseline)
class Test_create_temp_filename():
def setup(self):
self.folderworkspace = os.path.join('some', 'other', 'folder')
self.geodbworkspace = os.path.join('another', 'geodb.gdb')
def test_folderworkspace_withsubfolder(self):
with utils.WorkSpace(self.folderworkspace):
known_raster = os.path.join(self.folderworkspace, 'subfolder', '_temp_test.tif')
temp_raster = utils.create_temp_filename(os.path.join('subfolder', 'test'), filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, 'subfolder', '_temp_test.shp')
temp_shape = utils.create_temp_filename(os.path.join('subfolder','test'), filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_folderworkspace_withsubfolder_with_num(self):
with utils.WorkSpace(self.folderworkspace):
known_raster = os.path.join(self.folderworkspace, 'subfolder', '_temp_test_1.tif')
temp_raster = utils.create_temp_filename(os.path.join('subfolder', 'test'), filetype='raster', num=1)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, 'subfolder', '_temp_test_12.shp')
temp_shape = utils.create_temp_filename(os.path.join('subfolder','test'), filetype='shape', num=12)
nt.assert_equal(temp_shape, known_shape)
def test_folderworkspace_barefile(self):
with utils.WorkSpace(self.folderworkspace):
known_raster = os.path.join(self.folderworkspace, '_temp_test.tif')
temp_raster = utils.create_temp_filename('test', filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, '_temp_test.shp')
temp_shape = utils.create_temp_filename('test', filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_folderworkspace_barefile_with_num(self):
with utils.WorkSpace(self.folderworkspace):
known_raster = os.path.join(self.folderworkspace, '_temp_test_14.tif')
temp_raster = utils.create_temp_filename('test', filetype='raster', num=14)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, '_temp_test_3.shp')
temp_shape = utils.create_temp_filename('test', filetype='shape', num=3)
nt.assert_equal(temp_shape, known_shape)
def test_geodb_barefile(self):
with utils.WorkSpace(self.geodbworkspace):
known_raster = os.path.join(self.geodbworkspace, '_temp_test')
temp_raster = utils.create_temp_filename('test', filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.geodbworkspace, '_temp_test')
temp_shape = utils.create_temp_filename('test', filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_geodb_barefile_with_num(self):
with utils.WorkSpace(self.geodbworkspace):
known_raster = os.path.join(self.geodbworkspace, '_temp_test_7')
temp_raster = utils.create_temp_filename('test', filetype='raster', num=7)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.geodbworkspace, '_temp_test_22')
temp_shape = utils.create_temp_filename('test', filetype='shape', num=22)
nt.assert_equal(temp_shape, known_shape)
def test_geodb_as_subfolder(self):
with utils.WorkSpace(self.folderworkspace):
filename = os.path.join(self.geodbworkspace, 'test')
known_raster = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test')
temp_raster = utils.create_temp_filename(filename, filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test')
temp_shape = utils.create_temp_filename(filename, filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_geodb_as_subfolder_with_num(self):
with utils.WorkSpace(self.folderworkspace):
filename = os.path.join(self.geodbworkspace, 'test')
known_raster = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test_5')
temp_raster = utils.create_temp_filename(filename, filetype='raster', num=5)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test_99')
temp_shape = utils.create_temp_filename(filename, filetype='shape', num=99)
nt.assert_equal(temp_shape, known_shape)
def test_with_extension_geodb(self):
with utils.WorkSpace(self.folderworkspace):
filename = os.path.join(self.geodbworkspace, 'test')
known_raster = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test')
temp_raster = utils.create_temp_filename(filename + '.tif', filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test')
temp_shape = utils.create_temp_filename(filename + '.tif', filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_with_extension_geodb_with_num(self):
with utils.WorkSpace(self.folderworkspace):
filename = os.path.join(self.geodbworkspace, 'test')
known_raster = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test_2000')
temp_raster = utils.create_temp_filename(filename + '.tif', filetype='raster', num=2000)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, self.geodbworkspace, '_temp_test_999')
temp_shape = utils.create_temp_filename(filename + '.tif', filetype='shape', num=999)
nt.assert_equal(temp_shape, known_shape)
def test_with_extension_folder(self):
with utils.WorkSpace(self.folderworkspace):
filename = 'test'
known_raster = os.path.join(self.folderworkspace, '_temp_test.tif')
temp_raster = utils.create_temp_filename(filename + '.tif', filetype='raster')
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, '_temp_test.shp')
temp_shape = utils.create_temp_filename(filename + '.shp', filetype='shape')
nt.assert_equal(temp_shape, known_shape)
def test_with_extension_folder_with_num(self):
with utils.WorkSpace(self.folderworkspace):
filename = 'test'
known_raster = os.path.join(self.folderworkspace, '_temp_test_4.tif')
temp_raster = utils.create_temp_filename(filename + '.tif', filetype='raster', num=4)
nt.assert_equal(temp_raster, known_raster)
known_shape = os.path.join(self.folderworkspace, '_temp_test_4.shp')
temp_shape = utils.create_temp_filename(filename + '.shp', filetype='shape', num=4)
nt.assert_equal(temp_shape, known_shape)
class Test__check_fields(object):
    """Tests for ``utils._check_fields``: existing fields pass a
    ``should_exist=True`` check, unknown names pass ``should_exist=False``,
    and the special "SHAPE@AREA" token is always tolerated."""
    table = resource_filename("tidegates.testing.check_fields", "test_file.shp")
    def test_should_exist_uni(self):
        utils._check_fields(self.table, "Id", should_exist=True)
    def test_should_exist_multi(self):
        utils._check_fields(self.table, "Id", "existing", should_exist=True)
    def test_should_exist_multi_witharea(self):
        utils._check_fields(self.table, "Id", "existing", "SHAPE@AREA", should_exist=True)
    @nt.raises(ValueError)
    def test_should_exist_bad_vals(self):
        utils._check_fields(self.table, "Id", "existing", "JUNK", "GARBAGE", should_exist=True)
    def test_should_not_exist_uni(self):
        utils._check_fields(self.table, "NEWFIELD", should_exist=False)
    def test_should_not_exist_multi(self):
        utils._check_fields(self.table, "NEWFIELD", "YANFIELD", should_exist=False)
    def test_should_not_exist_multi_witharea(self):
        utils._check_fields(self.table, "NEWFIELD", "YANFIELD", "SHAPE@AREA", should_exist=False)
    @nt.raises(ValueError)
    def test_should_not_exist_bad_vals(self):
        utils._check_fields(self.table, "NEWFIELD", "YANFIELD", "existing", should_exist=False)
def test_result_to_raster():
    # result_to_raster should wrap the Result's getOutput(0) in arcpy.Raster.
    mockResult = mock.Mock(spec=arcpy.Result)
    mockRaster = mock.Mock(spec=arcpy.Raster)
    with mock.patch('arcpy.Raster', mockRaster):
        raster = utils.result_to_raster(mockResult)
        mockResult.getOutput.assert_called_once_with(0)
def test_result_to_Layer():
    # result_to_layer should wrap getOutput(0) in an arcpy.mapping.Layer.
    mockResult = mock.Mock(spec=arcpy.Result)
    mockLayer = mock.Mock(spec=arcpy.mapping.Layer)
    with mock.patch('arcpy.mapping.Layer', mockLayer):
        layer = utils.result_to_layer(mockResult)
        mockResult.getOutput.assert_called_once_with(0)
class Test_rasters_to_arrays(object):
def setup(self):
from numpy import nan
self.known_array1 = numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0]
])
self.known_array2 = numpy.array([
[nan, 10.0, 20.0, 30.0, 40.0],
[nan, 60.0, 70.0, 80.0, 90.0],
[nan, 110.0, 120.0, 130.0, 140.0],
[nan, 160.0, 170.0, 180.0, 190.0]
])
self.known_array3 = numpy.array([
[ 00, 100, 200, 300, 400],
[ 500, 600, 700, 800, 900],
[1000, 1100, 1200, 1300, 1400],
[1500, 1600, 1700, 1800, 1900]
])
self.rasterfile1 = resource_filename("tidegates.testing.rasters_to_arrays", 'test_raster1')
self.rasterfile2 = resource_filename("tidegates.testing.rasters_to_arrays", 'test_raster2')
self.rasterfile3 = resource_filename("tidegates.testing.rasters_to_arrays", 'test_raster3')
def test_one_raster(self):
array = utils.rasters_to_arrays(self.rasterfile1)
nt.assert_true(isinstance(array, list))
nt.assert_equal(len(array), 1)
nptest.assert_array_almost_equal(array[0], self.known_array1)
def test_one_raster_squeezed(self):
array = utils.rasters_to_arrays(self.rasterfile1, squeeze=True)
nt.assert_true(isinstance(array, numpy.ndarray))
nptest.assert_array_almost_equal(array, self.known_array1)
def test_with_missing_values_squeeze(self):
array = utils.rasters_to_arrays(self.rasterfile2, squeeze=True)
nt.assert_true(isinstance(array, numpy.ndarray))
nptest.assert_array_almost_equal(array, self.known_array2)
def test_int_array(self):
array = utils.rasters_to_arrays(self.rasterfile3, squeeze=True)
nt.assert_true(isinstance(array, numpy.ndarray))
nptest.assert_array_almost_equal(array, self.known_array3)
def test_multiple_args(self):
arrays = utils.rasters_to_arrays(
self.rasterfile1,
self.rasterfile2,
self.rasterfile3,
squeeze=True
)
nt.assert_true(isinstance(arrays, list))
nt.assert_equal(len(arrays), 3)
for a, kn in zip(arrays, [self.known_array1, self.known_array2, self.known_array3]):
nt.assert_true(isinstance(a, numpy.ndarray))
nptest.assert_array_almost_equal(a, kn)
def test_array_to_raster():
    # A raster built from an array + template must inherit the template's
    # extent and cell geometry.
    template_file = resource_filename("tidegates.testing.array_to_raster", 'test_raster2')
    template = arcpy.Raster(template_file)
    array = numpy.arange(5, 25).reshape(4, 5).astype(float)
    raster = utils.array_to_raster(array, template)
    nt.assert_true(isinstance(raster, arcpy.Raster))
    nt.assert_true(raster.extent.equals(template.extent))
    nt.assert_equal(raster.meanCellWidth, template.meanCellWidth)
    nt.assert_equal(raster.meanCellHeight, template.meanCellHeight)
class Test_load_data(object):
rasterpath = resource_filename("tidegates.testing.load_data", 'test_dem.tif')
vectorpath = resource_filename("tidegates.testing.load_data", 'test_wetlands.shp')
@nt.raises(ValueError)
def test_bad_datatype(self):
utils.load_data(self.rasterpath, 'JUNK')
@nt.raises(ValueError)
def test_datapath_doesnt_exist(self):
utils.load_data('junk.shp', 'grid')
@nt.raises(ValueError)
def test_datapath_bad_value(self):
utils.load_data(12345, 'grid')
@nt.raises(ValueError)
def test_vector_as_grid_should_fail(self):
x = utils.load_data(self.vectorpath, 'grid')
@nt.raises(ValueError)
def test_vector_as_raster_should_fail(self):
x = utils.load_data(self.vectorpath, 'raster')
def test_raster_as_raster(self):
x = utils.load_data(self.rasterpath, 'raster')
nt.assert_true(isinstance(x, arcpy.Raster))
def test_raster_as_grid_with_caps(self):
x = utils.load_data(self.rasterpath, 'gRId')
nt.assert_true(isinstance(x, arcpy.Raster))
def test_raster_as_layer_not_greedy(self):
x = utils.load_data(self.rasterpath, 'layer', greedyRasters=False)
nt.assert_true(isinstance(x, arcpy.mapping.Layer))
def test_raster_as_layer_greedy(self):
x = utils.load_data(self.rasterpath, 'layer')
nt.assert_true(isinstance(x, arcpy.Raster))
def test_vector_as_shape(self):
x = utils.load_data(self.vectorpath, 'shape')
nt.assert_true(isinstance(x, arcpy.mapping.Layer))
def test_vector_as_layer_with_caps(self):
x = utils.load_data(self.vectorpath, 'LAyeR')
nt.assert_true(isinstance(x, arcpy.mapping.Layer))
def test_already_a_layer(self):
lyr = arcpy.mapping.Layer(self.vectorpath)
x = utils.load_data(lyr, 'layer')
nt.assert_equal(x, lyr)
def test_already_a_raster(self):
raster = arcpy.Raster(self.rasterpath)
x = utils.load_data(raster, 'raster')
nt.assert_true(isinstance(x, arcpy.Raster))
nptest.assert_array_almost_equal(*utils.rasters_to_arrays(x, raster))
class _polygons_to_raster_mixin(object):
    """Shared test body for ``utils.polygons_to_raster`` at several cell
    sizes; subclasses provide ``kwargs``, ``known_shape`` and
    ``known_counts`` via ``setup``."""
    testfile = resource_filename("tidegates.testing.polygons_to_raster", "test_zones.shp")
    # Expected unique cell values (-999 is the NoData fill).
    known_values = numpy.array([-999, 16, 150])
    @nptest.dec.skipif(not tgtest.has_spatial)
    def test_process(self):
        raster = utils.polygons_to_raster(self.testfile, "GeoID", **self.kwargs)
        nt.assert_true(isinstance(raster, arcpy.Raster))
        array = utils.rasters_to_arrays(raster, squeeze=True)
        arcpy.management.Delete(raster)
        flat_arr = array.flatten()
        bins = numpy.bincount(flat_arr[flat_arr > 0])
        nptest.assert_array_almost_equal(numpy.unique(array), self.known_values)
        nptest.assert_array_almost_equal(bins[bins > 0], self.known_counts)
        nt.assert_tuple_equal(array.shape, self.known_shape)
class Test_polygons_to_raster_default(_polygons_to_raster_mixin):
    # Default cell size (4): baseline shape and per-zone cell counts.
    def setup(self):
        self.kwargs = {}
        self.known_shape = (854, 661)
        self.known_counts = numpy.array([95274, 36674])
class Test_polygons_to_raster_x02(_polygons_to_raster_mixin):
    # Halved cell size doubles each grid dimension.
    def setup(self):
        self.kwargs = {'cellsize': 2}
        self.known_shape = (1709, 1322)
        self.known_counts = numpy.array([381211, 146710])
class Test_polygons_to_raster_x08(_polygons_to_raster_mixin):
    # Coarser (8) cell size, plus a full-array comparison to a saved raster.
    def setup(self):
        self.kwargs = {'cellsize': 8}
        self.known_shape = (427, 330)
        self.known_counts = numpy.array([23828, 9172])
    @nptest.dec.skipif(not tgtest.has_spatial)
    def test_actual_arrays(self):
        known_raster_file = resource_filename("tidegates.testing.polygons_to_raster", "test_zones_raster.tif")
        known_raster = utils.load_data(known_raster_file, 'raster')
        raster = utils.polygons_to_raster(self.testfile, "GeoID", **self.kwargs)
        arrays = utils.rasters_to_arrays(raster, known_raster)
        arcpy.management.Delete(raster)
        nptest.assert_array_almost_equal(*arrays)
class Test_polygons_to_raster_x16(_polygons_to_raster_mixin):
    # Coarsest (16) cell size.
    def setup(self):
        self.kwargs = {'cellsize': 16}
        self.known_shape = (214, 165)
        self.known_counts = numpy.array([5953, 2288])
def test_clip_dem_to_zones():
    """Clipping the DEM to the zone raster yields an arcpy.Raster whose grid
    matches both the expected dimensions and the zone raster's shape."""
    demfile = resource_filename("tidegates.testing.clip_dem_to_zones", 'test_dem.tif')
    zonefile = resource_filename("tidegates.testing.clip_dem_to_zones", "test_zones_raster_small.tif")
    raster = utils.clip_dem_to_zones(demfile, zonefile)
    zone_r = utils.load_data(zonefile, 'raster')
    arrays = utils.rasters_to_arrays(raster, zone_r)
    dem_a, zone_a = arrays[0], arrays[1]
    arcpy.management.Delete(raster)
    nt.assert_true(isinstance(raster, arcpy.Raster))
    # BUGFIX: known_shape was defined but never asserted against, so the
    # expected-dimensions check silently did nothing.
    known_shape = (146, 172)
    nt.assert_tuple_equal(dem_a.shape, known_shape)
    nt.assert_tuple_equal(dem_a.shape, zone_a.shape)
@nptest.dec.skipif(not tgtest.has_fiona)
def test_raster_to_polygons():
zonefile = resource_filename("tidegates.testing.raster_to_polygons", "input_raster_to_polygon.tif")
knownfile = resource_filename("tidegates.testing.raster_to_polygons", "known_polygons_from_raster_1.shp")
testfile = resource_filename("tidegates.testing.raster_to_polygons", "test_polygons_from_raster_1.shp")
with utils.OverwriteState(True):
zones = utils.load_data(zonefile, 'raster')
known = utils.load_data(knownfile, 'layer')
test = utils.raster_to_polygons(zones, testfile)
tgtest.assert_shapefiles_are_close(test.dataSource, known.dataSource)
utils.cleanup_temp_results(testfile)
@nptest.dec.skipif(not tgtest.has_fiona)
def test_raster_to_polygons_with_new_field():
    """``utils.raster_to_polygons`` should honor the ``newfield`` argument.

    Same flow as ``test_raster_to_polygons`` but asks for the raster values
    to be written to a "GeoID" attribute and compares against a second
    known-good shapefile.
    """
    zonefile = resource_filename("tidegates.testing.raster_to_polygons", "input_raster_to_polygon.tif")
    knownfile = resource_filename("tidegates.testing.raster_to_polygons", "known_polygons_from_raster_2.shp")
    testfile = resource_filename("tidegates.testing.raster_to_polygons", "test_polygons_from_raster_2.shp")
    with utils.OverwriteState(True):
        zones = utils.load_data(zonefile, 'raster')
        known = utils.load_data(knownfile, 'layer')
        test = utils.raster_to_polygons(zones, testfile, newfield="GeoID")
    tgtest.assert_shapefiles_are_close(test.dataSource, known.dataSource)
    utils.cleanup_temp_results(testfile)
@nptest.dec.skipif(not tgtest.has_fiona)
def test_aggregate_polygons():
    """Dissolving polygons on "gridcode" should match the known-good result."""
    inputfile = resource_filename("tidegates.testing.aggregate_polygons", "input_polygons_from_raster.shp")
    knownfile = resource_filename("tidegates.testing.aggregate_polygons", "known_dissolved_polygons.shp")
    testfile = resource_filename("tidegates.testing.aggregate_polygons", "test_dissolved_polygons.shp")
    with utils.OverwriteState(True):
        raw = utils.load_data(inputfile, 'layer')
        known = utils.load_data(knownfile, 'layer')
        test = utils.aggregate_polygons(raw, "gridcode", testfile)
    tgtest.assert_shapefiles_are_close(test.dataSource, known.dataSource)
    utils.cleanup_temp_results(testfile)
def test_mask_array_with_flood():
    """``utils.flood_zones`` should flood each zone up to the given elevation.

    Cells keep their zone ID only where the zone is defined AND the
    topography is at or below the flood elevation (6.0 here); everything
    else becomes 0.  NOTE(review): the function name says "mask_array"
    but the call under test is ``utils.flood_zones`` -- confirm naming.
    """
    # Zone IDs: 0 = no zone, 1 and 2 are two flood zones.
    zones = numpy.array([
        [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
        [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
        [ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
        [ 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0],
        [ 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0],
        [ 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0],
        [ 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    # Synthetic elevations rising from the top-left corner.
    topo = numpy.array([
        [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.],
        [ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.],
        [ 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.],
        [ 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13.],
        [ 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14.],
        [ 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.],
        [ 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.],
        [ 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17.],
        [ 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18.],
        [ 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
        [10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20.],
    ])
    # Expected result: zone IDs retained only where topo <= 6.0.
    known_flooded = numpy.array([
        [ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        [ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
        [ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
        [ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    flooded = utils.flood_zones(zones, topo, 6.0)
    nptest.assert_array_almost_equal(flooded, known_flooded)
class Test_add_field_with_value(object):
    """Tests for ``utils.add_field_with_value`` covering each field type.

    Fix: the string-length checks previously used
    ``nt.assert_true(newfield.length, 15)``, which only asserts the
    truthiness of ``newfield.length`` (the 15 was silently treated as the
    failure *message*), so the length was never actually compared.  They
    now use ``nt.assert_equal``.
    """
    def setup(self):
        self.shapefile = resource_filename("tidegates.testing.add_field_with_value", 'field_adder.shp')
        # Every field a test might add; teardown removes whichever exist.
        self.fields_added = ["_text", "_unicode", "_int", "_float", '_no_valstr', '_no_valnum']
    def teardown(self):
        # Drop any fields the tests created so the fixture stays pristine.
        field_names = [f.name for f in arcpy.ListFields(self.shapefile)]
        for field in self.fields_added:
            if field in field_names:
                arcpy.management.DeleteField(self.shapefile, field)
    def test_float(self):
        # A float value should produce a Double field.
        name = "_float"
        utils.add_field_with_value(self.shapefile, name,
                                   field_value=5.0)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'Double')
    def test_int(self):
        # An int value should produce an Integer field.
        name = "_int"
        utils.add_field_with_value(self.shapefile, name,
                                   field_value=5)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'Integer')
    def test_string(self):
        # A str value should produce a String field of the requested length.
        name = "_text"
        utils.add_field_with_value(self.shapefile, name,
                                   field_value="example_value",
                                   field_length=15)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'String')
        nt.assert_equal(newfield.length, 15)  # was assert_true(length, 15): no-op
    def test_unicode(self):
        # A unicode value should behave exactly like a str value.
        name = "_unicode"
        utils.add_field_with_value(self.shapefile, name,
                                   field_value=u"example_value",
                                   field_length=15)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'String')
        nt.assert_equal(newfield.length, 15)  # was assert_true(length, 15): no-op
    def test_no_value_string(self):
        # Without a value, an explicit field_type of TEXT should suffice.
        name = "_no_valstr"
        utils.add_field_with_value(self.shapefile, name,
                                   field_type='TEXT',
                                   field_length=15)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'String')
        nt.assert_equal(newfield.length, 15)  # was assert_true(length, 15): no-op
    def test_no_value_number(self):
        # Without a value, an explicit field_type of DOUBLE should suffice.
        name = "_no_valnum"
        utils.add_field_with_value(self.shapefile, name,
                                   field_type='DOUBLE')
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.shapefile)])
        newfield = arcpy.ListFields(self.shapefile, name)[0]
        nt.assert_equal(newfield.type, u'Double')
    @nt.raises(ValueError)
    def test_no_value_no_field_type(self):
        # Neither a value nor a type -> the helper cannot infer anything.
        utils.add_field_with_value(self.shapefile, "_willfail")
    @nt.raises(ValueError)
    def test_overwrite_existing_no(self):
        # Re-adding an existing field without overwrite=True must fail.
        utils.add_field_with_value(self.shapefile, "existing")
    def test_overwrite_existing_yes(self):
        # ...but succeed when overwrite is explicitly requested.
        utils.add_field_with_value(self.shapefile, "existing",
                                   overwrite=True,
                                   field_type="LONG")
class Test_cleanup_temp_results(object):
    """Tests for ``utils.cleanup_temp_results`` with every accepted input kind.

    ``cleanup_temp_results`` is exercised with bare file names, absolute
    paths, raster objects, geoprocessing Results, and map layers; each
    variant must delete the two temporary rasters created in ``setup``.
    """
    def setup(self):
        self.workspace = os.path.abspath(resource_filename('tidegates.testing', 'cleanup_temp_results'))
        self.template_file = resource_filename('tidegates.testing.cleanup_temp_results', 'test_dem.tif')
        self.template = utils.load_data(self.template_file, 'raster')
        # Two throwaway rasters of random data, saved into the workspace.
        raster1 = utils.array_to_raster(numpy.random.normal(size=(30, 30)), self.template)
        raster2 = utils.array_to_raster(numpy.random.normal(size=(60, 60)), self.template)
        self.name1 = 'temp_1.tif'
        self.name2 = 'temp_2.tif'
        self.path1 = os.path.join(self.workspace, self.name1)
        self.path2 = os.path.join(self.workspace, self.name2)
        with utils.OverwriteState(True), utils.WorkSpace(self.workspace):
            raster1.save(self.path1)
            raster2.save(self.path2)
    @nt.nottest
    def check_outcome(self):
        # Shared post-condition: both temp rasters must be gone.
        nt.assert_false(os.path.exists(os.path.join(self.workspace, 'temp_1.tif')))
        nt.assert_false(os.path.exists(os.path.join(self.workspace, 'temp_2.tif')))
    def test_with_names_in_a_workspace(self):
        with utils.WorkSpace(self.workspace):
            utils.cleanup_temp_results(self.name1, self.name2)
            self.check_outcome()
    def test_with_paths_absolute(self):
        utils.cleanup_temp_results(self.path1, self.path2)
        self.check_outcome()
    def test_with_rasters(self):
        with utils.WorkSpace(self.workspace):
            raster1 = utils.load_data(self.path1, 'raster')
            raster2 = utils.load_data(self.path2, 'raster')
            utils.cleanup_temp_results(raster1, raster2)
            self.check_outcome()
    def test_with_results(self):
        # arcpy.Result objects: mock getOutput so no real tool has to run.
        with utils.WorkSpace(self.workspace):
            res1 = arcpy.Result(toolname='Clip_management')
            res2 = arcpy.Result(toolname='Clip_management')
            with mock.patch.object(res1, 'getOutput', return_value='temp_1.tif'), \
                 mock.patch.object(res2, 'getOutput', return_value='temp_2.tif'):
                utils.cleanup_temp_results(res1, res2)
            self.check_outcome()
    def test_with_layers(self):
        with utils.WorkSpace(self.workspace):
            lyr1 = utils.load_data('temp_1.tif', 'layer', greedyRasters=False)
            lyr2 = utils.load_data('temp_2.tif', 'layer', greedyRasters=False)
            utils.cleanup_temp_results(lyr1, lyr2)
            self.check_outcome()
    @nt.raises(ValueError)
    def test_with_bad_input(self):
        # Unsupported argument types must be rejected loudly.
        utils.cleanup_temp_results(1, 2, ['a', 'b', 'c'])
    def teardown(self):
        # Best-effort cleanup in case a test failed before deleting.
        with utils.WorkSpace(self.workspace):
            utils.cleanup_temp_results('temp_1.tif', 'temp_2.tif')
@nptest.dec.skipif(not tgtest.has_fiona)
def test_intersect_polygon_layers():
    """Intersecting two polygon layers should match the known-good shapefile."""
    input1_file = resource_filename("tidegates.testing.intersect_polygons", "intersect_input1.shp")
    input2_file = resource_filename("tidegates.testing.intersect_polygons", "intersect_input2.shp")
    known_file = resource_filename("tidegates.testing.intersect_polygons", "intersect_known.shp")
    output_file = resource_filename("tidegates.testing.intersect_polygons", "intersect_output.shp")
    with utils.OverwriteState(True):
        output = utils.intersect_polygon_layers(
            output_file,
            input1_file,
            input2_file,
        )
    nt.assert_true(isinstance(output, arcpy.mapping.Layer))
    tgtest.assert_shapefiles_are_close(output_file, known_file)
    utils.cleanup_temp_results(output)
class Test_groupby_and_aggregate():
    """Tests for ``utils.groupby_and_aggregate`` (default counts + custom reducers)."""
    # Expected building counts per GeoID with the default aggregation.
    known_counts = {16.0: 32, 150.0: 2}
    buildings = resource_filename("tidegates.testing.groupby_and_aggregate", "flooded_buildings.shp")
    group_col = 'GeoID'
    count_col = 'STRUCT_ID'
    area_op = 'SHAPE@AREA'
    areas = resource_filename("tidegates.testing.groupby_and_aggregate", "intersect_input1.shp")
    # Expected summed areas per group (map units squared).
    known_areas = {2: 1327042.1024, 7: 1355433.0192, 12: 1054529.2882}
    def test_defaults(self):
        # aggfxn=None should fall back to counting records per group.
        counts = utils.groupby_and_aggregate(
            self.buildings,
            self.group_col,
            self.count_col,
            aggfxn=None
        )
        nt.assert_dict_equal(counts, self.known_counts)
    def test_area(self):
        # A custom reducer summing SHAPE@AREA per group.
        areadict = utils.groupby_and_aggregate(
            self.areas,
            self.group_col,
            self.area_op,
            aggfxn=lambda g: sum([row[1] for row in g])
        )
        for key in areadict.keys():
            nt.assert_almost_equal(
                areadict[key],
                self.known_areas[key],
                delta=0.01
            )
    def test_recarry_sort_no_args(self):
        # Sanity check: sorting a recarray with no args orders by the first
        # field, then the second -- the ordering groupby relies on.
        known = numpy.array([
            ('A', 1.), ('A', 2.), ('A', 3.), ('A', 4.),
            ('B', 1.), ('B', 2.), ('B', 3.), ('B', 4.),
            ('C', 1.), ('C', 2.), ('C', 3.), ('C', 4.),
        ], dtype=[('GeoID', 'S4'), ('Area', float)])
        test = numpy.array([
            ('A', 1.), ('B', 1.), ('C', 3.), ('A', 4.),
            ('C', 4.), ('A', 2.), ('C', 1.), ('A', 3.),
            ('B', 2.), ('C', 2.), ('B', 4.), ('B', 3.),
        ], dtype=[('GeoID', 'S4'), ('Area', float)])
        test.sort()
        nptest.assert_array_equal(test, known)
    @nt.raises(ValueError)
    def test_bad_group_col(self):
        # Unknown group column must raise.
        counts = utils.groupby_and_aggregate(
            self.buildings,
            "JUNK",
            self.count_col
        )
    @nt.raises(ValueError)
    def test_bad_count_col(self):
        # Unknown value column must raise.
        counts = utils.groupby_and_aggregate(
            self.buildings,
            self.group_col,
            "JUNK"
        )
@nt.raises(NotImplementedError)
def test_rename_column():
    """``utils.rename_column`` is currently expected to raise NotImplementedError.

    The body documents the intended round-trip behavior (rename, verify,
    rename back, verify again), but the decorator means the test passes as
    soon as the first ``rename_column`` call raises.
    """
    layer = resource_filename("tidegates.testing.rename_column", "rename_col.dbf")
    oldname = "existing"
    newname = "exists"
    #layer = utils.load_data(inputfile, "layer")
    utils.rename_column(layer, oldname, newname)
    utils._check_fields(layer, newname, should_exist=True)
    utils._check_fields(layer, oldname, should_exist=False)
    utils.rename_column(layer, newname, oldname)
    utils._check_fields(layer, newname, should_exist=False)
    utils._check_fields(layer, oldname, should_exist=True)
class Test_populate_field(object):
    """Tests for ``utils.populate_field`` writing computed values to a field."""
    def setup(self):
        self.shapefile = resource_filename("tidegates.testing.populate_field", 'populate_field.shp')
        self.field_added = "newfield"
    def teardown(self):
        # Remove the field each test adds so the fixture stays pristine.
        arcpy.management.DeleteField(self.shapefile, self.field_added)
    def test_with_dictionary(self):
        # Map each FID 0-6 to itself; anything else would get -1.
        value_dict = {n: n for n in range(7)}
        value_fxn = lambda row: value_dict.get(row[0], -1)
        utils.add_field_with_value(self.shapefile, self.field_added, field_type="LONG")
        # Pass the named function (the old code rebuilt an identical lambda
        # inline and left ``value_fxn`` unused).
        utils.populate_field(
            self.shapefile,
            value_fxn,
            self.field_added,
            "FID"
        )
        with arcpy.da.SearchCursor(self.shapefile, [self.field_added, "FID"]) as cur:
            for row in cur:
                nt.assert_equal(row[0], row[1])
    def test_with_general_function(self):
        # The new field should hold the square of each feature's FID.
        utils.add_field_with_value(self.shapefile, self.field_added, field_type="LONG")
        utils.populate_field(
            self.shapefile,
            lambda row: row[0]**2,
            self.field_added,
            "FID"
        )
        with arcpy.da.SearchCursor(self.shapefile, [self.field_added, "FID"]) as cur:
            for row in cur:
                nt.assert_equal(row[0], row[1] ** 2)
class Test_copy_data(object):
    """Tests for ``utils.copy_data`` copying layers into a destination folder."""
    destfolder = resource_filename("tidegates.testing.copy_data", "output")
    srclayers = [
        resource_filename("tidegates.testing.copy_data", "copy2.shp"),
        resource_filename("tidegates.testing.copy_data", "copy1.shp"),
    ]
    # Expected output paths, parallel to ``srclayers``.
    output = [
        resource_filename("tidegates.testing.copy_data.output", "copy2.shp"),
        resource_filename("tidegates.testing.copy_data.output", "copy1.shp"),
    ]
    def teardown(self):
        utils.cleanup_temp_results(*self.output)
    @nptest.dec.skipif(not tgtest.has_fiona)
    def test_list(self):
        # Copying several layers returns a list of layers, one per source.
        with utils.OverwriteState(True):
            newlayers = utils.copy_data(self.destfolder, *self.srclayers)
        nt.assert_true(isinstance(newlayers, list))
        for newlyr, newname, oldname in zip(newlayers, self.output, self.srclayers):
            nt.assert_true(isinstance(newlyr, arcpy.mapping.Layer))
            tgtest.assert_shapefiles_are_close(newname, oldname)
    @nptest.dec.skipif(not tgtest.has_fiona)
    def test_single_squeeze_false(self):
        # A single source still comes back wrapped in a list by default.
        with utils.OverwriteState(True):
            newlayers = utils.copy_data(self.destfolder, *self.srclayers[:1])
        nt.assert_true(isinstance(newlayers, list))
        for newlyr, newname, oldname in zip(newlayers[:1], self.output[:1], self.srclayers[:1]):
            nt.assert_true(isinstance(newlyr, arcpy.mapping.Layer))
            tgtest.assert_shapefiles_are_close(newname, oldname)
    @nptest.dec.skipif(not tgtest.has_fiona)
    def test_single_squeeze_true(self):
        # With squeeze=True a single source returns the bare layer.
        # (A duplicated isinstance assertion was removed here.)
        with utils.OverwriteState(True):
            newlayer = utils.copy_data(self.destfolder, *self.srclayers[:1], squeeze=True)
        nt.assert_true(isinstance(newlayer, arcpy.mapping.Layer))
        tgtest.assert_shapefiles_are_close(self.output[0], self.srclayers[0])
@nptest.dec.skipif(not tgtest.has_fiona)
def test_concat_results():
    """Concatenating two input shapefiles should match the known-good result."""
    known = resource_filename('tidegates.testing.concat_results', 'known.shp')
    with utils.OverwriteState(True):
        # First argument is the output path; the rest are concatenated inputs.
        test = utils.concat_results(
            resource_filename('tidegates.testing.concat_results', 'result.shp'),
            resource_filename('tidegates.testing.concat_results', 'input1.shp'),
            resource_filename('tidegates.testing.concat_results', 'input2.shp')
        )
    nt.assert_true(isinstance(test, arcpy.mapping.Layer))
    tgtest.assert_shapefiles_are_close(test.dataSource, known)
    utils.cleanup_temp_results(test)
@nptest.dec.skipif(not tgtest.has_fiona)
def test_join_results_to_baseline():
    """Joining results onto a baseline layer should match the known result.

    NOTE(review): ``known`` points at 'merge_result.shp', which is also the
    first argument passed to ``join_results_to_baseline`` -- if that first
    argument is the output path, the known file is overwritten before the
    comparison.  Confirm against the implementation.
    """
    known = resource_filename('tidegates.testing.join_results', 'merge_result.shp')
    with utils.OverwriteState(True):
        test = utils.join_results_to_baseline(
            resource_filename('tidegates.testing.join_results', 'merge_result.shp'),
            resource_filename('tidegates.testing.join_results', 'merge_join.shp'),
            resource_filename('tidegates.testing.join_results', 'merge_baseline.shp')
        )
    nt.assert_true(isinstance(test, arcpy.mapping.Layer))
    tgtest.assert_shapefiles_are_close(test.dataSource, known)
    utils.cleanup_temp_results(test)
| Geosyntec/python-tidegates | tidegates/tests/test_utils.py | Python | bsd-3-clause | 41,033 |
#!/usr/bin/env python
"""
Read in the time and temperature data for the
transient HRR example.
"""
import numpy as np
# Read in experimental data from file
# (CSV with a header row; names=True turns the header into dtype field names)
data = np.genfromtxt('../Experimental_Data/time_temperature_data.csv',
                     delimiter=',', names=True)
# Set data variables
time = data['time']                # time column of the experiment
temperature = data['temperature']  # matching temperature readings
| koverholt/bayes-fire | Example_Cases/CFAST_Transient_HRR/Scripts/data_cfast_transient.py | Python | bsd-3-clause | 355 |
import RPIO.PWM as PWM
import time
from flask import Flask, url_for, request
import commands
#variable
app = Flask(__name__)
app.config.from_object(__name__)
# BCM GPIO pins wired to each LED color channel.
GPIO_RED = 17
GPIO_GREEN = 27
GPIO_BLUE = 22
# Per-channel brightness correction factors for the URL route (/colorchange2).
Correction_RED = 1.0
Correction_GREEN = 1.0
Correction_BLUE = 1.0
# Per-channel correction factors for the form-POST route (/colorchange).
Correction_RED1 = 1.0
Correction_GREEN1 = 1.0
Correction_BLUE1 = 1.0
# Last-known color components (currently never updated after startup).
r = 0.0
g = 0.0
b = 0.0
# DMA channel used for all PWM pulses.
CHANNEL = 0
# get ip
#intf = 'eth0'
#intf_ip = commands.getoutput("ip address show dev " + intf).split()
#intf_ip = intf_ip[intf_ip.index('inet') + 1].split('/')[0]
#hostIP = intf_ip
# or set ip
hostIP = '192.168.0.100'
# Initialize the PWM subsystem with a 4000 us subcycle on our channel.
PWM.setup()
PWM.init_channel(CHANNEL,4000)
#app.debug = True
@app.route('/')
def api_root():
    """Landing route confirming which address the server is bound to."""
    message = 'Welcome on %s' % hostIP
    return message
#PWM.set_loglevel(PWM.LOG_LEVEL_DEBUG)
@app.route('/colorchange', methods=['GET', 'POST'])
def api_colorchange():
    """Set the RGB LED color from form-encoded POST data.

    POST form fields ``r``, ``g`` and ``b`` carry integer pulse widths;
    each is scaled by its channel's correction factor before being pushed
    to the PWM hardware.  GET requests are acknowledged without action.
    """
    if request.method == 'POST':
        red = int(request.form['r']) * Correction_RED1
        print("red %s" % red)
        green = int(request.form['g']) * Correction_GREEN1
        print("green %s" % green)
        blue = int(request.form['b']) * Correction_BLUE1
        print("blue %s" % blue)
        # Pulses start at tick 0; the width is the corrected channel value.
        PWM.add_channel_pulse(CHANNEL, GPIO_RED, 0, int(red))
        PWM.add_channel_pulse(CHANNEL, GPIO_GREEN, 0, int(green))
        PWM.add_channel_pulse(CHANNEL, GPIO_BLUE, 0, int(blue))
        # Report the value actually applied; the old code returned the
        # module-level global ``r``, which is never updated and always 0.0.
        return 'post %s' % red
    else:
        return 'get'
@app.route('/colorchange2/<string:red_raw>/<string:green_raw>/<string:blue_raw>', methods=['GET', 'POST'])
def api_colorchange2(red_raw, green_raw, blue_raw):
    """Set the RGB LED color from integer values embedded in the URL path."""
    # Scale each raw value by its channel's correction factor.
    red = Correction_RED * int(red_raw)
    print("red %s" % red)
    green = Correction_GREEN * int(green_raw)
    print("green %s" % green)
    blue = Correction_BLUE * int(blue_raw)
    print("blue %s" % blue)
    # Program all three PWM channels with a pulse starting at tick 0.
    for gpio_pin, level in ((GPIO_RED, red), (GPIO_GREEN, green), (GPIO_BLUE, blue)):
        PWM.add_channel_pulse(CHANNEL, gpio_pin, 0, int(level))
    return 'ok'
if __name__ == '__main__':
    # Run the Flask development server on the configured interface.
    # (Stray dataset-separator metadata fused onto the last line was removed.)
    app.debug = True
    app.run(host=hostIP)
'''
Created on Jan 21, 2010
@author: nayeem
'''
import pyvision as pv
import numpy as np
import scipy as sp
from pyvision.vector.SVM import SVM
import csv
import os.path
import sys
sys.setrecursionlimit(1500)
class multiSVM:
    """Load the iris CSV data set and expose train/test splits.

    Rows with even indices become the training set and the following odd
    rows become the testing set.  Also provides small numeric helpers
    (Mahalanobis distance and an RBF kernel) used by the SVM experiments.

    Fixes: Python-2-only constructs (``print`` statements, ``reader.next()``,
    binary-mode csv open, ``map`` fed to numpy) replaced with forms valid on
    both Python 2 and 3; unused locals (``pred_labels``) removed.
    """
    def __init__(self):
        """Read the iris data and cache the train/test split."""
        self.data = self.readData()
        self.train_data = self.data[0]
        self.train_labels = self.data[1]
        self.test_data = self.data[2]
        #self.runSVM()
    def trainData(self):
        """Return the training feature matrix (even-indexed iris rows)."""
        return self.train_data
    def trainLabels(self):
        """Return the labels aligned with :meth:`trainData`."""
        return self.train_labels
    def testData(self):
        """Return the testing feature matrix (odd-indexed iris rows)."""
        return self.test_data
    def runSVM(self):
        """Stub that will eventually train an SVM on the cached data."""
        svm = SVM()  # instantiated but not yet wired up to the data
        print("I am in the SVM module now")
    def readData(self):
        """Parse iris.csv and split alternating rows into train/test sets.

        Returns:
            tuple: ``(train_data, train_labels, test_data)`` numpy arrays;
            the test labels are intentionally not returned.
        """
        IRIS_PATH = os.path.join(pv.__path__[0], 'data', 'ml', 'iris.csv')
        # Text mode works on both Python 2 and 3; the old "rb" broke the
        # csv reader under Python 3.
        readcsv = csv.reader(open(IRIS_PATH, "r"))
        next(readcsv)  # skip the header row
        data = []
        labels = []
        for row in readcsv:
            # Columns 1-4 hold the four measurements; column 5 is the label.
            data.append([float(value) for value in row[1:5]])
            labels.append(row[5])
        iris_data = np.array(data)
        iris_labels = np.array(labels)
        # Even indices -> training; the following odd indices -> testing.
        iris_training = np.arange(0, len(iris_data), 2)
        iris_testing = iris_training + 1
        train_data = iris_data[iris_training, :]
        train_labels = iris_labels[iris_training]
        test_data = iris_data[iris_testing, :]
        return train_data, train_labels, test_data
    def mahalanobisDist(self, group1, group2):
        """Mahalanobis distance between the means of two sample groups.

        The covariance is pooled from both groups, with each group's
        biased (1/n) covariance weighted by its sample count.

        Args:
            group1, group2: 2-D arrays with samples in rows.

        Returns:
            float: the Mahalanobis distance between the group means.
        """
        mean1 = group1.mean(axis=0)
        mean2 = group2.mean(axis=0)
        gp1Centered = group1 - mean1
        gp2Centered = group2 - mean2
        n1 = np.size(gp1Centered, axis=0)
        n2 = np.size(gp2Centered, axis=0)
        # Biased (divide-by-n) covariance estimates for each group.
        cov1 = np.dot(gp1Centered.T, gp1Centered) / n1
        cov2 = np.dot(gp2Centered.T, gp2Centered) / n2
        # Pool the covariances, weighting each by its group size.
        pooledCov = (n1 * cov1 + n2 * cov2) / (n1 + n2)
        meandiff = mean1 - mean2
        prod = np.dot(np.dot(meandiff, np.linalg.inv(pooledCov)), meandiff.T)
        return np.sqrt(prod)
    def rbfKernel(self, vecA, vecB, sigma):
        """Gaussian (RBF) kernel value between two vectors."""
        vec_diff = vecA - vecB
        return np.exp(-np.dot(vec_diff, vec_diff.T) / (2 * sigma ** 2))
class Node:
    """A binary-tree node holding a list of classes and their data."""
    def __init__(self):
        # Payload: class names grouped at this node and their samples.
        self.classList = []
        self.classData = []
        # Routing flag used by Tree.insert: -1 sends a node to the left.
        self.pos = 0
        # Child links; None until an insert attaches a subtree.
        self.leftChild = None
        self.rightChild = None
class Tree:
    """A simple binary tree of :class:`Node` objects.

    Nodes inserted with ``pos == -1`` descend to the left; all others
    descend to the right.

    Fixes: the original referenced the non-existent attribute
    ``self.newNode`` when setting the root (AttributeError) and *called*
    Node instances when attaching children (``parent.leftChild = newNode()``,
    a TypeError since Node is not callable).
    """
    root = Node()  # legacy class-level attribute; shadowed per-instance in __init__
    def __init__(self):
        self.root = None
    def insert(self, classlist, classData, pos):
        """Insert a new node carrying ``classlist``/``classData`` at ``pos``."""
        newNode = Node()
        newNode.classList = classlist
        newNode.classData = classData
        newNode.pos = pos
        if self.root is None:
            # First insertion: the new node becomes the root.
            self.root = newNode
            return
        curr = self.root
        while True:
            parent = curr
            if newNode.pos == -1:
                curr = curr.leftChild
                if curr is None:
                    parent.leftChild = newNode
                    return
            else:
                curr = curr.rightChild
                if curr is None:
                    parent.rightChild = newNode
                    return
# Exploratory script: load the iris split and print per-class statistics.
ms = multiSVM()
trainingdata = ms.trainData()
traininglabels = ms.trainLabels()
testdata = ms.testData()
#print 'training data:\n' + repr(traininglabels)
#length = len(traininglabels)
#rows,cols = np.shape(traininglabels)
#print traininglabels[length-1]
classes = np.unique(traininglabels) # Unique classes
num_classes = len(classes)
num_features = np.size(trainingdata,axis=1) # Columns of training Data
num_samples = np.size(trainingdata,axis=0) # Number of samples
print '#classes: '+repr(num_classes)
print '#num_features: ' + repr(num_features)
print '#num_samples: ' + repr(num_samples)
# Accumulate per-class means, covariances, and raw data subsets.
means = []
covs = []
class_data = []
for i in np.arange(0,num_classes):
    print classes[i]
    # Boolean mask selecting this class's training rows.
    mask = traininglabels==classes[i]
    numThisClass = sum(mask)
    print numThisClass
    trThisClass = trainingdata[mask,:]
    class_data.append(trThisClass)
    centerThisClass = trThisClass.mean(axis=0)
    print centerThisClass
    means.append(centerThisClass)
    print '**********************************************************************************'
    # NOTE(review): np.cov's default rowvar treats each ROW as a variable;
    # with samples in rows this yields an (n_samples x n_samples) matrix
    # that is likely singular.  Looks like it should be np.cov(trThisClass.T)
    # or rowvar=False -- confirm intent before relying on the inverse below.
    covThisClass = np.cov(trThisClass)
    covs.append(covThisClass)
#    print '**********************************************************************************'
#    print np.shape(covThisClass)
    invCovMatThisClass = np.linalg.inv(covThisClass)
    print np.shape(invCovMatThisClass)
#    assert(0)
| tigerking/pyvision | src/pyvision/data/ml/mulSVM.py | Python | bsd-3-clause | 5,495 |
"""
See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import deque
import fnmatch
import functools
import itertools
import os
from pathlib import Path
from typing import Callable, Deque, Dict, Iterator, List, Tuple, TypeVar, Union
from ..utils import to_list
# Create the PathLike type as an alias for supported types a path can be stored into
PathLike = TypeVar('PathLike', str, os.PathLike)
class DirCmp:
    """Directory comparison object to compare reference and target directory trees.

    Args:
        ref_path: Reference root path, e.g. ``/home/user/pipelines/reference``.
        target_path: Target root path, e.g. ``/home/user/pipelines/target``.

    Attributes:
        ref_path (Path): Reference directory path.
        target_path (Path): Target directory path.
        common_files (Set[str]): Files shared between reference and target directories.
        ref_only (Set[str]): Files/subdirectories only found in the reference directory.
        target_only (Set[str]): Files/subdirectories only found in the target directory.
        subdirs (Dict[Path, DirCmp]): Shared subdirectories between reference and target directories.

    Raises:
        OSError: If either reference or target directories do not exist.

    """
    def __init__(self, ref_path: PathLike, target_path: PathLike) -> None:
        self.ref_path = Path(ref_path)
        if not self.ref_path.exists():
            raise OSError(f"Reference directory '{ref_path}' not found")
        self.target_path = Path(target_path)
        if not self.target_path.exists():
            raise OSError(f"Target directory '{target_path}' not found")
        # os.walk yields (dirpath, dirnames, filenames); [1:] keeps the
        # top-level directory and file name lists for each root.
        ref_dirnames, ref_filenames = next(os.walk(self.ref_path))[1:]
        ref_dnames = set(ref_dirnames)
        ref_fnames = set(ref_filenames)
        target_dirnames, target_filenames = next(os.walk(self.target_path))[1:]
        target_dnames = set(target_dirnames)
        target_fnames = set(target_filenames)
        self.common_files = ref_fnames & target_fnames
        # Get files/subdirectories only present in the reference directory
        self.ref_only = ref_fnames - target_fnames
        for ref_only_dname in ref_dnames - target_dnames:
            # Record every file under a reference-only subdirectory,
            # keyed by its path relative to the reference root.
            for path, dummy, files in os.walk(self.ref_path / ref_only_dname):
                rel_path = os.path.relpath(path, self.ref_path)
                self.ref_only |= {os.path.join(rel_path, fname) for fname in files}
        # Get files/subdirectories only present in the target directory
        self.target_only = target_fnames - ref_fnames
        for target_only_dname in target_dnames - ref_dnames:
            for path, dummy, files in os.walk(self.target_path / target_only_dname):
                rel_path = os.path.relpath(path, self.target_path)
                self.target_only |= {os.path.join(rel_path, fname) for fname in files}
        # Recurse into each shared subdirectory, building the subtree map.
        self.subdirs = {}  # type: Dict[Path, DirCmp]
        for dirname in ref_dnames & target_dnames:
            self.subdirs[Path(dirname)] = DirCmp(self.ref_path / dirname, self.target_path / dirname)
    def _traverse(self, attr: str, patterns: Union[str, List] = None,
                  paths: Union[PathLike, List] = None) -> Iterator[str]:
        """Yields each element of the requested attribute found in the directory trees.

        This method traverses the shared directory tree in breadth-first order.

        Args:
            attr: Attribute to return, i.e. ``common_files``, ``ref_only`` or ``target_only``.
            patterns: Filenames yielded will match at least one of these glob patterns.
            paths: Relative directory/file paths to traverse.

        Raises:
            ValueError: If one of `paths` is not part of the shared directory tree.

        """
        nodes_left = deque()  # type: Deque[Tuple[Path, DirCmp]]
        # Fetch and append the root node of each relative path
        for rel_path in to_list(paths):
            try:
                # Walk down self.subdirs one path component at a time.
                node = functools.reduce(lambda x, y: x.subdirs[Path(y)], Path(rel_path).parts, self)
            except KeyError:
                # Suppress exception context to display only the ValueError
                raise ValueError(f"Path '{rel_path}' not found in shared directory tree") from None
            nodes_left.append((Path(rel_path), node))
        # If no nodes were added, add the root as the starting point
        if not nodes_left:
            nodes_left.append((Path(), self))
        # Prefix each pattern with "**" to match also files within subdirectories (for reference- /
        # target-only files)
        patterns = [f"**{glob}" for glob in to_list(patterns)]
        while nodes_left:
            dirname, node = nodes_left.pop()
            # Append subdirectories to the list of directories left to traverse
            nodes_left.extend([(dirname / subdir, subnode) for subdir, subnode in node.subdirs.items()])
            if patterns:
                # Get every element of the requested attribute that matches at least one of the patterns
                mapping = map(functools.partial(fnmatch.filter, getattr(node, attr)), patterns)
                # Remove element repetitions, result of its name matching more than one pattern
                elements = set(itertools.chain(*mapping))
            else:
                elements = getattr(node, attr)
            for ename in elements:
                yield str(dirname / str(ename))
    def apply_test(self, test_func: Callable, patterns: Union[str, List] = None,
                   paths: Union[PathLike, List] = None) -> List[str]:
        """Returns the files in the shared directory tree for which the test function returns True.

        Args:
            test_func: Test function applied to each tuple reference- / target-file. It has to expect two
                ``PathLike`` parameters and return a boolean, like::

                    def test_func(ref_filepath: PathLike, target_filepath: PathLike) -> bool:

            patterns: Filenames returned will match at least one of these glob patterns.
            paths: Relative directory/file paths to evaluate (including their subdirectories).

        """
        positives = []
        for filepath in self._traverse('common_files', patterns, paths):
            if test_func(self.ref_path / filepath, self.target_path / filepath):
                positives.append(filepath)
        return positives
    def common_list(self, patterns: Union[str, List] = None, paths: Union[PathLike, List] = None
                   ) -> List[str]:
        """Returns the files/directories found in the shared directory tree.

        Args:
            patterns: Filenames returned will match at least one of these glob patterns.
            paths: Relative directory/file paths to return (including their subdirectories).

        """
        return list(self._traverse('common_files', patterns, paths))
    def ref_only_list(self, patterns: Union[str, List] = None, paths: Union[PathLike, List] = None
                     ) -> List[str]:
        """Returns the files/directories only found in the reference directory tree.

        Args:
            patterns: Filenames returned will match at least one of these glob patterns.
            paths: Relative directory/file paths to return (including their subdirectories).

        """
        return list(self._traverse('ref_only', patterns, paths))
    def target_only_list(self, patterns: Union[str, List] = None, paths: Union[PathLike, List] = None
                        ) -> List[str]:
        """Returns the files/directories only found in the target directory tree.

        Args:
            patterns: Filenames returned will match at least one of these glob patterns.
            paths: Relative directory/file paths to return (including their subdirectories).

        """
        return list(self._traverse('target_only', patterns, paths))
| Ensembl/ensembl-compara | src/python/lib/ensembl/compara/filesys/dircmp.py | Python | apache-2.0 | 8,473 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 10:53:38 2016
@author: nwillemse
"""
from datetime import datetime
from sqlalchemy import (Integer, Column, String, Float, DateTime, Time,
BigInteger, ForeignKey, UniqueConstraint)
from sqlalchemy.orm import relationship
from ..sqlite_db import Base
class Symbol(Base):
    """A tradeable instrument, tied to one exchange and one data vendor."""
    __tablename__ = 'symbol'
    id = Column(Integer, primary_key=True, autoincrement=True)
    exchange_id = Column(Integer, ForeignKey('exchange.id'), nullable=False)
    data_vendor_id = Column(Integer, ForeignKey('data_vendor.id'), nullable=False)
    ticker = Column(String(50), nullable=False)
    exchange_ticker = Column(String(50))
    name = Column(String(50))
    type = Column(String(25))
    sector = Column(String(25))
    currency = Column(String(10))
    # Contract economics (presumably futures-style fields -- confirm usage).
    big_point_value = Column(Integer)
    minimum_tick_size = Column(Float)
    tick_value = Column(Float)
    margin = Column(Float)
    created_date = Column(DateTime, default=datetime.now)
    last_updated_date = Column(DateTime, default=datetime.now)
    # Each vendor may list a given ticker only once.
    UniqueConstraint(data_vendor_id, ticker, name='symbol_u1')
    data_vendor = relationship("DataVendor", back_populates="symbols")
    exchange = relationship("Exchange", back_populates="symbols")
    bar_data = relationship("BarData", back_populates="symbols")
    tick_data = relationship("TickData", back_populates="symbols")
    def __repr__(self):
        # Exhaustive field dump for debugging.
        return ("<Symbol(id=%s exchange_id=%s data_vendor_id=%s ticker=%s "
                "exchange_ticker=%s name=%s type=%s sector=%s currency=%s "
                "big_point_value=%s minimum_tick_size=%s tick_value=%s "
                "margin=%s created_date=%s last_updated_date=%s)>" % (
                    self.id, self.exchange_id, self.data_vendor_id, self.ticker,
                    self.exchange_ticker, self.name, self.type, self.sector,
                    self.currency, self.big_point_value, self.minimum_tick_size,
                    self.tick_value, self.margin, self.created_date,
                    self.last_updated_date
                )
                )
class DataVendor(Base):
    """A provider of market data (e.g. a quote/history feed)."""
    __tablename__ = 'data_vendor'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), nullable=False, unique=True)
    website_url = Column(String(50))
    support_email = Column(String(50))
    created_date = Column(DateTime, default=datetime.now)
    last_updated_date = Column(DateTime, default=datetime.now)
    # All symbols sourced from this vendor.
    symbols = relationship(
        "Symbol", order_by=Symbol.id, back_populates="data_vendor"
    )
    def __repr__(self):
        return ('<DataVendor(id=%s name=%s website=%s support_email=%s '
                'created_date=%s last_updated_date=%s)>' % (
                    self.id, self.name, self.website_url, self.support_email,
                    self.created_date, self.last_updated_date
                )
                )
class Exchange(Base):
    """A trading venue where symbols are listed."""
    __tablename__ = 'exchange'
    id = Column(Integer, primary_key=True, autoincrement=True)
    abbrev = Column(String(50), nullable=False)
    name = Column(String(50), nullable=False, unique=True)
    city = Column(String(50))
    country = Column(String(50))
    currency = Column(String(10))
    # Offset from a reference timezone, stored as a time-of-day value.
    timezone_offset = Column(Time)
    created_date = Column(DateTime, default=datetime.now)
    last_updated_date = Column(DateTime, default=datetime.now)
    # All symbols listed on this exchange.
    symbols = relationship(
        "Symbol", order_by=Symbol.id, back_populates="exchange"
    )
    def __repr__(self):
        return ('<Exchange(id=%s abbrev=%s name=%s city=%s '
                'country=%s currency=%s timezone_offset=%s '
                'created_date=%s last_updated_date=%s)>' % (
                    self.id, self.abbrev, self.name, self.city,
                    self.country, self.currency, self.timezone_offset,
                    self.created_date, self.last_updated_date
                )
                )
class BarData(Base):
    """OHLC(V) price bars, keyed by (symbol, bar size, timestamp)."""
    __tablename__ = 'bar_data'
    # Composite primary key: one row per symbol, per bar interval, per bar start.
    symbol_id = Column(Integer, ForeignKey('symbol.id'), primary_key=True)
    bar_size = Column(String(10), nullable=False, primary_key=True)
    timestamp = Column(DateTime, primary_key=True)
    open_price = Column(Float, nullable=False)
    high_price = Column(Float, nullable=False)
    low_price = Column(Float, nullable=False)
    close_price = Column(Float, nullable=False)
    # Volume/open interest are optional (e.g. unavailable for some feeds).
    volume = Column(BigInteger)
    open_interest = Column(BigInteger)
    # Bookkeeping timestamps; last_updated_date is not auto-updated on edit.
    created_date = Column(DateTime, default=datetime.now)
    last_updated_date = Column(DateTime, default=datetime.now)
    # Many-to-one back to the owning Symbol (attribute name is plural but
    # each bar row references a single symbol).
    symbols = relationship(
        "Symbol", order_by=Symbol.id, back_populates="bar_data"
    )
    def __repr__(self):
        """Return a debug-friendly string of the bar's key and OHLC columns."""
        return ('<BarData(symbol_id=%s bar_size=%s timestamp=%s open_price=%s '
                'high_price=%s low_price=%s close_price=%s created_date=%s '
                'last_updated_date=%s)>' % (
                    self.symbol_id, self.bar_size, self.timestamp,
                    self.open_price, self.high_price, self.low_price,
                    self.close_price, self.created_date, self.last_updated_date
                )
        )
class TickData(Base):
    """Bid/ask quote ticks, keyed by (symbol, timestamp)."""
    __tablename__ = 'tick_data'
    # Composite primary key: one row per symbol per tick timestamp.
    symbol_id = Column(Integer, ForeignKey('symbol.id'), primary_key=True)
    timestamp = Column(DateTime, primary_key=True)
    bid_price = Column(Float, nullable=False)
    ask_price = Column(Float, nullable=False)
    # Bookkeeping timestamps; last_updated_date is not auto-updated on edit.
    created_date = Column(DateTime, default=datetime.now)
    last_updated_date = Column(DateTime, default=datetime.now)
    # Many-to-one back to the owning Symbol (attribute name is plural but
    # each tick row references a single symbol).
    symbols = relationship(
        "Symbol", order_by=Symbol.id, back_populates="tick_data")
    def __repr__(self):
        """Return a debug-friendly string of the tick's key and quote columns."""
        return ('<TickData(symbol_id=%s timestamp=%s bid_price=%s ask_price=%s'
                ' created_date=%s last_updated_date=%s)>' % (
                    self.symbol_id, self.timestamp, self.bid_price,
                    self.ask_price, self.created_date, self.last_updated_date
                )
        )
| nwillemse/nctrader | nctrader/price_handler/sqlite_db/models.py | Python | mit | 6,079 |
'''
Test proxy serving stale content when DNS lookup fails
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.ContinueOnFail = True
# Set up hierarchical caching processes: a child proxy that forwards to a
# parent proxy, both remapping to an origin that will never resolve.
ts_child = Test.MakeATSProcess("ts_child")
ts_parent = Test.MakeATSProcess("ts_parent")
# Deliberately unresolvable hostname so every revalidation fails at DNS lookup,
# forcing the proxies to fall back to (possibly stale) cached copies.
server_name = "http://unknown.domain.com/"
Test.testName = "STALE"
# Config child proxy to route to parent proxy
ts_child.Disk.records_config.update({
    'proxy.config.url_remap.pristine_host_hdr': 1,
    # Serve objects up to 10s past expiry when the origin is unreachable.
    'proxy.config.http.cache.max_stale_age': 10,
    'proxy.config.http.parent_proxy.self_detect': 0,
})
ts_child.Disk.parent_config.AddLine(
    f'dest_domain=. parent=localhost:{ts_parent.Variables.port} round_robin=consistent_hash go_direct=false'
)
ts_child.Disk.remap_config.AddLine(
    f'map http://localhost:{ts_child.Variables.port} {server_name}'
)
# Configure parent proxy
ts_parent.Disk.records_config.update({
    'proxy.config.url_remap.pristine_host_hdr': 1,
    'proxy.config.http.cache.max_stale_age': 10,
})
ts_parent.Disk.remap_config.AddLine(
    f'map http://localhost:{ts_parent.Variables.port} {server_name}'
)
ts_parent.Disk.remap_config.AddLine(
    f'map {server_name} {server_name}'
)
# Object to push to proxies: two pre-built responses, max-age=5 for the
# child and max-age=10 for the parent, so they go stale at different times.
stale_5 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=5\n\nCACHED"
stale_10 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=10\n\nCACHED"
# Testing scenarios: sleep 7s (stale but within max_stale_age=10) then 15s
# (past max-age + max_stale_age, so no longer servable).
child_curl_request = (
    # Test child serving stale with failed DNS OS lookup
    f'curl -X PUSH -d "{stale_5}" "http://localhost:{ts_child.Variables.port}";'
    f'curl -X PUSH -d "{stale_10}" "http://localhost:{ts_parent.Variables.port}";'
    f'sleep 7; curl -s -v http://localhost:{ts_child.Variables.port};'
    f'sleep 15; curl -s -v http://localhost:{ts_child.Variables.port};'
    # Test parent serving stale with failed DNS OS lookup
    f'curl -X PUSH -d "{stale_5}" "http://localhost:{ts_parent.Variables.port}";'
    f'sleep 7; curl -s -v http://localhost:{ts_parent.Variables.port};'
    f'sleep 15; curl -s -v http://localhost:{ts_parent.Variables.port};'
)
# Test case for when parent server is down but child proxy can serve cache object
tr = Test.AddTestRun()
tr.Processes.Default.Command = child_curl_request
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts_child)
tr.Processes.Default.StartBefore(ts_parent)
# Compare curl's verbose output against the recorded golden transcript.
tr.Processes.Default.Streams.stderr = "gold/serve_stale_dns_fail.gold"
tr.StillRunningAfter = ts_child
tr.StillRunningAfter = ts_parent
| duke8253/trafficserver | tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py | Python | apache-2.0 | 3,341 |
# -*- mode: python; coding: utf-8 -*-
# Copyright (C) 2017 Laboratoire de Recherche et Développement
# de l'Epita
#
# This file is part of Spot, a model checking library.
#
# Spot is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Spot is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import spot
phi1 = """GFb
X(!b | GF!a)
GFa
G(a M b)
(!a | b) & GFb
(a U Xa) | G(b & Fa)
GF!b
(b & GF!b) | (!b & FGb)
b | (a & XF(b R a)) | (!a & XG(!b U !a))"""
def equivalent(a, phi):
    """Return True iff automaton `a` recognizes exactly the language of `phi`.

    Both inclusions are checked via emptiness of intersections:
    L(not phi) /\ L(a) and L(phi) /\ L(dual(a)) must both be empty.
    """
    phi_negated = spot.formula.Not(phi)
    a_dual = spot.dualize(a)
    mismatch = (spot.translate(phi_negated).intersects(a)
                or spot.translate(phi).intersects(a_dual))
    return not mismatch
def test_phi(phi):
    """Translate `phi`, dualize the automaton, weaken it, and assert that the
    result is equivalent to the negation of the formula."""
    automaton = spot.translate(phi, 'TGBA', 'SBAcc')
    weakened = spot.to_weak_alternating(spot.dualize(automaton))
    assert equivalent(weakened, spot.formula.Not(spot.formula(phi)))
# Run the dualize/weaken equivalence check on every formula (one per line).
for p in phi1.split('\n'):
    print(p)
    test_phi(p)
phi2 = spot.formula("(G((F a) U b)) W a")
a2 = spot.automaton("""
HOA: v1
States: 7
Start: 1
AP: 2 "a" "b"
acc-name: generalized-Buchi 2
Acceptance: 2 Inf(0)&Inf(1)
properties: trans-labels explicit-labels trans-acc complete univ-branch
--BODY--
State: 0
[t] 0 {0 1}
State: 1
[0] 0 {0 1}
[1] 1&2 {0 1}
[0&!1] 1&3 {0 1}
[!0&!1] 1&4 {0 1}
State: 2
[1] 2 {0 1}
[0&!1] 3 {0 1}
[!0&!1] 4 {0 1}
State: 3
[1] 2 {0 1}
[0&!1] 3 {1}
[!0&!1] 4 {1}
State: 4
[0&1] 2 {0 1}
[0&!1] 3 {1}
[!0&!1] 4 {1}
[!0&1] 5 {0 1}
State: 5
[0&1] 2 {0 1}
[0&!1] 3 {0 1}
[!0&1] 5 {0}
[!0&!1] 6 {0}
State: 6
[0&1] 2 {0 1}
[0&!1] 3 {1}
[!0&1] 5 {0}
[!0&!1] 6
--END--
""")
a2 = spot.to_weak_alternating(a2)
assert equivalent(a2, phi2)
| mcc-petrinets/formulas | spot/tests/python/toweak.py | Python | mit | 2,094 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Python Intro documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 18 07:40:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
]
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Introduction to Programming using Python'
copyright = '2015'
author = 'birl.org'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'python'
html_theme_path = ['_themes']
# html_theme = 'sphinx13'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = { 'show_related': True }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonIntrodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# One tuple per LaTeX output document:
# (source start file, target name, title, author, documentclass).
# Fixed typo in the title string: "Introductino" -> "Introduction".
latex_documents = [
    (master_doc, 'PythonIntro.tex', 'Introduction to Programming with Python',
     'Tara Birl', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythonintro', 'Python Intro Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonIntro', 'Python Intro Documentation',
author, 'PythonIntro', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| birlorg/pythonTutorial | conf.py | Python | bsd-2-clause | 11,512 |
"""Fix function attribute names (f.func_x -> f.__x__)."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixFuncattrs(fixer_base.BaseFix):
    """2to3 fixer that rewrites legacy ``f.func_x`` attribute accesses to
    their Python 3 ``f.__x__`` equivalents."""

    PATTERN = """
    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
                                  | 'func_name' | 'func_defaults' | 'func_code'
                                  | 'func_dict') > any* >
    """

    def transform(self, node, results):
        """Replace the matched ``func_*`` leaf with its dunder-named form."""
        matched = results["attr"][0]
        # Drop the 'func_' prefix (5 characters) and wrap in double underscores.
        dunder_name = u"__%s__" % matched.value[5:]
        matched.replace(Name(dunder_name, prefix=matched.prefix))
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/lib2to3/fixes/fix_funcattrs.py | Python | mit | 638 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-21 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a ``kind`` discriminator to DataDependency, set the reverse
    accessors on its ``child``/``parent`` foreign keys, and reset the
    model's database table name to Django's default.
    """
    dependencies = [
        ('flow', '0003_data_dependency_1'),
    ]
    operations = [
        # Existing rows get kind='io'; preserve_default=False drops the
        # default from the schema once existing rows have been filled.
        migrations.AddField(
            model_name='datadependency',
            name='kind',
            field=models.CharField(choices=[('io', 'Input/output dependency'), ('subprocess', 'Subprocess')], default='io', max_length=16),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='datadependency',
            name='child',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parents_dependency', to='flow.Data'),
        ),
        migrations.AlterField(
            model_name='datadependency',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='children_dependency', to='flow.Data'),
        ),
        # table=None reverts the model to the auto-generated table name.
        migrations.AlterModelTable(
            name='datadependency',
            table=None,
        ),
    ]
| jberci/resolwe | resolwe/flow/migrations/0004_data_dependency_2.py | Python | apache-2.0 | 1,201 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2007 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import sys
# Install gettext "noop" function in case const.py gets imported directly.
import __builtin__
__builtin__.__dict__['N_'] = lambda a: a
# Config directory
if sys.platform == "win32":
USER_DIR = os.environ.get("APPDATA", "~\\Application Data")
else:
USER_DIR = os.environ.get("XDG_CONFIG_HOME", "~/.config")
USER_DIR = os.path.join(
os.path.expanduser(USER_DIR), "MusicBrainz", "Picard"
)
USER_PLUGIN_DIR = os.path.join(USER_DIR, "plugins")
# AcoustID client API key
ACOUSTID_KEY = 'v8pQ6oyB'
ACOUSTID_HOST = 'api.acoustid.org'
ACOUSTID_PORT = 80
FPCALC_NAMES = ['fpcalc', 'pyfpcalc']
# MB OAuth client credentials
MUSICBRAINZ_OAUTH_CLIENT_ID = 'ACa9wsDX19cLp-AeEP-vVw'
MUSICBRAINZ_OAUTH_CLIENT_SECRET = 'xIsvXbIuntaLuRRhzuazOA'
# Cover art archive URL and port
CAA_HOST = "coverartarchive.org"
CAA_PORT = 80
# URLs
PICARD_URLS = {
'documentation': "https://picard.musicbrainz.org/docs/",
'troubleshooting': "https://picard.musicbrainz.org/docs/troubleshooting/",
'home': "https://picard.musicbrainz.org/",
'doc_options': "https://picard.musicbrainz.org/docs/options/",
'doc_scripting': "https://picard.musicbrainz.org/docs/scripting",
'plugins': "https://picard.musicbrainz.org/plugins/",
'forum': "https://community.metabrainz.org/c/picard",
'donate': "https://metabrainz.org/donate",
'chromaprint': "https://acoustid.org/chromaprint#download",
'acoustid_apikey': "https://acoustid.org/api-key",
'doc_cover_art_types': "https://musicbrainz.org/doc/Cover_Art/Types",
'acoustid_track': "https://acoustid.org/track/",
}
# Various Artists MBID
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
# Special purpose track titles
SILENCE_TRACK_TITLE = '[silence]'
DATA_TRACK_TITLE = '[data track]'
# Release formats
from picard.const.attributes import MB_ATTRIBUTES
# Build lookup tables from the MusicBrainz attribute dump.  Only entries
# whose key carries the relevant 'DB:...' prefix are kept; each table maps
# the display name to itself (identity mapping used for membership tests).
RELEASE_FORMATS = {}
RELEASE_PRIMARY_GROUPS = {}
RELEASE_SECONDARY_GROUPS = {}
for k, v in MB_ATTRIBUTES.iteritems():
    if k.startswith(u'DB:medium_format/name:'):
        RELEASE_FORMATS[v] = v
    elif k.startswith(u'DB:release_group_primary_type/name:'):
        RELEASE_PRIMARY_GROUPS[v] = v
    elif k.startswith(u'DB:release_group_secondary_type/name:'):
        RELEASE_SECONDARY_GROUPS[v] = v
# Release countries
from picard.const.countries import RELEASE_COUNTRIES
# List of available user interface languages
from picard.const.languages import UI_LANGUAGES
# List of alias locales
from picard.const.locales import ALIAS_LOCALES
# List of official musicbrainz servers - must support SSL for mblogin requests (such as collections).
MUSICBRAINZ_SERVERS = [
'musicbrainz.org',
'beta.musicbrainz.org',
]
# Plugins API
PLUGINS_API = {
'host': 'picard.musicbrainz.org',
'port': 80,
'endpoint': {
'plugins': '/api/v1/plugins/',
'download': '/api/v1/download/'
}
}
| dufferzafar/picard | picard/const/__init__.py | Python | gpl-2.0 | 3,754 |
""" This file contains defines parameters for nipy that we use to fill
settings in setup.py, the nipy top-level docstring, and for building the
docs. In setup.py in particular, we exec this file, so it cannot import nipy
"""
# nipy version information. An empty _version_extra corresponds to a
# full release. '.dev' as a _version_extra string means this is a development
# version
_version_major = 0
_version_minor = 9
_version_micro = 0
_version_extra = '.pre'
def get_nipype_gitversion():
    """Nipype version as reported by the last commit in git

    Looks for a ``.git`` directory next to the installed ``nipype``
    package (or, if nipype cannot be imported, in the current working
    directory) and returns the last ``-``-separated component of
    ``git describe``.

    Returns
    -------
    None or str
        Version of NiPype according to git, or None when the source tree
        is not a git checkout.
    """
    import os
    import subprocess
    try:
        import nipype
        gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
                                                os.path.pardir))
    except Exception:
        # nipype may not be importable yet (e.g. while setup.py runs);
        # fall back to the current working directory.  Was a bare
        # `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        gitpath = os.getcwd()
    gitpathgit = os.path.join(gitpath, '.git')
    if not os.path.exists(gitpathgit):
        # Not a git checkout (e.g. installed from a release tarball).
        return None
    ver = None
    try:
        o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,
                                stdout=subprocess.PIPE).communicate()
    except Exception:
        pass
    else:
        ver = o.strip().split('-')[-1]
    return ver
if '.dev' in _version_extra:
gitversion = get_nipype_gitversion()
if gitversion:
_version_extra = '.' + gitversion + '-' + 'dev'
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
__version__ = "%s.%s.%s%s" % (_version_major,
_version_minor,
_version_micro,
_version_extra)
CLASSIFIERS = ["Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
description = 'Neuroimaging in Python: Pipelines and Interfaces'
# Note: this long_description is actually a copy/paste from the top-level
# README.txt, so that it shows up nicely on PyPI. So please remember to edit
# it only in one place and sync it correctly.
long_description = \
"""
========================================================
NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
========================================================
Current neuroimaging software offer users an incredible opportunity to
analyze data using a variety of different algorithms. However, this has
resulted in a heterogeneous collection of specialized applications
without transparent interoperability or a uniform operating interface.
*Nipype*, an open-source, community-developed initiative under the
umbrella of NiPy, is a Python project that provides a uniform interface
to existing neuroimaging software and facilitates interaction between
these packages within a single workflow. Nipype provides an environment
that encourages interactive exploration of algorithms from different
packages (e.g., SPM, FSL, FreeSurfer, AFNI, Slicer), eases the
design of workflows within and between packages, and reduces the
learning curve necessary to use different packages. Nipype is creating a
collaborative platform for neuroimaging software development in a
high-level language and addressing limitations of existing pipeline
systems.
*Nipype* allows you to:
* easily interact with tools from different software packages
* combine processing steps from different software packages
* develop new workflows faster by reusing common steps from old ones
* process data faster by running it in parallel on many cores/machines
* make your research easily reproducible
* share your processing workflows with the community
"""
# versions
NIBABEL_MIN_VERSION = '1.0'
NETWORKX_MIN_VERSION = '1.0'
NUMPY_MIN_VERSION = '1.3'
SCIPY_MIN_VERSION = '0.7'
TRAITS_MIN_VERSION = '4.0'
NAME = 'nipype'
MAINTAINER = "nipype developers"
MAINTAINER_EMAIL = "nipy-devel@neuroimaging.scipy.org"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://nipy.org/nipype"
DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master"
LICENSE = "BSD license"
CLASSIFIERS = CLASSIFIERS
# Package author metadata; fixed typo "developmers" -> "developers"
# (matches MAINTAINER above).
AUTHOR = "nipype developers"
AUTHOR_EMAIL = "nipy-devel@neuroimaging.scipy.org"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
ISRELEASE = _version_extra == ''
VERSION = __version__
REQUIRES = ["nibabel (>=1.0)", "networkx (>=1.0)", "numpy (>=1.3)",
"scipy (>=0.7)", "traits (>=4.0)"]
STATUS = 'stable'
| FredLoney/nipype | nipype/info.py | Python | bsd-3-clause | 4,915 |
from __future__ import absolute_import
from mock import patch
from datadog.util.hostname import get_hostname
from sentry.metrics.datadog import DatadogMetricsBackend
from sentry.testutils import TestCase
class DatadogMetricsBackendTest(TestCase):
    """Verify the Datadog backend forwards metrics to ThreadStats with the
    configured prefix, instance tag, and local hostname."""

    def setUp(self):
        self.backend = DatadogMetricsBackend(prefix='sentrytest.')

    @patch('datadog.threadstats.base.ThreadStats.increment')
    def test_incr(self, increment_mock):
        self.backend.incr('foo', instance='bar')
        increment_mock.assert_called_once_with(
            'sentrytest.foo', 1,
            tags=['instance:bar'],
            host=get_hostname(),
        )

    @patch('datadog.threadstats.base.ThreadStats.timing')
    def test_timing(self, timing_mock):
        self.backend.timing('foo', 30, instance='bar')
        timing_mock.assert_called_once_with(
            'sentrytest.foo', 30,
            sample_rate=1,
            tags=['instance:bar'],
            host=get_hostname(),
        )
| Kryz/sentry | tests/sentry/metrics/test_datadog.py | Python | bsd-3-clause | 979 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from shimehari.testsuite import ShimehariTestCase
from shimehari.configuration import ConfigManager, Config
class TestConfigManager(ShimehariTestCase):
    """Unit tests for ConfigManager registration and lookup.

    NOTE(review): ConfigManager appears to keep its registry at class level
    (``ConfigManager.configrations`` -- the misspelling is the real attribute
    name), so tests that need isolation reset it to ``{}`` explicitly.
    """
    def testHasNotConfig(self):
        # With the registry emptied, hasConfig must report False.
        ConfigManager.configrations = {}
        rv = ConfigManager.hasConfig()
        self.assertEqual(rv, False)
    def testAddConfig(self):
        # Adding a valid Config must not raise.
        config = Config()
        self.assertNotEqual(ConfigManager.addConfig(config), TypeError)
    def testAddConfigRaiseTypeError(self):
        # Anything that is not a Config instance must be rejected.
        dummy = u'|/゚U゚|丿'
        self.assertRaises(TypeError, ConfigManager.addConfig, dummy)
    def testAddConfigs(self):
        # Bulk registration of several environment configs.
        configA = Config()
        configB = Config('production')
        configC = Config('test')
        self.assertNotEqual(ConfigManager.addConfigs([configA, configB, configC]), TypeError)
    def testHasConfig(self):
        config = Config()
        ConfigManager.addConfig(config)
        rv = ConfigManager.hasConfig()
        self.assertEqual(rv, True)
    def testGetConfigs(self):
        configA = Config()
        configB = Config('production')
        ConfigManager.addConfigs([configA, configB])
        self.assertNotEqual(ConfigManager.getConfigs(), {})
    def testGetConfig(self):
        # Reset the registry so the lookup can only find the config added here.
        config = Config()
        ConfigManager.configrations = {}
        ConfigManager.addConfig(config)
        self.assertEqual(ConfigManager.getConfig('development'), config)
def suite():
    """Build the unittest suite containing the ConfigManager tests."""
    # Renamed the local from `suite` so it no longer shadows this function.
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(TestConfigManager))
    return test_suite
| glassesfactory/Shimehari | shimehari/testsuite/test_configuration.py | Python | bsd-3-clause | 1,575 |
from math import sqrt, cos, sin, fabs
from opencmiss.zincwidgets.sceneviewerwidget import SceneviewerWidget
import time
time_0 = time.time()
initial_view = [[49.79080069116709, 588.9318153465964, -363.0583058231066],
[2.285999298095703, -71.78712940216064, -44.651397705078125],
[0.022226151722452247, -0.43531795844346366, -0.9000023740170076]]
class PelvisViewerWidget(SceneviewerWidget):
    def __init__(self, parent):
        """Create the viewer and defer camera setup until the GL context exists."""
        super(PelvisViewerWidget, self).__init__(parent)
        # Reference eye position / up vector captured by resetInitial();
        # consumed by updateFromIMU().
        self._initial_eye = None
        self._initial_up = None
        # Previous up vector, seeded from initial_view in setInitialView().
        self._up_prev = None
        # The sceneviewer is only usable after graphics initialization.
        self.graphicsInitialized.connect(self.setInitialView)
    def resetView(self, gender):
        """Restore the camera to the hard-coded initial view.

        NOTE(review): `gender` is currently unused; the commented-out code
        below suggests gender-specific views were planned -- confirm intent.
        """
        sv = self.getSceneviewer()
        # print(sv.getLookatParameters())
        sv.setLookatParametersNonSkew(initial_view[0], initial_view[1], initial_view[2])
        # if self._visibleGender == self._ui.radioButtonMale.text():
        #     self._ui.widgetScene.setLookatParameters(male_initial_view)
        # else:
        #     self._ui.widgetScene.setLookatParameters(female_initial_view)
def setInitialView(self):
sv = self.getSceneviewer()
self._up_prev = initial_view[2][:]
sv.setLookatParametersNonSkew(initial_view[0], initial_view[1], initial_view[2])
def resetInitial(self):
sv = self.getSceneviewer()
print(sv.getLookatParameters())
_, self._initial_eye = sv.getEyePosition()
# _, la = sv.getLookatPosition()
_, self._initial_up = sv.getUpVector()
def updateFromIMU(self, axis, angle):
# print('axis = ', axis)
# print('angle = ', angle)
# print('time = {0}: angle = {1}, axis = {2}'.format(time.time() - time_0, angle, axis))
sv = self.getSceneviewer()
sv.beginChange()
# _, eye = sv.getEyePosition()
_, la = sv.getLookatPosition()
# _, up = sv.getUpVector()
eye = self._initial_eye[:]
up = self._initial_up[:]
# double a[3] = { axis[0], axis[1], axis[2] };
a = axis[:]
# /* get coordinate system moving with rotation, consisting of the axis a */
# /* and two othogonal vectors b and c in the plane normal to a. */
# /* v = vector towards viewer */
# v[0]=rel_eyex=scene_viewer->eyex-scene_viewer->lookatx;
# v[1]=rel_eyey=scene_viewer->eyey-scene_viewer->lookaty;
# v[2]=rel_eyez=scene_viewer->eyez-scene_viewer->lookatz;
v = [1.0, 0.0, 0.0]
v[0] = eye[0] - la[0]
v[1] = eye[1] - la[1]
v[2] = eye[2] - la[2]
view_vec = v[:]
# /* check v is not too closely in line with a */
# if (0.8 < fabs(v[0]*a[0]+v[1]*a[1]+v[2]*a[2]))
# {
# /* use up-vector instead */
# v[0]=scene_viewer->upx;
# v[1]=scene_viewer->upy;
# v[2]=scene_viewer->upz;
# }
if 0.8 < fabs(v[0] * a[0] + v[1] * a[1] + v[2] * a[2]):
v = up[:]
# normalize3(v);
v_mag = sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
v[0] /= v_mag
v[1] /= v_mag
v[2] /= v_mag
# /* b = axis (x) a, a vector in plane of rotation */
# b[0]=a[1]*v[2]-a[2]*v[1];
# b[1]=a[2]*v[0]-a[0]*v[2];
# b[2]=a[0]*v[1]-a[1]*v[0];
b = [1.0, 0.0, 0.0]
b[0] = a[1] * v[2] - a[2] * v[1]
b[1] = a[2] * v[0] - a[0] * v[2]
b[2] = a[0] * v[1] - a[1] * v[0]
# normalize3(b);
b_mag = sqrt(b[0] * b[0] + b[1] * b[1] + b[2] * b[2])
b[0] /= b_mag
b[1] /= b_mag
b[2] /= b_mag
# /* c = b (x) axis, another unit vector in plane of rotation */
# c[0]=a[1]*b[2]-a[2]*b[1];
# c[1]=a[2]*b[0]-a[0]*b[2];
# c[2]=a[0]*b[1]-a[1]*b[0];
c = [1.0, 0.0, 0.0]
c[0] = a[1] * b[2] - a[2] * b[1]
c[1] = a[2] * b[0] - a[0] * b[2]
c[2] = a[0] * b[1] - a[1] * b[0]
# /* define eye position and up vector relative to a, b and c */
# rel_eyea=a[0]*rel_eyex+a[1]*rel_eyey+a[2]*rel_eyez;
# rel_eyeb=b[0]*rel_eyex+b[1]*rel_eyey+b[2]*rel_eyez;
# rel_eyec=c[0]*rel_eyex+c[1]*rel_eyey+c[2]*rel_eyez;
view_vec_new = [1.0, 0.0, 0.0]
view_vec_new[0] = a[0] * view_vec[0] + a[1] * view_vec[1] + a[2] * view_vec[2]
view_vec_new[1] = b[0] * view_vec[0] + b[1] * view_vec[1] + b[2] * view_vec[2]
view_vec_new[2] = c[0] * view_vec[0] + c[1] * view_vec[1] + c[2] * view_vec[2]
# upa=a[0]*scene_viewer->upx+a[1]*scene_viewer->upy+a[2]*scene_viewer->upz;
# upb=b[0]*scene_viewer->upx+b[1]*scene_viewer->upy+b[2]*scene_viewer->upz;
# upc=c[0]*scene_viewer->upx+c[1]*scene_viewer->upy+c[2]*scene_viewer->upz;
# /* get new b and c from clockwise rotation by <angle> radians about a */
up_prime = [1.0, 0.0, 0.0]
up_prime[0] = a[0] * up[0] + a[1] * up[1] + a[2] * up[2]
up_prime[1] = b[0] * up[0] + b[1] * up[1] + b[2] * up[2]
up_prime[2] = c[0] * up[0] + c[1] * up[1] + c[2] * up[2]
# cos_angle=cos(angle);
# sin_angle=sin(angle);
cos_angle = cos(angle)
sin_angle = sin(angle)
# new_b[0]=cos_angle*b[0]+sin_angle*c[0];
# new_b[1]=cos_angle*b[1]+sin_angle*c[1];
# new_b[2]=cos_angle*b[2]+sin_angle*c[2];
b_new = [1.0, 0.0, 0.0]
b_new[0] = cos_angle * b[0] + sin_angle * c[0]
b_new[1] = cos_angle * b[1] + sin_angle * c[1]
b_new[2] = cos_angle * b[2] + sin_angle * c[2]
# new_c[0]=cos_angle*c[0]-sin_angle*b[0];
# new_c[1]=cos_angle*c[1]-sin_angle*b[1];
# new_c[2]=cos_angle*c[2]-sin_angle*b[2];
c_new = [1.0, 0.0, 0.0]
c_new[0] = cos_angle * c[0] - sin_angle * b[0]
c_new[1] = cos_angle * c[1] - sin_angle * b[1]
c_new[2] = cos_angle * c[2] - sin_angle * b[2]
# /* get eye position and up vector back in world coordinates */
# scene_viewer->eyex=scene_viewer->lookatx+
# a[0]*rel_eyea+new_b[0]*rel_eyeb+new_c[0]*rel_eyec;
# scene_viewer->eyey=scene_viewer->lookaty+
# a[1]*rel_eyea+new_b[1]*rel_eyeb+new_c[1]*rel_eyec;
# scene_viewer->eyez=scene_viewer->lookatz+
# a[2]*rel_eyea+new_b[2]*rel_eyeb+new_c[2]*rel_eyec;
eye_new = [1.0, 0.0, 0.0]
eye_new[0] = la[0] + a[0] * view_vec_new[0] + b_new[0] * view_vec_new[1] + c_new[0] * view_vec_new[2]
eye_new[1] = la[1] + a[1] * view_vec_new[0] + b_new[1] * view_vec_new[1] + c_new[1] * view_vec_new[2]
eye_new[2] = la[2] + a[2] * view_vec_new[0] + b_new[2] * view_vec_new[1] + c_new[2] * view_vec_new[2]
# scene_viewer->upx=a[0]*upa+new_b[0]*upb+new_c[0]*upc;
# scene_viewer->upy=a[1]*upa+new_b[1]*upb+new_c[1]*upc;
# scene_viewer->upz=a[2]*upa+new_b[2]*upb+new_c[2]*upc;
up_new = [1.0, 0.0, 0.0]
up_new[0] = a[0] * up_prime[0] + b_new[0] * up_prime[1] + c_new[0] * up_prime[2]
up_new[1] = a[1] * up_prime[0] + b_new[1] * up_prime[1] + c_new[1] * up_prime[2]
up_new[2] = a[2] * up_prime[0] + b_new[2] * up_prime[1] + c_new[2] * up_prime[2]
sv.setEyePosition(eye_new)
sv.setUpVector(up_new)
sv.endChange()
| ABI-Software/MedTechCoRE-Pelvis | src/medtechcore/pelvisdemo/widgets/pelvisviewerwidget.py | Python | apache-2.0 | 7,286 |
import urllib
from urllib.parse import urlparse
def build_url(base_url, path, args_dict=None):
    """Rebuild *base_url* with a new path and optional query string.

    Args:
        base_url: URL whose scheme, netloc, params and fragment are kept.
        path: replacement for the URL's path component.
        args_dict: optional mapping URL-encoded into the query string;
            when None the original query component is left untouched.

    Returns:
        The recomposed URL as a string.
    """
    scheme, netloc, _old_path, params, query, fragment = urlparse(base_url)
    if args_dict is not None:
        query = urllib.parse.urlencode(args_dict)
    return urllib.parse.urlunparse(
        (scheme, netloc, path, params, query, fragment))
| sensidev/drf-requests-jwt | drf_requests_jwt/backends/utils.py | Python | mit | 357 |
# (c) 2013, Ovais Tariq <me@ovaistariq.net>
#
# This file is part of mha_helper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
# Package metadata, read by setup tooling and user-facing version reporting.
# NOTE(review): assigning __name__ at import time overrides the module's real
# name; since this is the 'mha_helper' package __init__ the value matches and
# the effect is benign -- confirm nothing relies on the original __name__.
__name__ = 'mha_helper'
__author__ = 'Ovais Tariq'
__email__ = 'me@ovaistariq.net'
__version__ = '0.4.2'
__url__ = 'https://github.com/ovaistariq/mha-helper'
| ovaistariq/mha-helper | mha_helper/__init__.py | Python | gpl-3.0 | 900 |
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the deploy module."""
from __future__ import print_function
import json
import multiprocessing
import os
import sys
from chromite.cli import command
from chromite.cli import deploy
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import portage_util
from chromite.lib import remote_access
# Restrict these tests to runs inside the ChromeOS chroot: they import the
# 'portage' package, which is only available there.
pytestmark = [cros_test_lib.pytestmark_inside_only]

assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'

if cros_build_lib.IsInsideChroot():
    import portage  # pylint: disable=import-error

# Tests intentionally poke deploy's private helpers (_Emerge, _Unmerge, ...).
# pylint: disable=protected-access
class ChromiumOSDeviceFake(object):
    """Fake for a remote ChromiumOS device that records executed commands."""

    def __init__(self):
        self.board = 'board'
        self.hostname = None
        self.username = None
        self.port = None
        self.lsb_release = None
        self.cmds = []
        self.work_dir = '/testdir/'
        self.selinux_available = False

    def MountRootfsReadWrite(self):
        """Pretend the rootfs was successfully remounted read-write."""
        return True

    def IsSELinuxAvailable(self):
        """Report whether this fake advertises SELinux support."""
        return self.selinux_available

    def IsSELinuxEnforced(self):
        """The fake always claims SELinux is in enforcing mode."""
        return True

    def run(self, cmd, **_kwargs):
        """Record *cmd* so tests can assert on the executed command list."""
        self.cmds.append(cmd)

    def CopyToDevice(self, _src, _dest, _mode='rsync', **_kwargs):
        """Pretend the copy to the device succeeded."""
        return True
class ChromiumOSDeviceHandlerFake(object):
    """Fake for chromite.lib.remote_access.ChomiumOSDeviceHandler."""

    class RemoteAccessFake(object):
        """Fake for chromite.lib.remote_access.RemoteAccess."""

        def __init__(self):
            # Tests set this to the canned output RemoteSh should return.
            self.remote_sh_output = None

        def RemoteSh(self, *_args, **_kwargs):
            """Return a CommandResult wrapping the canned output."""
            return cros_build_lib.CommandResult(output=self.remote_sh_output)

    def __init__(self, *_args, **_kwargs):
        self._agent = self.RemoteAccessFake()
        self.device = ChromiumOSDeviceFake()

    # TODO(dpursell): Mock remote access object in cros_test_lib (brbug.com/986).
    def GetAgent(self):
        """Return the fake RemoteAccess agent."""
        return self._agent

    def __enter__(self):
        return self.device

    def __exit__(self, _type, _value, _traceback):
        pass
class BrilloDeployOperationFake(deploy.BrilloDeployOperation):
    """Deploy operation double that signals a queue after each parse step."""

    def __init__(self, pkg_count, emerge, queue):
        super().__init__(pkg_count, emerge)
        self._queue = queue

    def ParseOutput(self, output=None):
        """Parse as usual, then notify the producer that it may advance."""
        super().ParseOutput(output)
        self._queue.put('advance')
class DbApiFake(object):
    """Fake for Portage dbapi backed by an in-memory package table."""

    def __init__(self, pkgs):
        # Each input row is (CPV, SLOT, RDEPEND, BUILD_TIME).
        self.pkg_db = {
            cpv: {'SLOT': slot, 'RDEPEND': rdeps_raw, 'BUILD_TIME': build_time}
            for cpv, slot, rdeps_raw, build_time in pkgs
        }

    def cpv_all(self):
        """Return every known package CPV."""
        return list(self.pkg_db)

    def aux_get(self, cpv, keys):
        """Return the metadata values for *cpv*, in the order of *keys*."""
        record = self.pkg_db[cpv]
        return [record[key] for key in keys]
class PackageScannerFake(object):
    """Fake for PackageScanner that returns canned scan results."""

    def __init__(self, packages, pkgs_attrs, packages_cpvs=None):
        self.pkgs = packages
        # Preserve original semantics: an empty/None cpv list falls back to
        # the package list itself.
        self.cpvs = packages_cpvs or packages
        self.listed = []
        self.num_updates = 0
        self.pkgs_attrs = pkgs_attrs

    def Run(self, _device, _root, _packages, _update, _deep, _deep_rev):
        """Ignore all arguments and return the canned results."""
        return self.cpvs, self.listed, self.num_updates, self.pkgs_attrs
class PortageTreeFake(object):
    """Fake for a Portage tree, exposing only the dbapi attribute."""

    def __init__(self, dbapi):
        self.dbapi = dbapi
class TestInstallPackageScanner(cros_test_lib.MockOutputTestCase):
    """Test the update package scanner."""

    _BOARD = 'foo_board'
    _BUILD_ROOT = '/build/%s' % _BOARD

    # Simulated device package database: (CPV, SLOT, RDEPEND, BUILD_TIME) rows.
    _VARTREE = [
        ('foo/app1-1.2.3-r4', '0', 'foo/app2 !foo/app3', '1413309336'),
        ('foo/app2-4.5.6-r7', '0', '', '1413309336'),
        ('foo/app4-2.0.0-r1', '0', 'foo/app1 foo/app5', '1413309336'),
        ('foo/app5-3.0.7-r3', '0', '', '1413309336'),
    ]

    def setUp(self):
        """Patch imported modules."""
        self.PatchObject(cros_build_lib, 'GetChoice', return_value=0)
        self.device = ChromiumOSDeviceHandlerFake()
        self.scanner = deploy._InstallPackageScanner(self._BUILD_ROOT)
        self.PatchObject(deploy, '_GetDLCInfo', return_value=(None, None))

    def SetupVartree(self, vartree_pkgs):
        """Make the fake device report *vartree_pkgs* as its installed set."""
        # The scanner reads the device's package list as JSON over remote sh.
        self.device.GetAgent().remote_sh_output = json.dumps(vartree_pkgs)

    def SetupBintree(self, bintree_pkgs):
        """Patch portage so the build root's bintree contains *bintree_pkgs*."""
        bintree = PortageTreeFake(DbApiFake(bintree_pkgs))
        build_root = os.path.join(self._BUILD_ROOT, '')
        portage_db = {build_root: {'bintree': bintree}}
        self.PatchObject(portage, 'create_trees', return_value=portage_db)

    def ValidatePkgs(self, actual, expected, constraints=None):
        """Check package sets match; *constraints* are (needs, needed) order pairs."""
        # Containing exactly the same packages.
        self.assertEqual(sorted(expected), sorted(actual))
        # Packages appear in the right order.
        if constraints is not None:
            for needs, needed in constraints:
                self.assertGreater(actual.index(needs), actual.index(needed))

    def testRunUpdatedVersion(self):
        """A newer version in the bintree is selected for install."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r4'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309336'),
            ('foo/app2-4.5.6-r7', '0', '', '1413309336'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 1)

    def testRunUpdatedBuildTime(self):
        """Same version with a newer BUILD_TIME is selected for install."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.3-r4'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309350'),
            ('foo/app2-4.5.6-r7', '0', '', '1413309336'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 1)

    def testRunExistingDepUpdated(self):
        """An updated dependency already on the device is installed first."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app2 = 'foo/app2-4.5.8-r3'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309350'),
            (app2, '0', '', '1413309350'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        # app2 must be installed before app1 depends on it.
        self.ValidatePkgs(installs, [app1, app2], constraints=[(app1, app2)])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 2)

    def testRunMissingDepUpdated(self):
        """A dependency missing on the device is pulled in (not an update)."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app6 = 'foo/app6-1.0.0-r1'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3 foo/app6', '1413309350'),
            ('foo/app2-4.5.6-r7', '0', '', '1413309336'),
            (app6, '0', '', '1413309350'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1, app6], constraints=[(app1, app6)])
        self.ValidatePkgs(listed, [app1])
        # app6 is a new install, so only app1 counts as an update.
        self.assertEqual(num_updates, 1)

    def testRunExistingRevDepUpdated(self):
        """An updated reverse dependency on the device is reinstalled too."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app4 = 'foo/app4-2.0.1-r3'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309350'),
            (app4, '0', 'foo/app1 foo/app5', '1413309350'),
            ('foo/app5-3.0.7-r3', '0', '', '1413309336'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        # app4 depends on app1, so it must be installed after app1.
        self.ValidatePkgs(installs, [app1, app4], constraints=[(app4, app1)])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 2)

    def testRunMissingRevDepNotUpdated(self):
        """A reverse dependency not installed on the device is not added."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app6 = 'foo/app6-1.0.0-r1'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309350'),
            (app6, '0', 'foo/app1', '1413309350'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 1)

    def testRunTransitiveDepsUpdated(self):
        """Forward and reverse dependencies are followed transitively."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app2 = 'foo/app2-4.5.8-r3'
        app4 = 'foo/app4-2.0.0-r1'
        app5 = 'foo/app5-3.0.8-r2'
        self.SetupBintree([
            (app1, '0', 'foo/app2 !foo/app3', '1413309350'),
            (app2, '0', '', '1413309350'),
            (app4, '0', 'foo/app1 foo/app5', '1413309350'),
            (app5, '0', '', '1413309350'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1, app2, app4, app5],
                          constraints=[(app1, app2), (app4, app1), (app4, app5)])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 4)

    def testRunDisjunctiveDepsExistingUpdated(self):
        """An || ( ... ) dependency satisfied by an installed package stands."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        self.SetupBintree([
            (app1, '0', '|| ( foo/app6 foo/app2 ) !foo/app3', '1413309350'),
            ('foo/app2-4.5.6-r7', '0', '', '1413309336'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 1)

    def testRunDisjunctiveDepsDefaultUpdated(self):
        """An unsatisfied || ( ... ) dependency installs the chosen default."""
        self.SetupVartree(self._VARTREE)
        app1 = 'foo/app1-1.2.5-r2'
        app7 = 'foo/app7-1.0.0-r1'
        self.SetupBintree([
            (app1, '0', '|| ( foo/app6 foo/app7 ) !foo/app3', '1413309350'),
            (app7, '0', '', '1413309350'),
        ])
        installs, listed, num_updates, _ = self.scanner.Run(
            self.device, '/', ['app1'], True, True, True)
        self.ValidatePkgs(installs, [app1, app7], constraints=[(app1, app7)])
        self.ValidatePkgs(listed, [app1])
        self.assertEqual(num_updates, 1)
class TestDeploy(cros_test_lib.ProgressBarTestCase):
    """Test deploy.Deploy."""

    @staticmethod
    def FakeGetPackagesByCPV(cpvs, _strip, _sysroot):
        """Map each CPV to a fake binary package path."""
        return ['/path/to/%s.tbz2' % cpv.pv for cpv in cpvs]

    def setUp(self):
        """Replace device access and deploy internals with fakes/mocks."""
        self.device = ChromiumOSDeviceHandlerFake()
        self.PatchObject(
            remote_access, 'ChromiumOSDeviceHandler', return_value=self.device)
        self.PatchObject(cros_build_lib, 'GetBoard', return_value=None)
        self.PatchObject(cros_build_lib, 'GetSysroot', return_value='sysroot')
        self.package_scanner = self.PatchObject(deploy, '_InstallPackageScanner')
        self.get_packages_paths = self.PatchObject(
            deploy, '_GetPackagesByCPV', side_effect=self.FakeGetPackagesByCPV)
        self.emerge = self.PatchObject(deploy, '_Emerge', return_value=None)
        self.unmerge = self.PatchObject(deploy, '_Unmerge', return_value=None)
        self.PatchObject(deploy, '_GetDLCInfo', return_value=(None, None))

    def testDeployEmerge(self):
        """Test that deploy._Emerge is called for each package."""
        _BINPKG = '/path/to/bar-1.2.5.tbz2'
        def FakeIsFile(fname):
            # Only the explicit binary package path exists on disk.
            return fname == _BINPKG
        packages = ['some/foo-1.2.3', _BINPKG, 'some/foobar-2.0']
        cpvs = ['some/foo-1.2.3', 'to/bar-1.2.5', 'some/foobar-2.0']
        self.package_scanner.return_value = PackageScannerFake(
            packages,
            {'some/foo-1.2.3': {}, _BINPKG: {}, 'some/foobar-2.0': {}},
            cpvs)
        self.PatchObject(os.path, 'isfile', side_effect=FakeIsFile)
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False)
        # Check that package names were correctly resolved into binary packages.
        self.get_packages_paths.assert_called_once_with(
            [portage_util.SplitCPV(p) for p in cpvs], True, 'sysroot')
        # Check that deploy._Emerge is called the right number of times.
        self.assertEqual(self.emerge.call_count, len(packages))
        self.assertEqual(self.unmerge.call_count, 0)

    def testDeployEmergeDLC(self):
        """Test that deploy._Emerge installs images for DLC packages."""
        packages = ['some/foodlc-1.0', 'some/bardlc-2.0']
        cpvs = ['some/foodlc-1.0', 'some/bardlc-2.0']
        self.package_scanner.return_value = PackageScannerFake(
            packages, {'some/foodlc-1.0': {}, 'some/bardlc-2.0': {}}, cpvs)
        self.PatchObject(deploy, '_GetDLCInfo',
                         return_value=('foo_id', 'foo_package'))
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False)
        # Check that dlcservice is restarted (DLC modules are deployed).
        self.assertTrue(['restart', 'dlcservice'] in self.device.device.cmds)

    def testDeployEmergeSELinux(self):
        """Test deploy progress when the device has SELinux"""
        _BINPKG = '/path/to/bar-1.2.5.tbz2'
        def FakeIsFile(fname):
            return fname == _BINPKG
        def GetRestoreconCommand(pkgfile):
            # Expected relabel sequence run around each package extraction.
            remote_path = os.path.join('/testdir/packages/to/', pkgfile)
            return [['setenforce', '0'],
                    ['cd', '/', '&&',
                     'tar', 'tf', remote_path, '|',
                     'restorecon', '-i', '-f', '-'],
                    ['setenforce', '1']]
        self.device.device.selinux_available = True
        packages = ['some/foo-1.2.3', _BINPKG, 'some/foobar-2.0']
        cpvs = ['some/foo-1.2.3', 'to/bar-1.2.5', 'some/foobar-2.0']
        self.package_scanner.return_value = PackageScannerFake(
            packages,
            {'some/foo-1.2.3': {}, _BINPKG: {}, 'some/foobar-2.0': {}},
            cpvs)
        self.PatchObject(os.path, 'isfile', side_effect=FakeIsFile)
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False)
        # Check that package names were correctly resolved into binary packages.
        self.get_packages_paths.assert_called_once_with(
            [portage_util.SplitCPV(p) for p in cpvs], True, 'sysroot')
        # Check that deploy._Emerge is called the right number of times.
        self.assertEqual(self.emerge.call_count, len(packages))
        self.assertEqual(self.unmerge.call_count, 0)
        self.assertEqual(self.device.device.cmds,
                         GetRestoreconCommand('foo-1.2.3.tbz2') +
                         GetRestoreconCommand('bar-1.2.5.tbz2') +
                         GetRestoreconCommand('foobar-2.0.tbz2'))

    def testDeployUnmerge(self):
        """Test that deploy._Unmerge is called for each package."""
        packages = ['foo', 'bar', 'foobar', 'foodlc']
        self.package_scanner.return_value = PackageScannerFake(
            packages, {'foo': {}, 'bar': {}, 'foobar': {},
                       'foodlc': {deploy._DLC_ID: 'foodlc',
                                  deploy._DLC_PACKAGE: 'foopackage'}})
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False,
                      emerge=False)
        # Check that deploy._Unmerge is called the right number of times.
        self.assertEqual(self.emerge.call_count, 0)
        self.assertEqual(self.unmerge.call_count, len(packages))
        # The DLC package must also be uninstalled via dlcservice.
        self.assertEqual(
            self.device.device.cmds,
            [['dlcservice_util', '--uninstall', '--dlc_ids=foodlc'],
             ['restart', 'dlcservice']])

    def testDeployMergeWithProgressBar(self):
        """Test that BrilloDeployOperation.Run() is called for merge."""
        packages = ['foo', 'bar', 'foobar']
        self.package_scanner.return_value = PackageScannerFake(
            packages, {'foo': {}, 'bar': {}, 'foobar': {}})
        run = self.PatchObject(deploy.BrilloDeployOperation, 'Run',
                               return_value=None)
        self.PatchObject(command, 'UseProgressBar', return_value=True)
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False)
        # Check that BrilloDeployOperation.Run was called.
        self.assertTrue(run.called)

    def testDeployUnmergeWithProgressBar(self):
        """Test that BrilloDeployOperation.Run() is called for unmerge."""
        packages = ['foo', 'bar', 'foobar']
        self.package_scanner.return_value = PackageScannerFake(
            packages, {'foo': {}, 'bar': {}, 'foobar': {}})
        run = self.PatchObject(deploy.BrilloDeployOperation, 'Run',
                               return_value=None)
        self.PatchObject(command, 'UseProgressBar', return_value=True)
        deploy.Deploy(None, ['package'], force=True, clean_binpkg=False,
                      emerge=False)
        # Check that BrilloDeployOperation.Run was called.
        self.assertTrue(run.called)

    def testBrilloDeployMergeOperation(self):
        """Test that BrilloDeployOperation works for merge."""
        def func(queue):
            # Emit each expected event, gated by the queue for determinism.
            for event in op.MERGE_EVENTS:
                queue.get()
                print(event)
                sys.stdout.flush()
        queue = multiprocessing.Queue()
        # Emerge one package.
        op = BrilloDeployOperationFake(1, True, queue)
        with self.OutputCapturer():
            op.Run(func, queue)
        # Check that the progress bar prints correctly.
        self.AssertProgressBarAllEvents(len(op.MERGE_EVENTS))

    def testBrilloDeployUnmergeOperation(self):
        """Test that BrilloDeployOperation works for unmerge."""
        def func(queue):
            # Emit each expected event, gated by the queue for determinism.
            for event in op.UNMERGE_EVENTS:
                queue.get()
                print(event)
                sys.stdout.flush()
        queue = multiprocessing.Queue()
        # Unmerge one package.
        op = BrilloDeployOperationFake(1, False, queue)
        with self.OutputCapturer():
            op.Run(func, queue)
        # Check that the progress bar prints correctly.
        self.AssertProgressBarAllEvents(len(op.UNMERGE_EVENTS))
| endlessm/chromium-browser | third_party/chromite/cli/deploy_unittest.py | Python | bsd-3-clause | 17,192 |
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import get_object_or_404
from django.utils.encoding import force_text
from . import utils
class TenantViewMixin(object):
    """Mixin for generic class-based views to handle tenant-enabled objects.

    Use with:

    * ListView
    * DetailView
    * CreateView
    * UpdateView
    * DeleteView
    """
    # Fix: declared here so get_cancellation_url() raises its documented
    # ImproperlyConfigured error instead of an AttributeError when a
    # subclass forgets to set it.
    cancellation_url_name = None
    success_url_name = None

    def dispatch(self, request, *args, **kwargs):
        """Attach the tenant and group to the class.

        Resolves 'group_slug'/'tenant_slug' URL kwargs against the groups and
        tenants the requesting user may access (404 otherwise). When
        multitenancy is disabled, both attributes are set to None.
        """
        if utils.multitenancy_enabled():
            # Imported lazily: multitenancy.auth is only usable when the
            # multitenancy feature is enabled.
            from multitenancy.auth import get_user_groups, get_user_tenants
            available_groups = get_user_groups(request.user)
            self.group = get_object_or_404(available_groups, slug=kwargs['group_slug'])
            available_tenants = get_user_tenants(request.user, self.group)
            self.tenant = get_object_or_404(available_tenants, slug=kwargs['tenant_slug'])
        else:
            self.group = None
            self.tenant = None
        return super(TenantViewMixin, self).dispatch(request, *args, **kwargs)

    def get_cancellation_url(self, *args, **kwargs):
        """Which URL to go to if the user cancels their action."""
        if self.cancellation_url_name:
            return utils.tenancy_reverse(
                self.request, self.cancellation_url_name, *args, **kwargs)
        raise ImproperlyConfigured("No cancellation URL known. Provide a "
                                   "cancellation_url_name.")

    def get_context_data(self, **kwargs):
        """Add the tenant and group to the template context."""
        kwargs['group'] = self.group
        kwargs['tenant'] = self.tenant
        return super(TenantViewMixin, self).get_context_data(**kwargs)

    def get_form_kwargs(self):
        """Pass the current tenant to the form unless the caller already did."""
        kwargs = super(TenantViewMixin, self).get_form_kwargs()
        kwargs.setdefault('tenant', self.tenant)
        return kwargs

    def get_queryset(self):
        """Limit queryset based on tenant if multitenancy is enabled."""
        qs = super(TenantViewMixin, self).get_queryset()
        if utils.multitenancy_enabled():
            if not hasattr(self.model, 'tenantlink'):
                raise ImproperlyConfigured("TenantViewMixin can only be used "
                                           "with tenant-enabled models.")
            qs = qs.filter(tenantlink__tenant=self.tenant)
        return qs

    def get_success_url(self, *args, **kwargs):
        """Add group and tenant slugs to URL resolution if multitenancy is enabled."""
        if self.success_url:
            return force_text(self.success_url)
        if self.success_url_name:
            return utils.tenancy_reverse(self.request, self.success_url_name, *args, **kwargs)
        raise ImproperlyConfigured("No URL to redirect to. Provide a "
                                   "success_url or success_url_name.")
| caktus/rapidsms-decisiontree-app | decisiontree/multitenancy/views.py | Python | bsd-3-clause | 2,928 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity metadata consumed by Ansible's documentation/QA tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_role
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA role
description:
- Add, modify and delete a role within FreeIPA server using FreeIPA API
options:
cn:
description:
- Role name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
description:
description:
- A description of this role-group.
group:
description:
- List of group names assign to this role.
- If an empty list is passed all assigned groups will be unassigned from the role.
- If option is omitted groups will not be checked or changed.
- If option is passed all assigned groups that are not passed will be unassigned from the role.
host:
description:
- List of host names to assign.
- If an empty list is passed all assigned hosts will be unassigned from the role.
- If option is omitted hosts will not be checked or changed.
- If option is passed all assigned hosts that are not passed will be unassigned from the role.
hostgroup:
description:
- List of host group names to assign.
- If an empty list is passed all assigned host groups will be removed from the role.
- If option is omitted host groups will not be checked or changed.
- If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
privilege:
description:
- List of privileges granted to the role.
- If an empty list is passed all assigned privileges will be removed.
- If option is omitted privileges will not be checked or changed.
- If option is passed all assigned privileges that are not passed will be removed.
version_added: "2.4"
service:
description:
- List of service names to assign.
- If an empty list is passed all assigned services will be removed from the role.
- If option is omitted services will not be checked or changed.
- If option is passed all assigned services that are not passed will be removed from the role.
state:
description: State to ensure
default: "present"
choices: ["present", "absent"]
user:
description:
- List of user names to assign.
- If an empty list is passed all assigned users will be removed from the role.
- If option is omitted users will not be checked or changed.
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure role is present
- ipa_role:
name: dba
description: Database Administrators
state: present
user:
- pinky
- brain
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure role with certain details
- ipa_role:
name: another-role
description: Just another role
group:
- editors
host:
- host01.example.com
hostgroup:
- hostgroup01
privilege:
- Group Administrators
- User Administrators
service:
- service01
# Ensure role is absent
- ipa_role:
name: dba
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
role:
description: Role as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class RoleIPAClient(IPAClient):
    """Thin wrapper around the FreeIPA JSON-RPC role_* API methods.

    Every method delegates to IPAClient._post_json with the corresponding
    IPA API method name; 'name' is the role CN and 'item' the payload.
    """

    def __init__(self, module, host, port, protocol):
        super(RoleIPAClient, self).__init__(module, host, port, protocol)

    def role_find(self, name):
        # 'all': True asks the server to return every attribute of the role;
        # the CN is passed as a search criterion rather than a positional name.
        return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})

    def role_add(self, name, item):
        return self._post_json(method='role_add', name=name, item=item)

    def role_mod(self, name, item):
        return self._post_json(method='role_mod', name=name, item=item)

    def role_del(self, name):
        return self._post_json(method='role_del', name=name)

    def role_add_member(self, name, item):
        return self._post_json(method='role_add_member', name=name, item=item)

    # Convenience wrappers mapping each member category onto role_add_member.
    def role_add_group(self, name, item):
        return self.role_add_member(name=name, item={'group': item})

    def role_add_host(self, name, item):
        return self.role_add_member(name=name, item={'host': item})

    def role_add_hostgroup(self, name, item):
        return self.role_add_member(name=name, item={'hostgroup': item})

    def role_add_service(self, name, item):
        return self.role_add_member(name=name, item={'service': item})

    def role_add_user(self, name, item):
        return self.role_add_member(name=name, item={'user': item})

    def role_remove_member(self, name, item):
        return self._post_json(method='role_remove_member', name=name, item=item)

    # Convenience wrappers mapping each member category onto role_remove_member.
    def role_remove_group(self, name, item):
        return self.role_remove_member(name=name, item={'group': item})

    def role_remove_host(self, name, item):
        return self.role_remove_member(name=name, item={'host': item})

    def role_remove_hostgroup(self, name, item):
        return self.role_remove_member(name=name, item={'hostgroup': item})

    def role_remove_service(self, name, item):
        return self.role_remove_member(name=name, item={'service': item})

    def role_remove_user(self, name, item):
        return self.role_remove_member(name=name, item={'user': item})

    def role_add_privilege(self, name, item):
        return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})

    def role_remove_privilege(self, name, item):
        return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
def get_role_dict(description=None):
    """Build the role attribute dict sent to the IPA role_add/role_mod APIs.

    Only attributes that were actually supplied are included, so the server
    is never sent an explicit None value.
    """
    return {} if description is None else {'description': description}
def get_role_diff(client, ipa_role, module_role):
    """Return the keys whose values differ between server state and module args."""
    return client.get_diff(ipa_data=ipa_role, module_data=module_role)
def ensure(module, client):
    """Reconcile the IPA role with the requested module parameters.

    Creates/updates the role and its group/host/hostgroup/privilege/service/
    user memberships for state=present, or deletes it for state=absent.
    Member lists that are None are left untouched. Honors check mode.

    Returns:
        (changed, role): whether anything changed and the role as re-read
        from the server.
    """
    state = module.params['state']
    name = module.params['cn']
    group = module.params['group']
    host = module.params['host']
    hostgroup = module.params['hostgroup']
    privilege = module.params['privilege']
    service = module.params['service']
    user = module.params['user']
    module_role = get_role_dict(description=module.params['description'])
    ipa_role = client.role_find(name=name)
    changed = False
    if state == 'present':
        if not ipa_role:
            # Role does not exist yet: create it (unless in check mode).
            changed = True
            if not module.check_mode:
                ipa_role = client.role_add(name=name, item=module_role)
        else:
            # Role exists: push only the attributes that differ.
            diff = get_role_diff(client, ipa_role, module_role)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    data = {}
                    for key in diff:
                        data[key] = module_role.get(key)
                    client.role_mod(name=name, item=data)
        # For each membership category, reconcile only when a list was given;
        # modify_if_diff adds/removes members so the server matches the list.
        if group is not None:
            changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
                                            client.role_add_group,
                                            client.role_remove_group) or changed
        if host is not None:
            changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
                                            client.role_add_host,
                                            client.role_remove_host) or changed
        if hostgroup is not None:
            changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
                                            client.role_add_hostgroup,
                                            client.role_remove_hostgroup) or changed
        if privilege is not None:
            changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
                                            client.role_add_privilege,
                                            client.role_remove_privilege) or changed
        if service is not None:
            changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
                                            client.role_add_service,
                                            client.role_remove_service) or changed
        if user is not None:
            changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
                                            client.role_add_user,
                                            client.role_remove_user) or changed
    else:
        # state == 'absent': delete the role if it exists.
        if ipa_role:
            changed = True
            if not module.check_mode:
                client.role_del(name)
    return changed, client.role_find(name=name)
def main():
    """Module entry point: build the argument spec, connect and authenticate
    to the IPA server, then apply the requested role state."""
    # Start from the shared ipa_* argument spec (ipa_host, ipa_user, ...).
    argument_spec = ipa_argument_spec()
    argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
                         description=dict(type='str'),
                         group=dict(type='list'),
                         host=dict(type='list'),
                         hostgroup=dict(type='list'),
                         privilege=dict(type='list'),
                         service=dict(type='list'),
                         state=dict(type='str', default='present', choices=['present', 'absent']),
                         user=dict(type='list'))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    client = RoleIPAClient(module=module,
                           host=module.params['ipa_host'],
                           port=module.params['ipa_port'],
                           protocol=module.params['ipa_prot'])
    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, role = ensure(module, client)
        module.exit_json(changed=changed, role=role)
    except Exception as e:
        # Any failure (login, API, diff) is reported through fail_json with
        # the full traceback for debugging.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
    main()
| alxgu/ansible | lib/ansible/modules/identity/ipa/ipa_role.py | Python | gpl-3.0 | 10,522 |
import os
import signal
import time
def signal_usr1(signum, frame):
    """Signal handler: report that SIGUSR1 arrived in the current process."""
    print('Received USR1 in process {}'.format(os.getpid()))
# Demo script: fork, then have the parent deliver SIGUSR1 to the child,
# which has installed signal_usr1 as its handler.
print('Forking...')
child_pid = os.fork()
# os.fork() returns the child's pid in the parent and 0 in the child.
if child_pid:
    print('PARENT: Pausing before sending signal...')
    # Give the child a moment to install its handler before signaling.
    time.sleep(1)
    print('PARENT: Signaling {}'.format(child_pid))
    os.kill(child_pid, signal.SIGUSR1)
else:
    print('CHILD: Setting up signal handler')
    signal.signal(signal.SIGUSR1, signal_usr1)
    print('CHILD: Pausing to wait for signal')
    # The sleep is interrupted early when the signal arrives.
    time.sleep(5)
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_runtime_features/os_kill_example.py | Python | apache-2.0 | 580 |
import re
from wtforms import validators
def validatorCorreo(form, field):
    """WTForms validator: reject values that are not simple e-mail addresses.

    Accepts ``user@domain.com`` or ``user@domain.es`` (the evident intent of
    the original ``com|es`` alternation). Raises
    ``validators.ValidationError`` otherwise.
    """
    # Bug fix: the old pattern '\w+@(\w+)\.com|es' parsed as
    # '(\w+@\w+\.com)|(es)', so any value starting with "es" passed and
    # '.es' addresses were rejected. Group the TLD alternation and anchor
    # the end so trailing garbage is rejected too.
    if not re.match(r'\w+@\w+\.(com|es)$', field.data):
        raise validators.ValidationError('Esto no es un correo electronico')
def validatorVISA(form, field):
    """WTForms validator: require a 16-digit card number in four 4-digit groups.

    Groups must be uniformly separated by '-' or by ' ' (no mixing, matching
    the original pattern). Raises ``validators.ValidationError`` otherwise.
    """
    # Bug fix: the original pattern was unanchored at the end, so any
    # trailing characters after the 16th digit were accepted. Adding '$'
    # makes the whole field validate, not just a prefix.
    if not re.match(r'((\d{4}-){3}|(\d{4} ){3})\d{4}$', field.data):
        raise validators.ValidationError('Esto no es una Tarjeta de credito')
def validatorApellidos(form, field):
    """WTForms validator: require at least two space-separated words
    (i.e. the user entered their surnames, not a single name)."""
    match = re.match(r'\w+ \w+', field.data)
    if match is None:
        raise validators.ValidationError('No has puesto tus apellidos')
| araluce/NextMedia | NextMedia/validators.py | Python | gpl-3.0 | 538 |
"""
Translation rules for the Surrey Roads.
Copyright 2011 Paul Norman.
"""
def translateName(rawname):
    """Expand abbreviated road-name parts ('Ave', 'N', ...) to full words.

    Each whitespace-separated word with a known expansion is replaced; all
    other words are kept verbatim. The result is re-joined with single
    spaces and stripped.
    """
    # One dict literal instead of 19 update() calls; the original also
    # listed 'Hwy' twice — the duplicate is dropped.
    suffixlookup = {
        'Ave': 'Avenue',
        'Rd': 'Road',
        'St': 'Street',
        'Pl': 'Place',
        'Cr': 'Crescent',
        'Blvd': 'Boulevard',
        'Dr': 'Drive',
        'Lane': 'Lane',
        'Crt': 'Court',
        'Gr': 'Grove',
        'Cl': 'Close',
        'Rwy': 'Railway',
        'Div': 'Diversion',
        'Hwy': 'Highway',
        'E': 'East',
        'S': 'South',
        'N': 'North',
        'W': 'West',
    }
    return ' '.join(suffixlookup.get(part, part) for part in rawname.split())
def filterTags(attrs):
    """Convert one Surrey GIS road-segment attribute dict into OSM tags.

    Returns a dict of OSM tags, or None when ``attrs`` is empty.
    NOTE(review): direct ``attrs['KEY']`` access assumes the GIS schema
    always supplies ROAD_NAME/YR/MATERIAL/SPEED/NO_LANE (possibly empty) —
    confirm against the shapefile loader.
    """
    if not attrs: return
    tags = {}
    #Add the source
    tags.update({'source':'City of Surrey 2012 GIS Data'})
    #automagically convert names
    if attrs['ROAD_NAME']:
        tags.update({'name':translateName(attrs['ROAD_NAME'].strip(' '))})
    if attrs['YR']:
        tags.update({'start_date':attrs['YR'].strip(' ')})
    if attrs['MATERIAL']:
        tags.update({'surface':attrs['MATERIAL'].strip(' ').lower()})
    if attrs['SPEED']:
        tags.update({'maxspeed': attrs['SPEED'].strip(' ')})
    if attrs['NO_LANE']:
        tags.update({'lanes': attrs['NO_LANE'].strip(' ')})
    # Road classification: RC_TYPE2 picks the feature kind, RD_CLASS the
    # highway grade, STATUS whether it is built/open.
    if 'RC_TYPE2' in attrs:
        if attrs['RC_TYPE2'] == "Road" or attrs['RC_TYPE2'] == "Frontage Road": #TYPE=0 or 1
            #some form of road
            if attrs['STATUS'] and attrs['STATUS'] == "Unconstructed":
                tags.update({'highway':'proposed'})
            else:
                #a road that's been completed
                if attrs['STATUS'] and attrs['STATUS'] == "Closed to Traffic":
                    tags.update({'access':'no'})
                if attrs['RD_CLASS'] and attrs['RD_CLASS'] == "Provincial Highway":
                    tags.update({'highway':'primary'})
                elif attrs['RD_CLASS'] and attrs['RD_CLASS'] == "Arterial":
                    tags.update({'highway':'secondary'})
                elif attrs['RD_CLASS'] and attrs['RD_CLASS'] == "Major Collector":
                    tags.update({'highway':'tertiary'})
                elif attrs['RD_CLASS'] and attrs['RD_CLASS'] == "Local":
                    tags.update({'highway':'residential'})
                elif attrs['RD_CLASS'] and attrs['RD_CLASS'] == "Translink":
                    tags.update({'highway':'road'})
                else:
                    tags.update({'highway':'road'})
        elif attrs['RC_TYPE2'] == "Highway Interchange": #type=1
            tags.update({'highway':'primary_link'})
        elif attrs['RC_TYPE2'] == "Street Lane" or attrs['RC_TYPE2'] == "Access Lane": #TYPE=3 or 4
            tags.update({'highway':'service'})
            tags.update({'service':'alley'})
        elif attrs['RC_TYPE2'] == "Railway": #type 5
            tags.update({'railway':'rail'})
    # Truck route information
    if 'ROUTE' in attrs:
        if attrs['ROUTE'] == "Dangerous Goods Routes":
            tags.update({'hazmat':'designated'})
            tags.update({'hgv':'designated'})
        if attrs['ROUTE'] == "Truck Routes":
            tags.update({'hgv':'designated'})
        if attrs['ROUTE'] == "Truck Routes Restrictions":
            tags.update({'hgv':'no'})
    #Truck todo
    # Does ROUTE0=Secondary -ROUTE=* imply a truck route?
    #Gritting (snow clearing) information
    if 'WTR_PRIOR' in attrs or 'WTR_VEHCL' in attrs:
        tags.update({'maintenance':'gritting'})
        tags.update({'gritting_operator':'City of Surrey'})
        if attrs['WTR_PRIOR'] and ("First Priority" in attrs['WTR_VEHCL']):
            tags.update({'gritting':'priority_1'})
        if attrs['WTR_PRIOR'] and ("Second Priority" in attrs['WTR_VEHCL']):
            tags.update({'gritting':'priority_2'})
    if 'GEODB_OID' in attrs:
        tags.update({'surrey:geodb_oid': attrs['GEODB_OID'].strip(' ')})
    return tags
| runetvilum/skolevej | ogr2osm/translations/surreyroad.py | Python | gpl-3.0 | 3,944 |
import attr
import pandas as pd
import re
from ..base import TohuBaseGenerator
from ..logging import logger
__all__ = ['get_tohu_items_name', 'make_tohu_items_class']
def make_tohu_items_class(clsname, attr_names):
    """
    Create a frozen attrs-based item class with dict/tuple interop helpers.

    Parameters
    ----------
    clsname: string
        Name of the class to be created

    attr_names: list of strings
        Names of the attributes of the class to be created
    """
    # Frozen (immutable) attrs class; repr=False because a custom __repr__
    # is attached below. NOTE(review): `cmp=True` is the legacy attrs
    # spelling of eq/order — confirm the pinned attrs version still accepts it.
    item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True)
    def new_repr(self):
        # Render as ClassName(field1=..., field2=...).
        all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()])
        return f'{clsname}({all_fields})'
    orig_eq = item_cls.__eq__
    def new_eq(self, other):
        """
        Custom __eq__() method which also allows comparisons with
        tuples and dictionaries. This is mostly for convenience
        during testing.
        """
        if isinstance(other, self.__class__):
            return orig_eq(self, other)
        else:
            if isinstance(other, tuple):
                return attr.astuple(self) == other
            elif isinstance(other, dict):
                return attr.asdict(self) == other
            else:
                return NotImplemented
    item_cls.__repr__ = new_repr
    item_cls.__eq__ = new_eq
    # Mapping-style helpers so items work with dict(item), item['field'],
    # and convert cleanly to pandas Series.
    item_cls.keys = lambda self: attr_names
    item_cls.__getitem__ = lambda self, key: getattr(self, key)
    item_cls.as_dict = lambda self: attr.asdict(self)
    item_cls.to_series = lambda self: pd.Series(attr.asdict(self))
    return item_cls
def get_tohu_items_name(cls):
    """
    Return a string which defines the name of the namedtuple class which will be used
    to produce items for the custom generator.

    By default this will be the first part of the class name (before '...Generator'),
    for example:

        FoobarGenerator -> Foobar
        QuuxGenerator -> Quux

    However, it can be set explicitly by the user by defining `__tohu_items_name__`
    in the class definition, for example:

        class Quux(CustomGenerator):
            __tohu_items_name__ = 'MyQuuxItem'
    """
    assert issubclass(cls, TohuBaseGenerator)
    try:
        # cls.__dict__ (not getattr) so the attribute must be set on this
        # class itself, not inherited from a parent generator.
        tohu_items_name = cls.__dict__['__tohu_items_name__']
        logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')")
    except KeyError:
        # Fall back to stripping a trailing 'Generator' from the class name.
        m = re.match('^(.*)Generator$', cls.__name__)
        if m is not None:
            tohu_items_name = m.group(1)
            logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)")
        else:
            msg = (
                "Cannot derive class name for items to be produced by custom generator. "
                "Please set '__tohu_items_name__' at the top of the custom generator's "
                "definition or change its name so that it ends in '...Generator'"
            )
            raise ValueError(msg)
return tohu_items_name | maxalbert/tohu | tohu/v6/custom_generator/utils.py | Python | mit | 3,014 |
#!/usr/bin/python
"""soql2atom: a beatbox demo that generates an atom 1.0 formatted feed of any SOQL query (requires beatbox 0.9 or later)
The fields Id, SystemModStamp and CreatedDate are automatically added to the SOQL if needed.
The first field in the select list becomes the title of the entry, so make sure to setup the order of the fields as you need.
The soql should be passed via a 'soql' queryString parameter
Optionally, you can also pass a 'title' queryString parameter to set the title of the feed
The script forces authentication, but many apache installations are configured to block the AUTHORIZATION header,
so the scirpt looks for X_HTTP_AUTHORIZATION instead, you can use a mod_rewrite rule to manage the mapping, something like this
Options +FollowSymLinks
RewriteEngine on
RewriteRule ^(.*)$ soql2atom.py [E=X-HTTP_AUTHORIZATION:%{HTTP:Authorization},QSA,L]
I have this in a .htaccess file in the same directory as soql2atom.py etc.
"""
__version__ = "1.0"
__author__ = "Simon Fell"
__copyright__ = "(C) 2006 Simon Fell. GNU GPL 2."
import sys
import beatbox
import cgi
import cgitb
from xml.sax.xmlreader import AttributesNSImpl
import datetime
from urlparse import urlparse
import os
import base64
import string
# Show tracebacks in the browser (CGI debugging aid).
cgitb.enable()
# Module-level shortcuts: the partner-WSDL namespace, one shared API client,
# and beatbox's empty-attributes constant.
sf = beatbox._tPartnerNS
svc = beatbox.Client()
_noAttrs = beatbox._noAttrs
def addRequiredFieldsToSoql(soql):
    """Ensure Id, SystemModstamp and CreatedDate appear in the SOQL select list.

    The select list is compared and emitted lower-cased; everything from just
    before the first 'from' onwards is kept verbatim from the original query.
    """
    # str methods instead of the string-module functions (string.find,
    # string.lower, string.strip, string.join) which were removed in
    # Python 3; behavior on Python 2 is unchanged.
    lowered = soql.lower()
    findPos = lowered.find("from")
    selectList = [f.strip() for f in lowered[:findPos].split(",")]
    if "id" not in selectList: selectList.append("Id")
    if "systemmodstamp" not in selectList: selectList.append("systemModStamp")
    if "createddate" not in selectList: selectList.append("createdDate")
    return ", ".join(selectList) + soql[findPos - 1:]
def soql2atom(loginResult, soql, title):
    """Run the SOQL query and write an Atom 1.0 feed to stdout (CGI response).

    loginResult is the beatbox login() response; the first selected column
    becomes each entry's title, Id/SystemModstamp/CreatedDate map onto the
    Atom id/updated/published elements, and all other non-empty columns are
    rendered both into the HTML content and as namespaced sObject elements.
    """
    soqlWithFields = addRequiredFieldsToSoql(soql)
    userInfo = loginResult[beatbox._tPartnerNS.userInfo]
    serverUrl = str(loginResult[beatbox._tPartnerNS.serverUrl])
    (scheme, host, path, params, query, frag) = urlparse(serverUrl)
    sfbaseUrl = scheme + "://" + host + "/"
    thisUrl = "http://" + os.environ["HTTP_HOST"] + os.environ["REQUEST_URI"]
    qr = svc.query(soqlWithFields)
    atom_ns = "http://www.w3.org/2005/Atom"
    ent_ns = "urn:sobject.enterprise.soap.sforce.com"
    print "content-type: application/atom+xml"
    # Gzip the response only when the client advertises support for it.
    doGzip = os.environ.has_key("HTTP_ACCEPT_ENCODING") and "gzip" in string.lower(os.environ["HTTP_ACCEPT_ENCODING"]).split(',')
    if (doGzip): print "content-encoding: gzip"
    print ""
    x = beatbox.XmlWriter(doGzip)
    x.startPrefixMapping("a", atom_ns)
    x.startPrefixMapping("s", ent_ns)
    x.startElement(atom_ns, "feed")
    x.writeStringElement(atom_ns, "title", title)
    x.characters("\n")
    x.startElement(atom_ns, "author")
    x.writeStringElement(atom_ns, "name", str(userInfo.userFullName))
    x.endElement()
    x.characters("\n")
    # rel="self" link pointing back at this CGI URL.
    rel = AttributesNSImpl( {(None, "rel"): "self", (None, "href") : thisUrl},
                    {(None, "rel"): "rel", (None, "href"): "href"})
    x.startElement(atom_ns, "link", rel)
    x.endElement()
    x.writeStringElement(atom_ns, "updated", datetime.datetime.utcnow().isoformat() +"Z")
    x.writeStringElement(atom_ns, "id", thisUrl + "&userid=" + str(loginResult[beatbox._tPartnerNS.userId]))
    x.characters("\n")
    type = AttributesNSImpl({(None, u"type") : "html"}, {(None, u"type") : u"type" })
    # NOTE(review): row[2:] skips the leading <type>/<Id> elements of each
    # record; the third element is the first selected field — confirm
    # against the beatbox query-result layout.
    for row in qr[sf.records:]:
        x.startElement(atom_ns, "entry")
        desc = ""
        x.writeStringElement(atom_ns, "title", str(row[2]))
        for col in row[2:]:
            if col._name[1] == 'Id':
                x.writeStringElement(atom_ns, "id", sfbaseUrl + str(col))
                writeLink(x, atom_ns, "link", "alternate", "text/html", sfbaseUrl + str(col))
            elif col._name[1] == 'SystemModstamp':
                x.writeStringElement(atom_ns, "updated", str(col))
            elif col._name[1] == 'CreatedDate':
                x.writeStringElement(atom_ns, "published", str(col))
            elif str(col) != "":
                desc = desc + "<b>" + col._name[1] + "</b> : " + str(col) + "<br>"
                x.writeStringElement(ent_ns, col._name[1], str(col))
        x.startElement(atom_ns, "content", type)
        x.characters(desc)
        x.endElement() # content
        x.characters("\n")
        x.endElement() # entry
    x.endElement() # feed
    print x.endDocument()
def writeLink(x, namespace, localname, rel, type, href):
    """Emit an empty <link rel=... type=... href=...> element through writer x."""
    values = {(None, "rel"): rel, (None, "href"): href, (None, "type"): type}
    qnames = {(None, "rel"): "rel", (None, "href"): "href", (None, "type"): "type"}
    link_attrs = AttributesNSImpl(values, qnames)
    x.startElement(namespace, localname, link_attrs)
    x.endElement()
def authenticationRequired(message="Unauthorized"):
    """Send a 401 CGI response asking the browser for HTTP Basic credentials."""
    print "status: 401 Unauthorized"
    # Adjacent string literals: the rendered header has no embedded quotes.
    print "WWW-authenticate: Basic realm=""www.salesforce.com"""
    print "content-type: text/plain"
    print ""
    print message
# CGI entry point. Apache often strips the Authorization header, so the
# credentials are expected in X_HTTP_AUTHORIZATION instead (set via the
# mod_rewrite rule shown in the module docstring).
if not os.environ.has_key('X_HTTP_AUTHORIZATION') or os.environ['X_HTTP_AUTHORIZATION'] == '':
    authenticationRequired()
else:
    # Decode HTTP Basic auth: "Basic base64(username:password)".
    auth = os.environ['X_HTTP_AUTHORIZATION']
    (username, password) = base64.decodestring(auth.split(" ")[1]).split(':')
    form = cgi.FieldStorage()
    if not form.has_key('soql'): raise Exception("Must provide the SOQL query to run via the soql queryString parameter")
    soql = form.getvalue("soql")
    title = "SOQL2ATOM : " + soql
    if form.has_key("title"):
        title = form.getvalue("title")
    try:
        lr = svc.login(username, password)
        soql2atom(lr, soql, title)
    except beatbox.SoapFaultError, sfe:
        # Bad credentials surface as a SOAP fault; re-prompt for auth.
        if (sfe.faultCode == 'INVALID_LOGIN'):
            authenticationRequired(sfe.faultString)
        else:
            raise
| lexsf/Beatbox | soql2atom.py | Python | gpl-2.0 | 5,463 |
# jsb/plugs/socket/dns.py
#
#
""" do a fqdn loopup. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
## basic imports
from socket import gethostbyname
from socket import getfqdn
import re
## dns command
def handle_dns(bot, event):
    """ arguments: <ip>|<hostname> - do a dns lookup. """
    # Require an argument; tell the caller what is missing otherwise.
    if not event.rest:
        event.missing("<ip>|<hostname>")
        return
    query = event.rest.strip()
    ippattern = re.match(r"^([0-9]{1,3}\.){3}[0-9]{1,3}$", query)
    hostpattern = re.match(r"(\w+://)?(?P<hostname>\S+\.\w+)", query)
    if ippattern:
        # Dotted-quad input: reverse-resolve it to a fully qualified name.
        try:
            answer = getfqdn(ippattern.group(0))
            event.reply("%(hostname)s is %(answer)s" % {"hostname": query, "answer": answer})
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            event.reply("Couldn't lookup ip")
    elif hostpattern:
        # Hostname (optionally with a scheme prefix): forward-resolve it.
        try:
            answer = gethostbyname(hostpattern.group('hostname'))
            event.reply("%(ip)s is %(answer)s" % {"ip": query, "answer": answer})
        except Exception:
            event.reply("Couldn't look up the hostname")
    else:
        # Neither an IP nor a plausible hostname: silently ignore.
        return
# Register the command (allowed for OPER/USER/GUEST) plus a usage example
# with the bot framework.
cmnds.add("dns", handle_dns, ["OPER", "USER", "GUEST"])
examples.add("dns", "resolve the ip or the hostname", "dns google.com")
| Petraea/jsonbot | jsb/plugs/socket/dns.py | Python | mit | 1,189 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import abc
from typing import Tuple
import numpy as np
class Acquisition(abc.ABC):
    """Base class for acquisition functions.

    Subclasses implement :meth:`evaluate` and :attr:`has_gradients`;
    acquisitions compose algebraically via ``+``, ``*`` and ``/``.
    """

    def __add__(self, other: "Acquisition") -> "Sum":
        """Return the acquisition ``self + other``."""
        return Sum(self, other)

    def __mul__(self, other: "Acquisition") -> "Product":
        """Return the acquisition ``self * other``."""
        return Product(self, other)

    def __rmul__(self, other: "Acquisition") -> "Product":
        """Return the acquisition ``other * self``."""
        return Product(other, self)

    def __truediv__(self, denominator: "Acquisition") -> "Quotient":
        """Return the acquisition ``self / denominator``."""
        return Quotient(self, denominator)

    @abc.abstractmethod
    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the acquisition function.

        :param x: (n_points x n_dims) array of points at which to calculate
            acquisition function values
        :return: (n_points x 1) array of acquisition function values
        """

    @property
    @abc.abstractmethod
    def has_gradients(self) -> bool:
        """Whether an analytical gradient calculation is available.

        :return: True if gradients are available
        """

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Evaluate value and gradient of the acquisition function at x.

        Must be overridden whenever :attr:`has_gradients` returns True.

        :param x: (n_points x n_dims) array of points
        :return: tuple of an (n_points x 1) array of values and an
            (n_points x n_dims) array of gradients with respect to x
        :raises NotImplementedError: if the subclass provides no gradients
        """
        raise NotImplementedError("Gradients not implemented for this acquisition function")

    def update_parameters(self) -> None:
        """Hook run once per outer-loop iteration; no-op by default."""
class Quotient(Acquisition):
    """Acquisition formed by dividing one acquisition function by another."""

    def __init__(self, numerator: Acquisition, denominator: Acquisition):
        """
        :param numerator: acquisition acting as the numerator
        :param denominator: acquisition acting as the denominator
        """
        self.numerator = numerator
        self.denominator = denominator

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate ``numerator(x) / denominator(x)``.

        :param x: (n_points x n_dims) array of points
        :return: (n_points x 1) array of acquisition values
        """
        return self.numerator.evaluate(x) / self.denominator.evaluate(x)

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Evaluate value and gradient of the quotient at x (quotient rule).

        :param x: (n_points x n_dims) array of points
        :return: tuple of (n_points x 1) values and (n_points x n_dims) gradients
        """
        num_val, num_grad = self.numerator.evaluate_with_gradients(x)
        den_val, den_grad = self.denominator.evaluate_with_gradients(x)
        value = num_val / den_val
        # d(u/v) = u'/v - v'u/v^2
        gradient = (num_grad / den_val) - ((den_grad * num_val) / (den_val ** 2))
        return value, gradient

    @property
    def has_gradients(self) -> bool:
        """True only when both operands provide analytical gradients."""
        return self.denominator.has_gradients and self.numerator.has_gradients

    def update_parameters(self) -> None:
        """Forward the once-per-iteration update to both operands."""
        self.denominator.update_parameters()
        self.numerator.update_parameters()
class Product(Acquisition):
    """Acquisition formed by multiplying two acquisition functions."""

    def __init__(self, acquisition_1: Acquisition, acquisition_2: Acquisition):
        """
        :param acquisition_1: first factor of the product
        :param acquisition_2: second factor of the product
        """
        self.acquisition_1 = acquisition_1
        self.acquisition_2 = acquisition_2

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the product of both acquisitions at x.

        :param x: (n_points x n_dims) array of points
        :return: (n_points x 1) array of acquisition values
        """
        return self.acquisition_1.evaluate(x) * self.acquisition_2.evaluate(x)

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Evaluate value and gradient of the product at x (product rule).

        :param x: (n_points x n_dims) array of points
        :return: tuple of (n_points x 1) values and (n_points x n_dims) gradients
        """
        val_a, grad_a = self.acquisition_1.evaluate_with_gradients(x)
        val_b, grad_b = self.acquisition_2.evaluate_with_gradients(x)
        # d(uv) = u v' + v u'
        return val_a * val_b, val_a * grad_b + val_b * grad_a

    @property
    def has_gradients(self):
        """True only when both factors provide analytical gradients."""
        return self.acquisition_1.has_gradients and self.acquisition_2.has_gradients

    def update_parameters(self) -> None:
        """Forward the once-per-iteration update to both factors."""
        self.acquisition_1.update_parameters()
        self.acquisition_2.update_parameters()
class Sum(Acquisition):
    """Acquisition formed by adding two acquisition functions."""

    def __init__(self, acquisition_1: Acquisition, acquisition_2: Acquisition) -> None:
        """
        :param acquisition_1: first term of the sum
        :param acquisition_2: second term of the sum
        """
        self.acquisition_1 = acquisition_1
        self.acquisition_2 = acquisition_2

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the sum of both acquisitions at x.

        :param x: (n_points x n_dims) array of points
        :return: (n_points x 1) array of acquisition values
        """
        return self.acquisition_1.evaluate(x) + self.acquisition_2.evaluate(x)

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Evaluate value and gradient of the sum at x.

        :param x: (n_points x n_dims) array of points
        :return: tuple of (n_points x 1) values and (n_points x n_dims) gradients
        """
        val_a, grad_a = self.acquisition_1.evaluate_with_gradients(x)
        val_b, grad_b = self.acquisition_2.evaluate_with_gradients(x)
        # Both value and gradient add term-wise.
        return val_a + val_b, grad_a + grad_b

    @property
    def has_gradients(self):
        """True only when both terms provide analytical gradients."""
        return self.acquisition_1.has_gradients and self.acquisition_2.has_gradients

    def update_parameters(self) -> None:
        """Forward the once-per-iteration update to both terms."""
        self.acquisition_1.update_parameters()
        self.acquisition_2.update_parameters()
| EmuKit/emukit | emukit/core/acquisition/acquisition.py | Python | apache-2.0 | 8,967 |
from datetime import date, datetime
from io import StringIO
import ibis
import ibis.common.exceptions as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.util as util
from .identifiers import quote_identifier
# Render an ops.Cast as CAST(arg AS <clickhouse type>).
def _cast(translator, expr):
    # Imported lazily to avoid a circular import with the client module.
    from .client import ClickhouseDataType
    op = expr.op()
    arg, target = op.args
    arg_ = translator.translate(arg)
    # nullable=False: the CAST target is spelled without Nullable(...).
    type_ = str(ClickhouseDataType.from_ibis(target, nullable=False))
    return f'CAST({arg_!s} AS {type_!s})'
# Render `arg BETWEEN lower AND upper`.
def _between(translator, expr):
    op = expr.op()
    arg_, lower_, upper_ = map(translator.translate, op.args)
    return f'{arg_!s} BETWEEN {lower_!s} AND {upper_!s}'
# Negation: logical NOT for boolean expressions, arithmetic minus otherwise.
def _negate(translator, expr):
    arg = expr.op().args[0]
    if isinstance(expr, ir.BooleanValue):
        arg_ = translator.translate(arg)
        return f'NOT {arg_!s}'
    else:
        # Parenthesize so e.g. -(a + b) keeps its meaning.
        arg_ = _parenthesize(translator, arg)
        return f'-{arg_!s}'
# Logical NOT of the single operand.
def _not(translator, expr):
    return 'NOT {}'.format(*map(translator.translate, expr.op().args))
# Wrap infix/unary operator expressions in parentheses; other forms
# (function calls, columns, literals) never need them.
# NOTE(review): _binary_infix_ops / _unary_ops are module-level registries
# defined elsewhere in this file.
def _parenthesize(translator, expr):
    op = expr.op()
    op_klass = type(op)
    # function calls don't need parens
    what_ = translator.translate(expr)
    if (op_klass in _binary_infix_ops) or (op_klass in _unary_ops):
        return f'({what_!s})'
    else:
        return what_
# A unary function is a fixed-arity function of one argument.
def _unary(func_name):
    return _fixed_arity(func_name, 1)
# Seconds-since-epoch via ClickHouse's toRelativeSecondNum.
def _extract_epoch_seconds(translator, expr):
    op = expr.op()
    return _call(translator, 'toRelativeSecondNum', *op.args)
# Build a formatter that maps an op onto `func_name`, enforcing its arity.
def _fixed_arity(func_name, arity):
    def formatter(translator, expr):
        op = expr.op()
        arg_count = len(op.args)
        if arity != arg_count:
            msg = 'Incorrect number of args {0} instead of {1}'
            raise com.UnsupportedOperationError(msg.format(arg_count, arity))
        return _call(translator, func_name, *op.args)
    return formatter
# Build a formatter for a simple aggregate whose op args are (arg, where).
def _agg(func):
    def formatter(translator, expr):
        return _aggregate(translator, func, *expr.op().args)
    return formatter
# Variance-style aggregates come in sample/population variants
# (e.g. varSamp / varPop); `how` selects which one.
def _agg_variance_like(func):
    variants = {'sample': f'{func}Samp', 'pop': f'{func}Pop'}
    def formatter(translator, expr):
        arg, how, where = expr.op().args
        return _aggregate(translator, variants[how], arg, where)
    return formatter
# Build a formatter rendering `left <sym> right`, parenthesized as needed.
def _binary_infix_op(infix_sym):
    def formatter(translator, expr):
        op = expr.op()
        left, right = op.args
        left_ = _parenthesize(translator, left)
        right_ = _parenthesize(translator, right)
        return f'{left_!s} {infix_sym!s} {right_!s}'
    return formatter
# Render func(arg1, arg2, ...) with each argument translated.
def _call(translator, func, *args):
    args_ = ', '.join(map(translator.translate, args))
    return f'{func!s}({args_!s})'
# ClickHouse supports conditional aggregation via the -If combinator.
def _aggregate(translator, func, arg, where=None):
    if where is not None:
        return _call(translator, func + 'If', arg, where)
    else:
        return _call(translator, func, arg)
# ClickHouse has no XOR operator; use the xor() function instead.
def _xor(translator, expr):
    op = expr.op()
    left_ = _parenthesize(translator, op.left)
    right_ = _parenthesize(translator, op.right)
    return f'xor({left_}, {right_})'
# Formatter for ops whose single `arg` attribute is a list of values.
def _varargs(func_name):
    def varargs_formatter(translator, expr):
        op = expr.op()
        return _call(translator, func_name, *op.arg)
    return varargs_formatter
# Map ibis' arbitrary() strategies onto the ClickHouse any* aggregates.
def _arbitrary(translator, expr):
    arg, how, where = expr.op().args
    functions = {
        None: 'any',
        'first': 'any',
        'last': 'anyLast',
        'heavy': 'anyHeavy',
    }
    return _aggregate(translator, functions[how], arg, where=where)
def _substring(translator, expr):
    # arg_ is the formatted notation
    op = expr.op()
    arg, start, length = op.args
    arg_, start_ = translator.translate(arg), translator.translate(start)
    # Clickhouse is 1-indexed
    # Literal lengths are embedded as raw values; other expressions are
    # translated like any argument.
    if length is None or isinstance(length.op(), ops.Literal):
        if length is not None:
            length_ = length.op().value
            return f'substring({arg_}, {start_} + 1, {length_})'
        else:
            return f'substring({arg_}, {start_} + 1)'
    else:
        length_ = translator.translate(length)
        return f'substring({arg_}, {start_} + 1, {length_})'
def _string_find(translator, expr):
    op = expr.op()
    arg, substr, start, _ = op.args
    if start is not None:
        raise com.UnsupportedOperationError(
            "String find doesn't support start argument"
        )
    # position() is 1-based; subtract 1 for ibis' 0-based convention.
    return _call(translator, 'position', arg, substr) + ' - 1'
def _regex_extract(translator, expr):
    op = expr.op()
    arg, pattern, index = op.args
    arg_, pattern_ = translator.translate(arg), translator.translate(pattern)
    # extractAll returns a 1-based array; shift the requested index.
    if index is not None:
        index_ = translator.translate(index)
        return f'extractAll({arg_}, {pattern_})[{index_} + 1]'
    return f'extractAll({arg_}, {pattern_})'
# Dispatch URL-component extraction onto the dedicated ClickHouse functions.
def _parse_url(translator, expr):
    op = expr.op()
    arg, extract, key = op.args
    if extract == 'HOST':
        return _call(translator, 'domain', arg)
    elif extract == 'PROTOCOL':
        return _call(translator, 'protocol', arg)
    elif extract == 'PATH':
        return _call(translator, 'path', arg)
    elif extract == 'QUERY':
        # With a key, pull one parameter; otherwise the whole query string.
        if key is not None:
            return _call(translator, 'extractURLParameter', arg, key)
        else:
            return _call(translator, 'queryString', arg)
    else:
        raise com.UnsupportedOperationError(
            f'Parse url with extract {extract} is not supported'
        )
def _index_of(translator, expr):
    op = expr.op()
    arg, arr = op.args
    arg_formatted = translator.translate(arg)
    arr_formatted = ','.join(map(translator.translate, arr))
    # indexOf is 1-based (and 0 when missing), hence the trailing - 1.
    return f"indexOf([{arr_formatted}], {arg_formatted}) - 1"
def _sign(translator, expr):
    """Workaround for missing sign function"""
    op = expr.op()
    (arg,) = op.args
    arg_ = translator.translate(arg)
    # x / |x| is +-1 for nonzero x; intDivOrZero yields 0 at x = 0.
    return 'intDivOrZero({0}, abs({0}))'.format(arg_)
# round(arg[, digits]); digits is optional on the ibis side.
def _round(translator, expr):
    op = expr.op()
    arg, digits = op.args
    if digits is not None:
        return _call(translator, 'round', arg, digits)
    else:
        return _call(translator, 'round', arg)
# Only hash algorithms that ClickHouse actually ships are accepted.
def _hash(translator, expr):
    op = expr.op()
    arg, how = op.args
    algorithms = {
        'MD5',
        'halfMD5',
        'SHA1',
        'SHA224',
        'SHA256',
        'intHash32',
        'intHash64',
        'cityHash64',
        'sipHash64',
        'sipHash128',
    }
    if how not in algorithms:
        raise com.UnsupportedOperationError(
            f'Unsupported hash algorithm {how}'
        )
    return _call(translator, how, arg)
# Natural log by default; bases 2 and 10 have dedicated functions, any
# other base is rejected.
def _log(translator, expr):
    op = expr.op()
    arg, base = op.args
    if base is None:
        func = 'log'
    elif base._arg.value == 2:
        # NOTE(review): reaches into the literal via the private _arg —
        # assumes `base` is always a Literal expression here.
        func = 'log2'
    elif base._arg.value == 10:
        func = 'log10'
    else:
        raise ValueError(f'Base {base} for logarithm not supported!')
    return _call(translator, func, arg)
# Render a value list as a parenthesized, comma-separated tuple.
def _value_list(translator, expr):
    op = expr.op()
    values_ = map(translator.translate, op.values)
    return '({})'.format(', '.join(values_))
# ClickHouse intervals stop at second resolution.
def _interval_format(translator, expr):
    dtype = expr.type()
    if dtype.unit in {'ms', 'us', 'ns'}:
        raise com.UnsupportedOperationError(
            "Clickhouse doesn't support subsecond interval resolutions"
        )
    return f'INTERVAL {expr.op().value} {dtype.resolution.upper()}'
# Same as _interval_format, but the magnitude is a translated expression.
def _interval_from_integer(translator, expr):
    op = expr.op()
    arg, unit = op.args
    dtype = expr.type()
    if dtype.unit in {'ms', 'us', 'ns'}:
        raise com.UnsupportedOperationError(
            "Clickhouse doesn't support subsecond interval resolutions"
        )
    arg_ = translator.translate(arg)
    return f'INTERVAL {arg_} {dtype.resolution.upper()}'
# Render a literal for each supported ibis value type.
# _null_literal is defined elsewhere in this module.
def _literal(translator, expr):
    value = expr.op().value
    if value is None and expr.type().nullable:
        return _null_literal(translator, expr)
    if isinstance(expr, ir.BooleanValue):
        # ClickHouse has no boolean literal; use 1/0.
        return '1' if value else '0'
    elif isinstance(expr, ir.StringValue):
        # Escape embedded single quotes.
        return "'{!s}'".format(value.replace("'", "\\'"))
    elif isinstance(expr, ir.NumericValue):
        return repr(value)
    elif isinstance(expr, ir.IntervalValue):
        return _interval_format(translator, expr)
    elif isinstance(expr, ir.TimestampValue):
        if isinstance(value, datetime):
            # toDateTime only carries whole-second precision.
            if value.microsecond != 0:
                msg = 'Unsupported subsecond accuracy {}'
                raise ValueError(msg.format(value))
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        return f"toDateTime('{value!s}')"
    elif isinstance(expr, ir.DateValue):
        if isinstance(value, date):
            value = value.strftime('%Y-%m-%d')
        return f"toDate('{value!s}')"
    elif isinstance(expr, ir.ArrayValue):
        return str(list(value))
    elif isinstance(expr, ir.SetScalar):
        return '({})'.format(', '.join(map(repr, value)))
    else:
        raise NotImplementedError(type(expr))
class _CaseFormatter:
    """Renders CASE [base] WHEN ... THEN ... [ELSE ...] END expressions."""
    def __init__(self, translator, base, cases, results, default):
        self.translator = translator
        # `base` is the optional comparison expression (simple CASE form);
        # `default` is the optional ELSE expression.
        self.base = base
        self.cases = cases
        self.results = results
        self.default = default
        # HACK
        self.indent = 2
        self.multiline = len(cases) > 1
        self.buf = StringIO()
    def _trans(self, expr):
        # Shorthand for translating a sub-expression.
        return self.translator.translate(expr)
    def get_result(self):
        """Build and return the full CASE expression string."""
        # Rewind so repeated calls rebuild from the start of the buffer.
        self.buf.seek(0)
        self.buf.write('CASE')
        if self.base is not None:
            base_str = self._trans(self.base)
            self.buf.write(f' {base_str}')
        for case, result in zip(self.cases, self.results):
            self._next_case()
            case_str = self._trans(case)
            result_str = self._trans(result)
            self.buf.write(f'WHEN {case_str} THEN {result_str}')
        if self.default is not None:
            self._next_case()
            default_str = self._trans(self.default)
            self.buf.write(f'ELSE {default_str}')
        if self.multiline:
            self.buf.write('\nEND')
        else:
            self.buf.write(' END')
        return self.buf.getvalue()
    def _next_case(self):
        # One clause per indented line when there are multiple cases.
        if self.multiline:
            self.buf.write('\n{}'.format(' ' * self.indent))
        else:
            self.buf.write(' ')
# Simple CASE: CASE <base> WHEN <value> THEN ... form.
def _simple_case(translator, expr):
    op = expr.op()
    formatter = _CaseFormatter(
        translator, op.base, op.cases, op.results, op.default
    )
    return formatter.get_result()
# Searched CASE: CASE WHEN <predicate> THEN ... form (no base expression).
def _searched_case(translator, expr):
    op = expr.op()
    formatter = _CaseFormatter(
        translator, None, op.cases, op.results, op.default
    )
    return formatter.get_result()
def _table_array_view(translator, expr):
ctx = translator.context
table = expr.op().table
query = ctx.get_compiled_expr(table)
return f'(\n{util.indent(query, ctx.indent)}\n)'
def _timestamp_from_unix(translator, expr):
op = expr.op()
arg, unit = op.args
if unit in {'ms', 'us', 'ns'}:
raise ValueError(f'`{unit}` unit is not supported!')
return _call(translator, 'toDateTime', arg)
def _truncate(translator, expr):
    """Compile date/timestamp truncation to the matching toStartOf* call."""
    arg, unit = expr.op().args
    # ibis truncation unit -> ClickHouse truncation function
    converter = {
        'Y': 'toStartOfYear',
        'M': 'toStartOfMonth',
        'W': 'toMonday',
        'D': 'toDate',
        'h': 'toStartOfHour',
        'm': 'toStartOfMinute',
        's': 'toDateTime',
    }.get(unit)
    if converter is None:
        raise com.UnsupportedOperationError(
            'Unsupported truncate unit {}'.format(unit)
        )
    return _call(translator, converter, arg)
def _exists_subquery(translator, expr):
    """Compile (Not)ExistsSubquery as ``[NOT] EXISTS (SELECT 1 ...)``."""
    op = expr.op()
    ctx = translator.context
    # project a constant: only row existence matters, not the columns
    dummy = ir.literal(1).name(ir.unnamed)
    filtered = op.foreign_table.filter(op.predicates)
    subquery = ctx.get_compiled_expr(filtered.projection([dummy]))
    if isinstance(op, ops.ExistsSubquery):
        prefix = 'EXISTS'
    elif isinstance(op, ops.NotExistsSubquery):
        prefix = 'NOT EXISTS'
    else:
        raise NotImplementedError
    return '{} (\n{}\n)'.format(prefix, util.indent(subquery, ctx.indent))
def _table_column(translator, expr):
    """Compile a column reference.

    Foreign columns (not belonging to the current SELECT's table set) are
    rendered as a scalar subquery; otherwise the quoted column name is
    emitted, prefixed with the table alias when the query uses aliases.
    """
    op = expr.op()
    field_name = op.name
    quoted_name = quote_identifier(field_name, force=True)
    table = op.table
    ctx = translator.context
    # If the column does not originate from the table set in the current SELECT
    # context, we should format as a subquery
    if translator.permit_subquery and ctx.is_foreign_expr(table):
        proj_expr = table.projection([field_name]).to_array()
        return _table_array_view(translator, proj_expr)
    if ctx.need_aliases():
        alias = ctx.get_ref(table)
        if alias is not None:
            quoted_name = f'{alias}.{quoted_name}'
    return quoted_name
def _string_split(translator, expr):
value, sep = expr.op().args
return 'splitByString({}, {})'.format(
translator.translate(sep), translator.translate(value)
)
def _string_join(translator, expr):
    """Compile StringJoin as arrayStringConcat([elems...], sep)."""
    sep, elements = expr.op().args
    assert isinstance(
        elements.op(), ops.ValueList
    ), f'elements must be a ValueList, got {type(elements.op())}'
    compiled_elements = ', '.join(translator.translate(e) for e in elements)
    compiled_sep = translator.translate(sep)
    return f'arrayStringConcat([{compiled_elements}], {compiled_sep})'
def _string_repeat(translator, expr):
value, times = expr.op().args
result = 'arrayStringConcat(arrayMap(x -> {}, range({})))'.format(
translator.translate(value), translator.translate(times)
)
return result
def _string_like(translator, expr):
value, pattern = expr.op().args[:2]
return '{} LIKE {}'.format(
translator.translate(value), translator.translate(pattern)
)
def _group_concat(translator, expr):
    """Compile GroupConcat; an optional filter NULLs out excluded rows."""
    arg, sep, where = expr.op().args
    if where is not None:
        # rows failing the filter become NULL and are skipped by groupArray
        arg = where.ifelse(arg, ibis.NA)
    arg_ = translator.translate(arg)
    sep_ = translator.translate(sep)
    return f'arrayStringConcat(groupArray({arg_}), {sep_})'
# TODO: clickhouse uses different string functions
# for ascii and utf-8 encodings,
# Operations rendered as infix SQL (``a <op> b``) rather than function calls.
_binary_infix_ops = {
    # Binary operations
    ops.Add: _binary_infix_op('+'),
    ops.Subtract: _binary_infix_op('-'),
    ops.Multiply: _binary_infix_op('*'),
    ops.Divide: _binary_infix_op('/'),
    ops.Power: _fixed_arity('pow', 2),
    ops.Modulus: _binary_infix_op('%'),
    # Comparisons
    ops.Equals: _binary_infix_op('='),
    ops.NotEquals: _binary_infix_op('!='),
    ops.GreaterEqual: _binary_infix_op('>='),
    ops.Greater: _binary_infix_op('>'),
    ops.LessEqual: _binary_infix_op('<='),
    ops.Less: _binary_infix_op('<'),
    # Boolean comparisons
    ops.And: _binary_infix_op('AND'),
    ops.Or: _binary_infix_op('OR'),
    ops.Xor: _xor,
}
# Unary prefix operators (``-x``, ``NOT x``).
_unary_ops = {ops.Negate: _negate, ops.Not: _not}
# Maps ibis operation types to their ClickHouse SQL formatting functions.
# Extended below with undocumented, unsupported, unary and infix operations.
operation_registry = {
    # Unary operations
    ops.TypeOf: _unary('toTypeName'),
    ops.IsNan: _unary('isNaN'),
    ops.IsInf: _unary('isInfinite'),
    ops.Abs: _unary('abs'),
    ops.Ceil: _unary('ceil'),
    ops.Floor: _unary('floor'),
    ops.Exp: _unary('exp'),
    ops.Round: _round,
    ops.Sign: _sign,
    ops.Sqrt: _unary('sqrt'),
    ops.Hash: _hash,
    ops.Log: _log,
    ops.Ln: _unary('log'),
    ops.Log2: _unary('log2'),
    ops.Log10: _unary('log10'),
    # Unary aggregates
    ops.CMSMedian: _agg('median'),
    # TODO: there is also a `uniq` function which is the
    # recommended way to approximate cardinality
    ops.HLLCardinality: _agg('uniqHLL12'),
    ops.Mean: _agg('avg'),
    ops.Sum: _agg('sum'),
    ops.Max: _agg('max'),
    ops.Min: _agg('min'),
    ops.StandardDev: _agg_variance_like('stddev'),
    ops.Variance: _agg_variance_like('var'),
    ops.GroupConcat: _group_concat,
    ops.Count: _agg('count'),
    ops.CountDistinct: _agg('uniq'),
    ops.Arbitrary: _arbitrary,
    # string operations
    ops.StringLength: _unary('length'),
    ops.Lowercase: _unary('lower'),
    ops.Uppercase: _unary('upper'),
    ops.Reverse: _unary('reverse'),
    ops.Substring: _substring,
    ops.StringFind: _string_find,
    ops.FindInSet: _index_of,
    ops.StringReplace: _fixed_arity('replaceAll', 3),
    ops.StringJoin: _string_join,
    ops.StringSplit: _string_split,
    ops.StringSQLLike: _string_like,
    ops.Repeat: _string_repeat,
    ops.RegexSearch: _fixed_arity('match', 2),
    # TODO: extractAll(haystack, pattern)[index + 1]
    ops.RegexExtract: _regex_extract,
    ops.RegexReplace: _fixed_arity('replaceRegexpAll', 3),
    ops.ParseURL: _parse_url,
    # Temporal operations
    ops.Date: _unary('toDate'),
    ops.DateTruncate: _truncate,
    ops.TimestampNow: lambda *args: 'now()',
    ops.TimestampTruncate: _truncate,
    ops.TimeTruncate: _truncate,
    ops.IntervalFromInteger: _interval_from_integer,
    ops.ExtractYear: _unary('toYear'),
    ops.ExtractMonth: _unary('toMonth'),
    ops.ExtractDay: _unary('toDayOfMonth'),
    ops.ExtractDayOfYear: _unary('toDayOfYear'),
    ops.ExtractQuarter: _unary('toQuarter'),
    ops.ExtractEpochSeconds: _extract_epoch_seconds,
    ops.ExtractWeekOfYear: _unary('toISOWeek'),
    ops.ExtractHour: _unary('toHour'),
    ops.ExtractMinute: _unary('toMinute'),
    ops.ExtractSecond: _unary('toSecond'),
    # Other operations
    ops.E: lambda *args: 'e()',
    ops.Literal: _literal,
    ops.ValueList: _value_list,
    ops.Cast: _cast,
    # for more than 2 args this should be arrayGreatest|Least(array([]))
    # because clickhouse's greatest and least doesn't support varargs
    ops.Greatest: _varargs('greatest'),
    ops.Least: _varargs('least'),
    ops.Where: _fixed_arity('if', 3),
    ops.Between: _between,
    ops.Contains: _binary_infix_op('IN'),
    ops.NotContains: _binary_infix_op('NOT IN'),
    ops.SimpleCase: _simple_case,
    ops.SearchedCase: _searched_case,
    ops.TableColumn: _table_column,
    ops.TableArrayView: _table_array_view,
    ops.DateAdd: _binary_infix_op('+'),
    ops.DateSub: _binary_infix_op('-'),
    ops.DateDiff: _binary_infix_op('-'),
    ops.TimestampAdd: _binary_infix_op('+'),
    ops.TimestampSub: _binary_infix_op('-'),
    ops.TimestampDiff: _binary_infix_op('-'),
    ops.TimestampFromUNIX: _timestamp_from_unix,
    ops.ExistsSubquery: _exists_subquery,
    ops.NotExistsSubquery: _exists_subquery,
    ops.ArrayLength: _unary('length'),
}
def _raise_error(translator, expr, *args):
    """Translation hook for operations ClickHouse cannot compile."""
    op_type = type(expr.op())
    raise com.UnsupportedOperationError(
        "Clickhouse backend doesn't support {0} operation!".format(op_type)
    )
def _null_literal(translator, expr):
return 'Null'
def _null_if_zero(translator, expr):
op = expr.op()
arg = op.args[0]
arg_ = translator.translate(arg)
return f'nullIf({arg_}, 0)'
def _zero_if_null(translator, expr):
op = expr.op()
arg = op.args[0]
arg_ = translator.translate(arg)
return f'ifNull({arg_}, 0)'
# Operations backed by ClickHouse functions that are (or were) undocumented
# upstream; kept separate from the main registry for visibility.
_undocumented_operations = {
    ops.NullLiteral: _null_literal,  # undocumented
    ops.IsNull: _unary('isNull'),
    ops.NotNull: _unary('isNotNull'),
    ops.IfNull: _fixed_arity('ifNull', 2),
    ops.NullIf: _fixed_arity('nullIf', 2),
    ops.Coalesce: _varargs('coalesce'),
    ops.NullIfZero: _null_if_zero,
    ops.ZeroIfNull: _zero_if_null,
}
# Operations this backend cannot compile (mostly window/analytic functions);
# each is mapped to ``_raise_error`` below so users get a clear error.
_unsupported_ops_list = [
    ops.WindowOp,
    ops.DecimalPrecision,
    ops.DecimalScale,
    ops.BaseConvert,
    ops.CumulativeSum,
    ops.CumulativeMin,
    ops.CumulativeMax,
    ops.CumulativeMean,
    ops.CumulativeAny,
    ops.CumulativeAll,
    ops.IdenticalTo,
    ops.RowNumber,
    ops.DenseRank,
    ops.MinRank,
    ops.PercentRank,
    ops.FirstValue,
    ops.LastValue,
    ops.NthValue,
    ops.Lag,
    ops.Lead,
    ops.NTile,
]
# Every unsupported operation raises UnsupportedOperationError when compiled.
_unsupported_ops = {k: _raise_error for k in _unsupported_ops_list}
# Assemble the final registry; later updates win on key collisions.
operation_registry.update(_undocumented_operations)
operation_registry.update(_unsupported_ops)
operation_registry.update(_unary_ops)
operation_registry.update(_binary_infix_ops)
| cloudera/ibis | ibis/backends/clickhouse/registry.py | Python | apache-2.0 | 19,994 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.tools import float_compare
from openerp.report import report_sxw
import openerp
class res_currency(osv.osv):
    """Extend res.currency so a voucher can force a specific exchange rate.

    The voucher form puts ``voucher_special_currency`` and
    ``voucher_special_currency_rate`` in the context; when the special
    currency is among the requested ids, its rate is overridden with the
    voucher's own rate.
    """
    _inherit = "res.currency"
    def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
        # Delegate to the standard rate lookup, then apply the override.
        if context is None:
            context = {}
        res = super(res_currency, self)._get_current_rate(cr, uid, ids, raise_on_no_rate, context=context)
        if context.get('voucher_special_currency') in ids and context.get('voucher_special_currency_rate'):
            res[context.get('voucher_special_currency')] = context.get('voucher_special_currency_rate')
        return res
class account_voucher(osv.osv):
def _check_paid(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = any([((line.account_id.type, 'in', ('receivable', 'payable')) and line.reconcile_id) for line in voucher.move_ids])
return res
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', False)
def _get_period(self, cr, uid, context=None):
if context is None: context = {}
if context.get('period_id', False):
return context.get('period_id')
periods = self.pool.get('account.period').find(cr, uid, context=context)
return periods and periods[0] or False
def _make_journal_search(self, cr, uid, ttype, context=None):
journal_pool = self.pool.get('account.journal')
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
    def _get_journal(self, cr, uid, context=None):
        """Default journal for a new voucher.

        Resolution order: a journal matching the invoice currency (when
        ``invoice_id`` is in context), then an explicit ``journal_id`` or
        ``search_default_journal_id`` from context, else the first journal
        whose type matches ``context['type']`` ('payment'/'receipt' fall
        back to 'bank').
        """
        if context is None: context = {}
        invoice_pool = self.pool.get('account.invoice')
        journal_pool = self.pool.get('account.journal')
        if context.get('invoice_id', False):
            # prefer a journal in the same currency as the invoice being paid
            currency_id = invoice_pool.browse(cr, uid, context['invoice_id'], context=context).currency_id.id
            journal_id = journal_pool.search(cr, uid, [('currency', '=', currency_id)], limit=1)
            return journal_id and journal_id[0] or False
        if context.get('journal_id', False):
            return context.get('journal_id')
        if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
            return context.get('search_default_journal_id')
        ttype = context.get('type', 'bank')
        if ttype in ('payment', 'receipt'):
            ttype = 'bank'
        res = self._make_journal_search(cr, uid, ttype, context=context)
        return res and res[0] or False
    def _get_tax(self, cr, uid, context=None):
        """Default tax: the first tax configured on the default account of
        the context journal (or of the first journal of the context type),
        or False when none applies.
        """
        if context is None: context = {}
        journal_pool = self.pool.get('account.journal')
        journal_id = context.get('journal_id', False)
        if not journal_id:
            # no journal in context: fall back to the first journal of the type
            ttype = context.get('type', 'bank')
            res = journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
            if not res:
                return False
            journal_id = res[0]
        if not journal_id:
            return False
        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        account_id = journal.default_credit_account_id or journal.default_debit_account_id
        if account_id and account_id.tax_ids:
            tax_id = account_id.tax_ids[0].id
            return tax_id
        return False
def _get_payment_rate_currency(self, cr, uid, context=None):
"""
Return the default value for field payment_rate_currency_id: the currency of the journal
if there is one, otherwise the currency of the user's company
"""
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
#no journal given in the context, use company currency as default
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_currency(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_partner(self, cr, uid, context=None):
if context is None: context = {}
return context.get('partner_id', False)
def _get_reference(self, cr, uid, context=None):
if context is None: context = {}
return context.get('reference', False)
def _get_narration(self, cr, uid, context=None):
if context is None: context = {}
return context.get('narration', False)
def _get_amount(self, cr, uid, context=None):
if context is None:
context= {}
return context.get('amount', 0.0)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if context is None: context = {}
return [(r['id'], (r['number'] or _('Voucher'))) for r in self.read(cr, uid, ids, ['number'], context, load='_classic_write')]
    def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
        """Pick the receipt/payment form view from the context and tweak the
        partner field for supplier-side vouchers.

        When no explicit view is requested, ``invoice_type``/``line_type`` in
        the context select the vendor receipt or vendor payment form.  For
        purchase/payment vouchers the partner widget defaults to suppliers.
        """
        mod_obj = self.pool.get('ir.model.data')
        if context is None: context = {}
        if view_type == 'form':
            if not view_id and context.get('invoice_type'):
                if context.get('invoice_type') in ('out_invoice', 'out_refund'):
                    result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
                else:
                    result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
                result = result and result[1] or False
                view_id = result
            if not view_id and context.get('line_type'):
                if context.get('line_type') == 'customer':
                    result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
                else:
                    result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
                result = result and result[1] or False
                view_id = result
        res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
        doc = etree.XML(res['arch'])
        if context.get('type', 'sale') in ('purchase', 'payment'):
            # supplier-side voucher: bias partner search toward suppliers
            nodes = doc.xpath("//field[@name='partner_id']")
            for node in nodes:
                node.set('context', "{'default_customer': 0, 'search_default_supplier': 1, 'default_supplier': 1}")
                if context.get('invoice_type','') in ('in_invoice', 'in_refund'):
                    node.set('string', _("Supplier"))
        res['arch'] = etree.tostring(doc)
        return res
def _compute_writeoff_amount(self, cr, uid, line_dr_ids, line_cr_ids, amount, type):
debit = credit = 0.0
sign = type == 'payment' and -1 or 1
for l in line_dr_ids:
if isinstance(l, dict):
debit += l['amount']
for l in line_cr_ids:
if isinstance(l, dict):
credit += l['amount']
return amount - sign * (credit - debit)
def onchange_line_ids(self, cr, uid, ids, line_dr_ids, line_cr_ids, amount, voucher_currency, type, context=None):
context = context or {}
if not line_dr_ids and not line_cr_ids:
return {'value':{'writeoff_amount': 0.0}}
# resolve lists of commands into lists of dicts
line_dr_ids = self.resolve_2many_commands(cr, uid, 'line_dr_ids', line_dr_ids, ['amount'], context)
line_cr_ids = self.resolve_2many_commands(cr, uid, 'line_cr_ids', line_cr_ids, ['amount'], context)
#compute the field is_multi_currency that is used to hide/display options linked to secondary currency on the voucher
is_multi_currency = False
#loop on the voucher lines to see if one of these has a secondary currency. If yes, we need to see the options
for voucher_line in line_dr_ids+line_cr_ids:
line_id = voucher_line.get('id') and self.pool.get('account.voucher.line').browse(cr, uid, voucher_line['id'], context=context).move_line_id.id or voucher_line.get('move_line_id')
if line_id and self.pool.get('account.move.line').browse(cr, uid, line_id, context=context).currency_id:
is_multi_currency = True
break
return {'value': {'writeoff_amount': self._compute_writeoff_amount(cr, uid, line_dr_ids, line_cr_ids, amount, type), 'is_multi_currency': is_multi_currency}}
def _get_journal_currency(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = voucher.journal_id.currency and voucher.journal_id.currency.id or voucher.company_id.currency_id.id
return res
def _get_writeoff_amount(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
currency_obj = self.pool.get('res.currency')
res = {}
debit = credit = 0.0
for voucher in self.browse(cr, uid, ids, context=context):
sign = voucher.type == 'payment' and -1 or 1
for l in voucher.line_dr_ids:
debit += l.amount
for l in voucher.line_cr_ids:
credit += l.amount
currency = voucher.currency_id or voucher.company_id.currency_id
res[voucher.id] = currency_obj.round(cr, uid, currency, voucher.amount - sign * (credit - debit))
return res
def _paid_amount_in_company_currency(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
ctx = context.copy()
for v in self.browse(cr, uid, ids, context=context):
ctx.update({'date': v.date})
#make a new call to browse in order to have the right date in the context, to get the right currency rate
voucher = self.browse(cr, uid, v.id, context=ctx)
ctx.update({
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': voucher.currency_id.rate * voucher.payment_rate,})
res[voucher.id] = self.pool.get('res.currency').compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, voucher.amount, context=ctx)
return res
    def _get_currency_help_label(self, cr, uid, currency_id, payment_rate, payment_rate_currency_id, context=None):
        """
        This function builds a string to help the users to understand the behavior of the payment rate fields they can specify on the voucher.
        This string is only used to improve the usability in the voucher form view and has no other effect.

        :param currency_id: the voucher currency
        :type currency_id: integer
        :param payment_rate: the value of the payment_rate field of the voucher
        :type payment_rate: float
        :param payment_rate_currency_id: the value of the payment_rate_currency_id field of the voucher
        :type payment_rate_currency_id: integer
        :return: translated string giving a tip on what's the effect of the current payment rate specified
        :rtype: str
        """
        # rml parser only used for currency-aware number formatting
        rml_parser = report_sxw.rml_parse(cr, uid, 'currency_help_label', context=context)
        currency_pool = self.pool.get('res.currency')
        currency_str = payment_rate_str = ''
        if currency_id:
            currency_str = rml_parser.formatLang(1, currency_obj=currency_pool.browse(cr, uid, currency_id, context=context))
        if payment_rate_currency_id:
            payment_rate_str  = rml_parser.formatLang(payment_rate, currency_obj=currency_pool.browse(cr, uid, payment_rate_currency_id, context=context))
        currency_help_label = _('At the operation date, the exchange rate was\n%s = %s') % (currency_str, payment_rate_str)
        return currency_help_label
def _fnct_currency_help_label(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = self._get_currency_help_label(cr, uid, voucher.currency_id.id, voucher.payment_rate, voucher.payment_rate_currency_id.id, context=context)
return res
_name = 'account.voucher'
_description = 'Accounting Voucher'
_inherit = ['mail.thread']
_order = "date desc, id desc"
# _rec_name = 'number'
_track = {
'state': {
'account_voucher.mt_voucher_state_change': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'type':fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Default Type', readonly=True, states={'draft':[('readonly',False)]}),
'name':fields.char('Memo', readonly=True, states={'draft':[('readonly',False)]}),
'date':fields.date('Date', readonly=True, select=True, states={'draft':[('readonly',False)]},
help="Effective date for accounting entries", copy=False),
'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'line_ids':fields.one2many('account.voucher.line', 'voucher_id', 'Voucher Lines',
readonly=True, copy=True,
states={'draft':[('readonly',False)]}),
'line_cr_ids':fields.one2many('account.voucher.line','voucher_id','Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, readonly=True, states={'draft':[('readonly',False)]}),
'line_dr_ids':fields.one2many('account.voucher.line','voucher_id','Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'narration':fields.text('Notes', readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.function(_get_journal_currency, type='many2one', relation='res.currency', string='Currency', readonly=True, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state':fields.selection(
[('draft','Draft'),
('cancel','Cancelled'),
('proforma','Pro-forma'),
('posted','Posted')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Voucher. \
\n* The \'Pro-forma\' when voucher is in Pro-forma status,voucher does not have an voucher number. \
\n* The \'Posted\' status is used when user create voucher,a voucher number is generated and voucher entries are created in account \
\n* The \'Cancelled\' status is used when user cancel voucher.'),
'amount': fields.float('Total', digits_compute=dp.get_precision('Account'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_amount':fields.float('Tax Amount', digits_compute=dp.get_precision('Account'), readonly=True),
'reference': fields.char('Ref #', readonly=True, states={'draft':[('readonly',False)]},
help="Transaction reference number.", copy=False),
'number': fields.char('Number', readonly=True, copy=False),
'move_id':fields.many2one('account.move', 'Account Entry', copy=False),
'move_ids': fields.related('move_id','line_id', type='one2many', relation='account.move.line', string='Journal Items', readonly=True),
'partner_id':fields.many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft':[('readonly',False)]}),
'audit': fields.related('move_id','to_check', type='boolean', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.', relation='account.move', string='To Review'),
'paid': fields.function(_check_paid, string='Paid', type='boolean', help="The Voucher has been totally paid."),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_id': fields.many2one('account.tax', 'Tax', readonly=True, states={'draft':[('readonly',False)]}, domain=[('price_include','=', False)], help="Only for tax excluded from price"),
'pre_line':fields.boolean('Previous Payments ?', required=False),
'date_due': fields.date('Due Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'payment_option':fields.selection([
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance'),
], 'Payment Difference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="This field helps you to choose what you want to do with the eventual difference between the paid amount and the sum of allocated amounts. You can either choose to keep open this difference on the partner's account, or reconcile it with the payment(s)"),
'writeoff_acc_id': fields.many2one('account.account', 'Counterpart Account', readonly=True, states={'draft': [('readonly', False)]}),
'comment': fields.char('Counterpart Comment', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'analytic_id': fields.many2one('account.analytic.account','Write-Off Analytic Account', readonly=True, states={'draft': [('readonly', False)]}),
'writeoff_amount': fields.function(_get_writeoff_amount, string='Difference Amount', type='float', readonly=True, help="Computed as the difference between the amount stated in the voucher and the sum of allocation on the voucher lines."),
'payment_rate_currency_id': fields.many2one('res.currency', 'Payment Rate Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'payment_rate': fields.float('Exchange Rate', digits=(12,6), required=True, readonly=True, states={'draft': [('readonly', False)]},
help='The specific rate that will be used, in this voucher, between the selected currency (in \'Payment Rate Currency\' field) and the voucher currency.'),
'paid_amount_in_company_currency': fields.function(_paid_amount_in_company_currency, string='Paid Amount in Company Currency', type='float', readonly=True),
'is_multi_currency': fields.boolean('Multi Currency Voucher', help='Fields with internal purpose only that depicts if the voucher is a multi currency one or not'),
'currency_help_label': fields.function(_fnct_currency_help_label, type='text', string="Helping Sentence", help="This sentence helps you to know how to specify the payment rate by giving you the direct effect it has"),
}
_defaults = {
'period_id': _get_period,
'partner_id': _get_partner,
'journal_id':_get_journal,
'currency_id': _get_currency,
'reference': _get_reference,
'narration':_get_narration,
'amount': _get_amount,
'type':_get_type,
'state': 'draft',
'pay_now': 'pay_now',
'name': '',
'date': lambda *a: time.strftime('%Y-%m-%d'),
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c),
'tax_id': _get_tax,
'payment_option': 'without_writeoff',
'comment': _('Write-Off'),
'payment_rate': 1.0,
'payment_rate_currency_id': _get_payment_rate_currency,
}
    def compute_tax(self, cr, uid, ids, context=None):
        """Recompute voucher totals and line amounts from the voucher tax.

        For price-excluded taxes the tax is added on top of the line total;
        for price-included taxes each line amount is replaced by its
        tax-inclusive price.  Writes ``amount``/``tax_amount`` on the voucher
        and updates the lines in place.
        """
        tax_pool = self.pool.get('account.tax')
        partner_pool = self.pool.get('res.partner')
        position_pool = self.pool.get('account.fiscal.position')
        voucher_line_pool = self.pool.get('account.voucher.line')
        voucher_pool = self.pool.get('account.voucher')
        if context is None: context = {}
        for voucher in voucher_pool.browse(cr, uid, ids, context=context):
            voucher_amount = 0.0
            for line in voucher.line_ids:
                # reset each line to its untaxed amount before recomputing
                voucher_amount += line.untax_amount or line.amount
                line.amount = line.untax_amount or line.amount
                voucher_line_pool.write(cr, uid, [line.id], {'amount':line.amount, 'untax_amount':line.untax_amount})
            if not voucher.tax_id:
                self.write(cr, uid, [voucher.id], {'amount':voucher_amount, 'tax_amount':0.0})
                continue
            tax = [tax_pool.browse(cr, uid, voucher.tax_id.id, context=context)]
            partner = partner_pool.browse(cr, uid, voucher.partner_id.id, context=context) or False
            # map through the partner's fiscal position, if any
            taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
            tax = tax_pool.browse(cr, uid, taxes, context=context)
            total = voucher_amount
            total_tax = 0.0
            if not tax[0].price_include:
                # tax excluded: add the computed tax on top of the total
                for line in voucher.line_ids:
                    for tax_line in tax_pool.compute_all(cr, uid, tax, line.amount, 1).get('taxes', []):
                        total_tax += tax_line.get('amount', 0.0)
                total += total_tax
            else:
                # tax included: rewrite each line with its tax-inclusive price
                for line in voucher.line_ids:
                    line_total = 0.0
                    line_tax = 0.0
                    for tax_line in tax_pool.compute_all(cr, uid, tax, line.untax_amount or line.amount, 1).get('taxes', []):
                        line_tax += tax_line.get('amount', 0.0)
                        line_total += tax_line.get('price_unit')
                    total_tax += line_tax
                    untax_amount = line.untax_amount or line.amount
                    voucher_line_pool.write(cr, uid, [line.id], {'amount':line_total, 'untax_amount':untax_amount})
            self.write(cr, uid, [voucher.id], {'amount':total, 'tax_amount':total_tax})
        return True
def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None):
context = context or {}
tax_pool = self.pool.get('account.tax')
partner_pool = self.pool.get('res.partner')
position_pool = self.pool.get('account.fiscal.position')
if not line_ids:
line_ids = []
res = {
'tax_amount': False,
'amount': False,
}
voucher_total = 0.0
# resolve the list of commands into a list of dicts
line_ids = self.resolve_2many_commands(cr, uid, 'line_ids', line_ids, ['amount'], context)
total_tax = 0.0
for line in line_ids:
line_amount = 0.0
line_amount = line.get('amount',0.0)
if tax_id:
tax = [tax_pool.browse(cr, uid, tax_id, context=context)]
if partner_id:
partner = partner_pool.browse(cr, uid, partner_id, context=context) or False
taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
tax = tax_pool.browse(cr, uid, taxes, context=context)
if not tax[0].price_include:
for tax_line in tax_pool.compute_all(cr, uid, tax, line_amount, 1).get('taxes', []):
total_tax += tax_line.get('amount')
voucher_total += line_amount
total = voucher_total + total_tax
res.update({
'amount': total or voucher_total,
'tax_amount': total_tax
})
return {
'value': res
}
def onchange_term_id(self, cr, uid, ids, term_id, amount):
term_pool = self.pool.get('account.payment.term')
terms = False
due_date = False
default = {'date_due':False}
if term_id and amount:
terms = term_pool.compute(cr, uid, term_id, amount)
if terms:
due_date = terms[-1][0]
default.update({
'date_due':due_date
})
return {'value':default}
    def onchange_journal_voucher(self, cr, uid, ids, line_ids=False, tax_id=False, price=0.0, partner_id=False, journal_id=False, ttype=False, company_id=False, context=None):
        """On change of journal/partner: derive the counterpart account and
        voucher type from the journal type, then delegate the rest of the
        recomputation to ``onchange_journal``.

        @param partner_id: latest value from user input for field partner_id
        @param args: other arguments
        @param context: context arguments, like lang, time zone
        @return: Returns a dict which contains new values, and context
        """
        default = {
            'value':{},
        }
        if not partner_id or not journal_id:
            return default
        partner_pool = self.pool.get('res.partner')
        journal_pool = self.pool.get('account.journal')
        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        partner = partner_pool.browse(cr, uid, partner_id, context=context)
        account_id = False
        tr_type = False
        if journal.type in ('sale','sale_refund'):
            # customer voucher: counterpart is the partner's receivable
            account_id = partner.property_account_receivable.id
            tr_type = 'sale'
        elif journal.type in ('purchase', 'purchase_refund','expense'):
            # supplier voucher: counterpart is the partner's payable
            account_id = partner.property_account_payable.id
            tr_type = 'purchase'
        else:
            if not journal.default_credit_account_id or not journal.default_debit_account_id:
                raise osv.except_osv(_('Error!'), _('Please define default credit/debit accounts on the journal "%s".') % (journal.name))
            account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
            tr_type = 'receipt'
        default['value']['account_id'] = account_id
        default['value']['type'] = ttype or tr_type
        vals = self.onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id, partner_id, time.strftime('%Y-%m-%d'), price, ttype, company_id, context)
        default['value'].update(vals.get('value'))
        return default
def onchange_rate(self, cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=None):
res = {'value': {'paid_amount_in_company_currency': amount, 'currency_help_label': self._get_currency_help_label(cr, uid, currency_id, rate, payment_rate_currency_id, context=context)}}
if rate and amount and currency_id:
company_currency = self.pool.get('res.company').browse(cr, uid, company_id, context=context).currency_id
#context should contain the date, the payment currency and the payment rate specified on the voucher
amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context)
res['value']['paid_amount_in_company_currency'] = amount_in_company_currency
return res
    def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
        """Recompute the voucher lines and the paid amount in company
        currency when the paid amount changes.

        The voucher currency rate at the given date is read and combined with
        the payment rate through the 'voucher_special_currency*' context keys
        so that recompute_voucher_lines() converts the line amounts with the
        rate chosen on the voucher.
        """
        if context is None:
            context = {}
        ctx = context.copy()
        ctx.update({'date': date})
        #read the voucher rate with the right date in the context
        currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
        voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
        ctx.update({
            'voucher_special_currency': payment_rate_currency_id,
            'voucher_special_currency_rate': rate * voucher_rate})
        res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
        vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
        # merge the paid-amount/help-label values into the recomputed lines
        for key in vals.keys():
            res[key].update(vals[key])
        return res
    def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None):
        """Reset the default payment rate and its currency in *vals*.

        Scans the voucher lines already computed in *vals* for the first line
        whose currency differs from the voucher currency; that currency
        becomes the default payment rate currency and the rate is derived
        from the system rates at *date*. Finally delegates to onchange_rate()
        to refresh the paid amount in company currency, merging its result
        into *vals* (which is mutated and returned).
        """
        if context is None:
            context = {}
        #on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id
        currency_obj = self.pool.get('res.currency')
        journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
        company_id = journal.company_id.id
        payment_rate = 1.0
        currency_id = currency_id or journal.company_id.currency_id.id
        payment_rate_currency_id = currency_id
        ctx = context.copy()
        ctx.update({'date': date})
        o2m_to_loop = False
        # pick the one2many holding the candidate lines for this voucher type
        if ttype == 'receipt':
            o2m_to_loop = 'line_cr_ids'
        elif ttype == 'payment':
            o2m_to_loop = 'line_dr_ids'
        if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']:
            for voucher_line in vals['value'][o2m_to_loop]:
                # skip (2, id) deletion commands; only dict values are new lines
                if not isinstance(voucher_line, dict):
                    continue
                if voucher_line['currency_id'] != currency_id:
                    # we take as default value for the payment_rate_currency_id, the currency of the first invoice that
                    # is not in the voucher currency
                    payment_rate_currency_id = voucher_line['currency_id']
                    tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
                    payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate
                    break
        vals['value'].update({
            'payment_rate': payment_rate,
            'currency_id': currency_id,
            'payment_rate_currency_id': payment_rate_currency_id
        })
        #read the voucher rate with the right date in the context
        voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
        ctx.update({
            'voucher_special_currency_rate': payment_rate * voucher_rate,
            'voucher_special_currency': payment_rate_currency_id})
        res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
        for key in res.keys():
            vals[key].update(res[key])
        return vals
def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None):
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
res = {'value': {'account_id': False}}
if not partner_id or not journal_id:
return res
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
res['value']['account_id'] = account_id
return res
    def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
        """Recompute the counterpart account, voucher lines and payment rate
        when the partner changes.

        For sale/purchase vouchers only the counterpart account is updated;
        for payments/receipts the open move lines and the payment rate are
        recomputed as well, and fields absent from the corresponding view are
        removed from the result to avoid client-side crashes.
        """
        if not journal_id:
            return {}
        if context is None:
            context = {}
        #TODO: comment me and use me directly in the sales/purchases views
        res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
        if ttype in ['sale', 'purchase']:
            return res
        ctx = context.copy()
        # not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate
        ctx.update({'date': date})
        vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
        vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
        # merge both recomputation results into the basic onchange result
        for key in vals.keys():
            res[key].update(vals[key])
        for key in vals2.keys():
            res[key].update(vals2[key])
        #TODO: can probably be removed now
        #TODO: onchange_partner_id() should not returns [pre_line, line_dr_ids, payment_rate...] for type sale, and not
        # [pre_line, line_cr_ids, payment_rate...] for type purchase.
        # We should definitively split account.voucher object in two and make distinct on_change functions. In the
        # meanwhile, bellow lines must be there because the fields aren't present in the view, what crashes if the
        # onchange returns a value for them
        if ttype == 'sale':
            del(res['value']['line_dr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        elif ttype == 'purchase':
            del(res['value']['line_cr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        return res
    def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
        """
        Rebuild the debit/credit voucher lines from the partner's open move
        lines, allocating the voucher amount (*price*) on them oldest-first.

        Existing voucher lines are dropped (one2many (2, id) commands), then
        one candidate line is created per open receivable/payable move line.
        If one line exactly matches the voucher amount (or the invoice given
        in context), the whole amount goes to it; otherwise the amount is
        split across lines of the same currency, oldest first.

        @param partner_id: latest value from user input for field partner_id
        @param args: other arguments
        @param context: context arguments, like lang, time zone; may carry
            'account_id', 'move_line_ids' and 'invoice_id' to restrict the
            candidate lines
        @return: Returns a dict which contains new values, and context
        """
        def _remove_noise_in_o2m():
            """if the line is partially reconciled, then we must pay attention to display it only once and
            in the good o2m.
            This function returns True if the line is considered as noise and should not be displayed
            """
            if line.reconcile_partial_id:
                if currency_id == line.currency_id.id:
                    if line.amount_residual_currency <= 0:
                        return True
                else:
                    if line.amount_residual <= 0:
                        return True
            return False
        if context is None:
            context = {}
        context_multi_currency = context.copy()
        currency_pool = self.pool.get('res.currency')
        move_line_pool = self.pool.get('account.move.line')
        partner_pool = self.pool.get('res.partner')
        journal_pool = self.pool.get('account.journal')
        line_pool = self.pool.get('account.voucher.line')
        #set default values
        default = {
            'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': False},
        }
        # drop existing lines
        line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])])
        for line in line_pool.browse(cr, uid, line_ids, context=context):
            if line.type == 'cr':
                default['value']['line_cr_ids'].append((2, line.id))
            else:
                default['value']['line_dr_ids'].append((2, line.id))
        if not partner_id or not journal_id:
            return default
        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        partner = partner_pool.browse(cr, uid, partner_id, context=context)
        currency_id = currency_id or journal.company_id.currency_id.id
        total_credit = 0.0
        total_debit = 0.0
        account_type = None
        if context.get('account_id'):
            account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type
        if ttype == 'payment':
            if not account_type:
                account_type = 'payable'
            total_debit = price or 0.0
        else:
            total_credit = price or 0.0
            if not account_type:
                account_type = 'receivable'
        # candidate move lines: unreconciled, valid, on the partner's
        # receivable/payable account (unless restricted by the context)
        if not context.get('move_line_ids', False):
            ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context)
        else:
            ids = context['move_line_ids']
        invoice_id = context.get('invoice_id', False)
        company_currency = journal.company_id.currency_id.id
        move_lines_found = []
        #order the lines by most old first
        ids.reverse()
        account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)
        #compute the total debit/credit and look for a matching open amount or invoice
        for line in account_move_lines:
            if _remove_noise_in_o2m():
                continue
            if invoice_id:
                if line.invoice.id == invoice_id:
                    #if the invoice linked to the voucher line is equal to the invoice_id in context
                    #then we assign the amount on that line, whatever the other voucher lines
                    move_lines_found.append(line.id)
            elif currency_id == company_currency:
                #otherwise treatments is the same but with other field names
                if line.amount_residual == price:
                    #if the amount residual is equal the amount voucher, we assign it to that voucher
                    #line, whatever the other voucher lines
                    move_lines_found.append(line.id)
                    break
                #otherwise we will split the voucher amount on each line (by most old first)
                total_credit += line.credit or 0.0
                total_debit += line.debit or 0.0
            elif currency_id == line.currency_id.id:
                if line.amount_residual_currency == price:
                    move_lines_found.append(line.id)
                    break
                total_credit += line.credit and line.amount_currency or 0.0
                total_debit += line.debit and line.amount_currency or 0.0
        remaining_amount = price
        #voucher line creation
        for line in account_move_lines:
            if _remove_noise_in_o2m():
                continue
            if line.currency_id and currency_id == line.currency_id.id:
                amount_original = abs(line.amount_currency)
                amount_unreconciled = abs(line.amount_residual_currency)
            else:
                #always use the amount booked in the company currency as the basis of the conversion into the voucher currency
                amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency)
                amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency)
            line_currency_id = line.currency_id and line.currency_id.id or company_currency
            rs = {
                'name':line.move_id.name,
                'type': line.credit and 'dr' or 'cr',
                'move_line_id':line.id,
                'account_id':line.account_id.id,
                'amount_original': amount_original,
                'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0,
                'date_original':line.date,
                'date_due':line.date_maturity,
                'amount_unreconciled': amount_unreconciled,
                'currency_id': line_currency_id,
            }
            remaining_amount -= rs['amount']
            #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount
            #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency
            if not move_lines_found:
                if currency_id == line_currency_id:
                    if line.credit:
                        amount = min(amount_unreconciled, abs(total_debit))
                        rs['amount'] = amount
                        total_debit -= amount
                    else:
                        amount = min(amount_unreconciled, abs(total_credit))
                        rs['amount'] = amount
                        total_credit -= amount
            # a fully-allocated line is proposed for full reconciliation
            if rs['amount_unreconciled'] == rs['amount']:
                rs['reconcile'] = True
            if rs['type'] == 'cr':
                default['value']['line_cr_ids'].append(rs)
            else:
                default['value']['line_dr_ids'].append(rs)
            if len(default['value']['line_cr_ids']) > 0:
                default['value']['pre_line'] = 1
            elif len(default['value']['line_dr_ids']) > 0:
                default['value']['pre_line'] = 1
            default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype)
        return default
def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None):
if context is None:
context = {}
res = {'value': {}}
if currency_id:
#set the default payment rate of the voucher and compute the paid amount in company currency
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
vals = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
    def onchange_date(self, cr, uid, ids, date, currency_id, payment_rate_currency_id, amount, company_id, context=None):
        """
        Set the accounting period matching the new date and recompute the
        payment rate and paid amount in company currency at that date.

        @param date: latest value from user input for field date
        @param args: other arguments
        @param context: context arguments, like lang, time zone
        @return: Returns a dict which contains new values, and context
        """
        if context is None:
            context ={}
        res = {'value': {}}
        #set the period of the voucher
        period_pool = self.pool.get('account.period')
        currency_obj = self.pool.get('res.currency')
        ctx = context.copy()
        ctx.update({'company_id': company_id, 'account_period_prefer_normal': True})
        voucher_currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
        pids = period_pool.find(cr, uid, date, context=ctx)
        if pids:
            res['value'].update({'period_id':pids[0]})
        if payment_rate_currency_id:
            ctx.update({'date': date})
            payment_rate = 1.0
            if payment_rate_currency_id != currency_id:
                # ratio of the payment currency rate to the voucher currency
                # rate at the new date
                tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
                payment_rate = tmp / currency_obj.browse(cr, uid, voucher_currency_id, context=ctx).rate
            vals = self.onchange_payment_rate_currency(cr, uid, ids, voucher_currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=context)
            vals['value'].update({'payment_rate': payment_rate})
            for key in vals.keys():
                res[key].update(vals[key])
        return res
    def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):
        """Recompute tax, currency and partner-dependent values when the
        journal changes.

        Returns False when no journal is selected (nothing to recompute);
        otherwise returns the usual onchange dict. Note that the *tax_id*
        argument is ignored: the tax is always re-read from the journal's
        default account.
        """
        if context is None:
            context = {}
        if not journal_id:
            return False
        journal_pool = self.pool.get('account.journal')
        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        account_id = journal.default_credit_account_id or journal.default_debit_account_id
        tax_id = False
        if account_id and account_id.tax_ids:
            # default tax comes from the journal's default account
            tax_id = account_id.tax_ids[0].id
        vals = {'value':{} }
        if ttype in ('sale', 'purchase'):
            vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context)
            vals['value'].update({'tax_id':tax_id,'amount': amount})
        currency_id = False
        if journal.currency:
            currency_id = journal.currency.id
        else:
            currency_id = journal.company_id.currency_id.id
        vals['value'].update({'currency_id': currency_id, 'payment_rate_currency_id': currency_id})
        #in case we want to register the payment directly from an invoice, it's confusing to allow to switch the journal
        #without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid
        #this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency.
        if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'):
            vals['value']['amount'] = 0
            amount = 0
        if partner_id:
            res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context)
            for key in res.keys():
                vals[key].update(res[key])
        return vals
def button_proforma_voucher(self, cr, uid, ids, context=None):
self.signal_workflow(cr, uid, ids, 'proforma_voucher')
return {'type': 'ir.actions.act_window_close'}
def proforma_voucher(self, cr, uid, ids, context=None):
self.action_move_line_create(cr, uid, ids, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
self.create_workflow(cr, uid, ids)
self.write(cr, uid, ids, {'state':'draft'})
return True
    def cancel_voucher(self, cr, uid, ids, context=None):
        """Cancel the given vouchers.

        For every move line of each voucher, the full reconciliation is
        removed and the remaining lines (without the voucher's own line) are
        re-reconciled partially; the voucher's accounting move is then
        cancelled and deleted, and the voucher is set to the 'cancel' state.
        """
        reconcile_pool = self.pool.get('account.move.reconcile')
        move_pool = self.pool.get('account.move')
        move_line_pool = self.pool.get('account.move.line')
        for voucher in self.browse(cr, uid, ids, context=context):
            # refresh to make sure you don't unlink an already removed move
            voucher.refresh()
            for line in voucher.move_ids:
                # refresh to make sure you don't unreconcile an already unreconciled entry
                line.refresh()
                if line.reconcile_id:
                    move_lines = [move_line.id for move_line in line.reconcile_id.line_id]
                    move_lines.remove(line.id)
                    reconcile_pool.unlink(cr, uid, [line.reconcile_id.id])
                    if len(move_lines) >= 2:
                        # re-reconcile the remaining lines together, without
                        # the voucher's own line
                        move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto',context=context)
            if voucher.move_id:
                move_pool.button_cancel(cr, uid, [voucher.move_id.id])
                move_pool.unlink(cr, uid, [voucher.move_id.id])
        res = {
            'state':'cancel',
            'move_id':False,
        }
        self.write(cr, uid, ids, res)
        return True
def unlink(self, cr, uid, ids, context=None):
for t in self.read(cr, uid, ids, ['state'], context=context):
if t['state'] not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete voucher(s) which are already opened or paid.'))
return super(account_voucher, self).unlink(cr, uid, ids, context=context)
def onchange_payment(self, cr, uid, ids, pay_now, journal_id, partner_id, ttype='sale'):
res = {}
if not partner_id:
return res
res = {}
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
if pay_now == 'pay_later':
partner = partner_pool.browse(cr, uid, partner_id)
journal = journal_pool.browse(cr, uid, journal_id)
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
if account_id:
res['account_id'] = account_id
return {'value':res}
def _sel_context(self, cr, uid, voucher_id, context=None):
"""
Select the context to use accordingly if it needs to be multicurrency or not.
:param voucher_id: Id of the actual voucher
:return: The returned context will be the same as given in parameter if the voucher currency is the same
than the company currency, otherwise it's a copy of the parameter with an extra key 'date' containing
the date of the voucher.
:rtype: dict
"""
company_currency = self._get_company_currency(cr, uid, voucher_id, context)
current_currency = self._get_current_currency(cr, uid, voucher_id, context)
if current_currency <> company_currency:
context_multi_currency = context.copy()
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context)
context_multi_currency.update({'date': voucher.date})
return context_multi_currency
return context
    def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None):
        '''
        Return a dict to be use to create the first account move line of given voucher.

        :param voucher_id: Id of voucher what we are creating account_move.
        :param move_id: Id of account move where this line will be added.
        :param company_currency: id of currency of the company to which the voucher belong
        :param current_currency: id of currency of the voucher
        :return: mapping between fieldname and value of account move line to create
        :rtype: dict
        '''
        voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
        debit = credit = 0.0
        # TODO: is there any other alternative then the voucher type ??
        # ANSWER: We can have payment and receipt "In Advance".
        # TODO: Make this logic available.
        # -for sale, purchase we have but for the payment and receipt we do not have as based on the bank/cash journal we can not know its payment or receipt
        if voucher.type in ('purchase', 'payment'):
            credit = voucher.paid_amount_in_company_currency
        elif voucher.type in ('sale', 'receipt'):
            debit = voucher.paid_amount_in_company_currency
        # negative amounts (refunds) are flipped to the opposite side so that
        # debit/credit stay non-negative
        if debit < 0: credit = -debit; debit = 0.0
        if credit < 0: debit = -credit; credit = 0.0
        sign = debit - credit < 0 and -1 or 1
        #set the first line of the voucher
        move_line = {
                'name': voucher.name or '/',
                'debit': debit,
                'credit': credit,
                'account_id': voucher.account_id.id,
                'move_id': move_id,
                'journal_id': voucher.journal_id.id,
                'period_id': voucher.period_id.id,
                'partner_id': voucher.partner_id.id,
                'currency_id': company_currency <> current_currency and current_currency or False,
                'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds
                    if company_currency != current_currency else 0.0),
                'date': voucher.date,
                'date_maturity': voucher.date_due
            }
        return move_line
    def account_move_get(self, cr, uid, voucher_id, context=None):
        '''
        This method prepare the creation of the account move related to the given voucher.

        :param voucher_id: Id of voucher for which we are creating account_move.
        :return: mapping between fieldname and value of account move to create
        :rtype: dict
        :raises osv.except_osv: if the journal sequence is missing or inactive
        '''
        seq_obj = self.pool.get('ir.sequence')
        voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
        if voucher.number:
            # the voucher was already numbered: reuse that number
            name = voucher.number
        elif voucher.journal_id.sequence_id:
            if not voucher.journal_id.sequence_id.active:
                raise osv.except_osv(_('Configuration Error !'),
                    _('Please activate the sequence of selected journal !'))
            # the sequence is consumed in the fiscal year of the voucher period
            c = dict(context)
            c.update({'fiscalyear_id': voucher.period_id.fiscalyear_id.id})
            name = seq_obj.next_by_id(cr, uid, voucher.journal_id.sequence_id.id, context=c)
        else:
            raise osv.except_osv(_('Error!'),
                _('Please define a sequence on the journal.'))
        if not voucher.reference:
            ref = name.replace('/','')
        else:
            ref = voucher.reference
        move = {
            'name': name,
            'journal_id': voucher.journal_id.id,
            'narration': voucher.narration,
            'date': voucher.date,
            'ref': ref,
            'period_id': voucher.period_id.id,
        }
        return move
    def _get_exchange_lines(self, cr, uid, line, move_id, amount_residual, company_currency, current_currency, context=None):
        '''
        Prepare the two lines in company currency due to currency rate difference.

        :param line: browse record of the voucher.line for which we want to create currency rate difference accounting
            entries
        :param move_id: Account move wher the move lines will be.
        :param amount_residual: Amount to be posted. A positive amount is a
            loss (booked on the expense exchange account), a negative one a
            gain (booked on the income exchange account).
        :param company_currency: id of currency of the company to which the voucher belong
        :param current_currency: id of currency of the voucher
        :return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
        :rtype: tuple of dict
        '''
        if amount_residual > 0:
            account_id = line.voucher_id.company_id.expense_currency_exchange_account_id
            if not account_id:
                # redirect the user to the accounts configuration screen
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
                msg = _("You should configure the 'Loss Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        else:
            account_id = line.voucher_id.company_id.income_currency_exchange_account_id
            if not account_id:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
                msg = _("You should configure the 'Gain Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        # Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
        # the receivable/payable account may have a secondary currency, which render this field mandatory
        if line.account_id.currency_id:
            account_currency_id = line.account_id.currency_id.id
        else:
            account_currency_id = company_currency <> current_currency and current_currency or False
        move_line = {
            'journal_id': line.voucher_id.journal_id.id,
            'period_id': line.voucher_id.period_id.id,
            'name': _('change')+': '+(line.name or '/'),
            'account_id': line.account_id.id,
            'move_id': move_id,
            'partner_id': line.voucher_id.partner_id.id,
            'currency_id': account_currency_id,
            'amount_currency': 0.0,
            'quantity': 1,
            'credit': amount_residual > 0 and amount_residual or 0.0,
            'debit': amount_residual < 0 and -amount_residual or 0.0,
            'date': line.voucher_id.date,
        }
        # the counterpart carries the same amount on the opposite side, on
        # the gain/loss exchange account
        move_line_counterpart = {
            'journal_id': line.voucher_id.journal_id.id,
            'period_id': line.voucher_id.period_id.id,
            'name': _('change')+': '+(line.name or '/'),
            'account_id': account_id.id,
            'move_id': move_id,
            'amount_currency': 0.0,
            'partner_id': line.voucher_id.partner_id.id,
            'currency_id': account_currency_id,
            'quantity': 1,
            'debit': amount_residual > 0 and amount_residual or 0.0,
            'credit': amount_residual < 0 and -amount_residual or 0.0,
            'date': line.voucher_id.date,
        }
        return (move_line, move_line_counterpart)
def _convert_amount(self, cr, uid, amount, voucher_id, context=None):
'''
This function convert the amount given in company currency. It takes either the rate in the voucher (if the
payment_rate_currency_id is relevant) either the rate encoded in the system.
:param amount: float. The amount to convert
:param voucher: id of the voucher on which we want the conversion
:param context: to context to use for the conversion. It may contain the key 'date' set to the voucher date
field in order to select the good rate to use.
:return: the amount in the currency of the voucher's company
:rtype: float
'''
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
voucher = self.browse(cr, uid, voucher_id, context=context)
return currency_obj.compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, amount, context=context)
    def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None):
        '''
        Create one account move line, on the given account move, per voucher line where amount is not 0.0.
        It returns Tuple with tot_line what is total of difference between debit and credit and
        a list of lists with ids to be reconciled with this format (total_deb_cred,list_of_lists).

        :param voucher_id: Voucher id what we are working with
        :param line_total: Amount of the first line, which correspond to the amount we should totally split among all voucher lines.
        :param move_id: Account move wher those lines will be joined.
        :param company_currency: id of currency of the company to which the voucher belong
        :param current_currency: id of currency of the voucher
        :return: Tuple build as (remaining amount not allocated on voucher lines, list of account_move_line created in this method)
        :rtype: tuple(float, list of int)
        '''
        if context is None:
            context = {}
        move_line_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')
        tax_obj = self.pool.get('account.tax')
        tot_line = line_total
        rec_lst_ids = []
        date = self.read(cr, uid, [voucher_id], ['date'], context=context)[0]['date']
        ctx = context.copy()
        ctx.update({'date': date})
        voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context=ctx)
        voucher_currency = voucher.journal_id.currency or voucher.company_id.currency_id
        # pass the rate encoded on the voucher to res.currency.compute()
        # through the special context keys
        ctx.update({
            'voucher_special_currency_rate': voucher_currency.rate * voucher.payment_rate ,
            'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,})
        # accounting decimal precision, used to detect original 0/0 lines
        prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        for line in voucher.line_ids:
            #create one move line per voucher line where amount is not 0.0
            # AND (second part of the clause) only if the original move line was not having debit = credit = 0 (which is a legal value)
            if not line.amount and not (line.move_line_id and not float_compare(line.move_line_id.debit, line.move_line_id.credit, precision_digits=prec) and not float_compare(line.move_line_id.debit, 0.0, precision_digits=prec)):
                continue
            # convert the amount set on the voucher line into the currency of the voucher's company
            # this calls res_curreny.compute() with the right context, so that it will take either the rate on the voucher if it is relevant or will use the default behaviour
            amount = self._convert_amount(cr, uid, line.untax_amount or line.amount, voucher.id, context=ctx)
            # if the amount encoded in voucher is equal to the amount unreconciled, we need to compute the
            # currency rate difference
            if line.amount == line.amount_unreconciled:
                if not line.move_line_id:
                    raise osv.except_osv(_('Wrong voucher line'),_("The invoice you are willing to pay is not valid anymore."))
                sign = line.type =='dr' and -1 or 1
                currency_rate_difference = sign * (line.move_line_id.amount_residual - amount)
            else:
                currency_rate_difference = 0.0
            move_line = {
                'journal_id': voucher.journal_id.id,
                'period_id': voucher.period_id.id,
                'name': line.name or '/',
                'account_id': line.account_id.id,
                'move_id': move_id,
                'partner_id': voucher.partner_id.id,
                'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
                'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
                'quantity': 1,
                'credit': 0.0,
                'debit': 0.0,
                'date': voucher.date
            }
            # a negative amount flips the line to the opposite side
            if amount < 0:
                amount = -amount
                if line.type == 'dr':
                    line.type = 'cr'
                else:
                    line.type = 'dr'
            if (line.type=='dr'):
                tot_line += amount
                move_line['debit'] = amount
            else:
                tot_line -= amount
                move_line['credit'] = amount
            if voucher.tax_id and voucher.type in ('sale', 'purchase'):
                move_line.update({
                    'account_tax_id': voucher.tax_id.id,
                })
            if move_line.get('account_tax_id', False):
                tax_data = tax_obj.browse(cr, uid, [move_line['account_tax_id']], context=context)[0]
                if not (tax_data.base_code_id and tax_data.tax_code_id):
                    raise osv.except_osv(_('No Account Base Code and Account Tax Code!'),_("You have to configure account base code and account tax code on the '%s' tax!") % (tax_data.name))
            # compute the amount in foreign currency
            foreign_currency_diff = 0.0
            amount_currency = False
            if line.move_line_id:
                # We want to set it on the account move line as soon as the original line had a foreign currency
                if line.move_line_id.currency_id and line.move_line_id.currency_id.id != company_currency:
                    # we compute the amount in that foreign currency.
                    if line.move_line_id.currency_id.id == current_currency:
                        # if the voucher and the voucher line share the same currency, there is no computation to do
                        sign = (move_line['debit'] - move_line['credit']) < 0 and -1 or 1
                        amount_currency = sign * (line.amount)
                    else:
                        # if the rate is specified on the voucher, it will be used thanks to the special keys in the context
                        # otherwise we use the rates of the system
                        amount_currency = currency_obj.compute(cr, uid, company_currency, line.move_line_id.currency_id.id, move_line['debit']-move_line['credit'], context=ctx)
                if line.amount == line.amount_unreconciled:
                    foreign_currency_diff = line.move_line_id.amount_residual_currency - abs(amount_currency)
            move_line['amount_currency'] = amount_currency
            voucher_line = move_line_obj.create(cr, uid, move_line)
            # lines that will be reconciled together for this voucher line
            rec_ids = [voucher_line, line.move_line_id.id]
            if not currency_obj.is_zero(cr, uid, voucher.company_id.currency_id, currency_rate_difference):
                # Change difference entry in company currency
                exch_lines = self._get_exchange_lines(cr, uid, line, move_id, currency_rate_difference, company_currency, current_currency, context=context)
                new_id = move_line_obj.create(cr, uid, exch_lines[0],context)
                move_line_obj.create(cr, uid, exch_lines[1], context)
                rec_ids.append(new_id)
            if line.move_line_id and line.move_line_id.currency_id and not currency_obj.is_zero(cr, uid, line.move_line_id.currency_id, foreign_currency_diff):
                # Change difference entry in voucher currency
                move_line_foreign_currency = {
                    'journal_id': line.voucher_id.journal_id.id,
                    'period_id': line.voucher_id.period_id.id,
                    'name': _('change')+': '+(line.name or '/'),
                    'account_id': line.account_id.id,
                    'move_id': move_id,
                    'partner_id': line.voucher_id.partner_id.id,
                    'currency_id': line.move_line_id.currency_id.id,
                    'amount_currency': -1 * foreign_currency_diff,
                    'quantity': 1,
                    'credit': 0.0,
                    'debit': 0.0,
                    'date': line.voucher_id.date,
                }
                new_id = move_line_obj.create(cr, uid, move_line_foreign_currency, context=context)
                rec_ids.append(new_id)
            if line.move_line_id.id:
                rec_lst_ids.append(rec_ids)
        return (tot_line, rec_lst_ids)
def writeoff_move_line_get(self, cr, uid, voucher_id, line_total, move_id, name, company_currency, current_currency, context=None):
    '''
    Build the values of the write-off move line for a voucher, if any.

    Returns an empty dict when the residual amount is zero in the voucher
    currency (nothing to write off).

    :param voucher_id: id of the voucher for which the account.move is created
    :param line_total: amount remaining to be allocated on lines
    :param move_id: id of the account.move this line will be added to
    :param name: description of the account move line
    :param company_currency: id of the currency of the company owning the voucher
    :param current_currency: id of the currency of the voucher
    :return: mapping between field name and value of the move line to create
    :rtype: dict
    '''
    currency_obj = self.pool.get('res.currency')
    move_line = {}

    voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context)
    current_currency_obj = voucher.currency_id or voucher.journal_id.company_id.currency_id

    # Only create a write-off line when a non-zero residual remains.
    if not currency_obj.is_zero(cr, uid, current_currency_obj, line_total):
        diff = line_total
        account_id = False
        write_off_name = ''
        if voucher.payment_option == 'with_writeoff':
            # Explicit write-off requested: book the residual on the
            # configured write-off account.
            account_id = voucher.writeoff_acc_id.id
            write_off_name = voucher.comment
        elif voucher.partner_id:
            # Keep the residual open on the partner's receivable/payable.
            if voucher.type in ('sale', 'receipt'):
                account_id = voucher.partner_id.property_account_receivable.id
            else:
                account_id = voucher.partner_id.property_account_payable.id
        else:
            # fallback on account of voucher
            account_id = voucher.account_id.id
        sign = voucher.type == 'payment' and -1 or 1
        # NOTE: `<>` (obsolete Python 2 spelling of `!=`, removed in
        # Python 3) replaced by `!=`; semantics are identical.
        move_line = {
            'name': write_off_name or name,
            'account_id': account_id,
            'move_id': move_id,
            'partner_id': voucher.partner_id.id,
            'date': voucher.date,
            'credit': diff > 0 and diff or 0.0,
            'debit': diff < 0 and -diff or 0.0,
            # Currency fields are only filled in a multi-currency situation.
            'amount_currency': company_currency != current_currency and (sign * -1 * voucher.writeoff_amount) or 0.0,
            'currency_id': company_currency != current_currency and current_currency or False,
            'analytic_account_id': voucher.analytic_id and voucher.analytic_id.id or False,
        }

    return move_line
def _get_company_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the actual company.
:param voucher_id: Id of the voucher what i want to obtain company currency.
:return: currency id of the company of the voucher
:rtype: int
'''
return self.pool.get('account.voucher').browse(cr,uid,voucher_id,context).journal_id.company_id.currency_id.id
def _get_current_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher.
:param voucher_id: Id of the voucher what i want to obtain current currency.
:return: currency id of the voucher
:rtype: int
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
return voucher.currency_id.id or self._get_company_currency(cr,uid,voucher.id,context)
def action_move_line_create(self, cr, uid, ids, context=None):
    '''
    Confirm the vouchers given in ids and create the journal entries for each of them
    '''
    if context is None:
        context = {}
    move_pool = self.pool.get('account.move')
    move_line_pool = self.pool.get('account.move.line')
    for voucher in self.browse(cr, uid, ids, context=context):
        # Force all operations on the voucher's own company.
        local_context = dict(context, force_company=voucher.journal_id.company_id.id)
        if voucher.move_id:
            # Already has a journal entry: never create it twice.
            continue
        company_currency = self._get_company_currency(cr, uid, voucher.id, context)
        current_currency = self._get_current_currency(cr, uid, voucher.id, context)
        # we select the context to use accordingly if it's a multicurrency case or not
        context = self._sel_context(cr, uid, voucher.id, context)
        # But for the operations made by _convert_amount, we always need to give the date in the context
        ctx = context.copy()
        ctx.update({'date': voucher.date})
        # Create the account move record.
        move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
        # Get the name of the account_move just created
        name = move_pool.browse(cr, uid, move_id, context=context).name
        # Create the first line of the voucher
        move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context)
        move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context)
        line_total = move_line_brw.debit - move_line_brw.credit
        rec_list_ids = []
        # Taxes adjust the amount left to allocate, in company currency.
        if voucher.type == 'sale':
            line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
        elif voucher.type == 'purchase':
            line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
        # Create one move line per voucher line where amount is not 0.0
        line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context)

        # Create the writeoff line if needed
        ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context)
        if ml_writeoff:
            move_line_pool.create(cr, uid, ml_writeoff, local_context)
        # We post the voucher.
        self.write(cr, uid, [voucher.id], {
            'move_id': move_id,
            'state': 'posted',
            'number': name,
        })
        if voucher.journal_id.entry_posted:
            move_pool.post(cr, uid, [move_id], context={})
        # We automatically reconcile the account move lines.
        reconcile = False
        for rec_ids in rec_list_ids:
            if len(rec_ids) >= 2:
                reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id)
    return True
class account_voucher_line(osv.osv):
    # One allocation line of a voucher: links an amount to an open journal
    # item (move_line_id) that the payment/receipt should (partially) settle.
    _name = 'account.voucher.line'
    _description = 'Voucher Lines'
    _order = "move_line_id"

    # If the payment is in the same currency than the invoice, we keep the same amount
    # Otherwise, we compute from invoice currency to payment currency
    def _compute_balance(self, cr, uid, ids, name, args, context=None):
        # Functional-field getter for 'amount_original'/'amount_unreconciled'
        # (multi='dc'): both values are expressed in the voucher currency.
        currency_pool = self.pool.get('res.currency')
        rs_data = {}
        for line in self.browse(cr, uid, ids, context=context):
            ctx = context.copy()
            ctx.update({'date': line.voucher_id.date})
            # Special context keys let res.currency use the voucher's own
            # payment rate instead of the system rate.
            voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']
            ctx.update({
                'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,
                'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})
            res = {}
            company_currency = line.voucher_id.journal_id.company_id.currency_id.id
            voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency
            move_line = line.move_line_id or False

            if not move_line:
                res['amount_original'] = 0.0
                res['amount_unreconciled'] = 0.0
            elif move_line.currency_id and voucher_currency==move_line.currency_id.id:
                # Same currency on the journal item: reuse its amounts as-is.
                res['amount_original'] = abs(move_line.amount_currency)
                res['amount_unreconciled'] = abs(move_line.amount_residual_currency)
            else:
                #always use the amount booked in the company currency as the basis of the conversion into the voucher currency
                res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)
                res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)

            rs_data[line.id] = res
        return rs_data

    def _currency_id(self, cr, uid, ids, name, args, context=None):
        '''
        This function returns the currency id of a voucher line. It's either the currency of the
        associated move line (if any) or the currency of the voucher or the company currency.
        '''
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            move_line = line.move_line_id
            if move_line:
                res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id
            else:
                res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id
        return res

    _columns = {
        'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'),
        'name':fields.char('Description',),
        'account_id':fields.many2one('account.account','Account', required=True),
        'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),
        'untax_amount':fields.float('Untax Amount'),
        'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')),
        'reconcile': fields.boolean('Full Reconcile'),
        'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'move_line_id': fields.many2one('account.move.line', 'Journal Item', copy=False),
        'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1),
        'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1),
        'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),
        'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')),
        'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True),
        'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True),
    }
    _defaults = {
        'name': '',
    }

    def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):
        # Ticking "Full Reconcile" allocates the whole open balance.
        vals = {'amount': 0.0}
        if reconcile:
            vals = { 'amount': amount_unreconciled}
        return {'value': vals}

    def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):
        # Keep the "Full Reconcile" flag in sync with the entered amount.
        vals = {}
        if amount:
            vals['reconcile'] = (amount == amount_unreconciled)
        return {'value': vals}

    def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None):
        """
        Returns a dict that contains new values and context

        @param move_line_id: latest value from user input for field move_line_id
        @param args: other arguments
        @param context: context arguments, like lang, time zone

        @return: Returns a dict which contains new values, and context
        """
        res = {}
        move_line_pool = self.pool.get('account.move.line')
        if move_line_id:
            move_line = move_line_pool.browse(cr, user, move_line_id, context=context)
            # A credit on the journal item is settled by a debit line here,
            # and vice versa.
            if move_line.credit:
                ttype = 'dr'
            else:
                ttype = 'cr'
            res.update({
                'account_id': move_line.account_id.id,
                'type': ttype,
                'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id,
            })
        return {
            'value':res,
        }

    def default_get(self, cr, user, fields_list, context=None):
        """
        Returns default values for fields

        @param fields_list: list of fields, for which default values are required to be read
        @param context: context arguments, like lang, time zone

        @return: Returns a dict that contains default values for fields
        """
        if context is None:
            context = {}
        journal_id = context.get('journal_id', False)
        partner_id = context.get('partner_id', False)
        journal_pool = self.pool.get('account.journal')
        partner_pool = self.pool.get('res.partner')
        values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context)
        if (not journal_id) or ('account_id' not in fields_list):
            return values
        journal = journal_pool.browse(cr, user, journal_id, context=context)
        account_id = False
        ttype = 'cr'
        # Pick a sensible default account/side from the journal type,
        # falling back on the partner's receivable/payable accounts.
        if journal.type in ('sale', 'sale_refund'):
            account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False
            ttype = 'cr'
        elif journal.type in ('purchase', 'expense', 'purchase_refund'):
            account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False
            ttype = 'dr'
        elif partner_id:
            partner = partner_pool.browse(cr, user, partner_id, context=context)
            if context.get('type') == 'payment':
                ttype = 'dr'
                account_id = partner.property_account_payable.id
            elif context.get('type') == 'receipt':
                account_id = partner.property_account_receivable.id

        values.update({
            'account_id':account_id,
            'type':ttype
        })
        return values
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/account_voucher/account_voucher.py | Python | apache-2.0 | 83,973 |
print "Happy New Year" | alihesari/Happy-New-Year | happy-new-year.py | Python | mit | 22 |
import logging
import os
from contextlib import contextmanager
# Shared logging format: qualified logger/module/function prefix, then message.
LOG_FORMAT = "%(name)s.%(module)s.%(funcName)s: %(message)s"


def enable_logging():
    """Turn on DEBUG-level logging for the whole process."""
    logging.basicConfig(
        level=logging.DEBUG,
        format=LOG_FORMAT,
    )
# Allow from-the-start debugging (vs toggled during load of tasks module) via
# shell env var.
if os.environ.get('INVOKE_DEBUG'):
    enable_logging()

# Add top level logger functions to global namespace. Meh.
# (Only `debug` is exported; the loop keeps the exported set in one place.)
log = logging.getLogger('invoke')
for x in ('debug',):
    globals()[x] = getattr(log, x)
def sort_names(names):
    """
    Sort task ``names`` by nesting depth & then as regular strings.
    """
    def depth_then_name(task_name):
        # Fewer dots == shallower namespace; ties break alphabetically.
        return (task_name.count('.'), task_name)

    return sorted(names, key=depth_then_name)
# TODO: Make part of public API sometime
@contextmanager
def cd(where):
    """Temporarily change the working directory to ``where``."""
    previous = os.getcwd()
    os.chdir(where)
    try:
        yield
    finally:
        # Always restore the prior directory, even if the body raised.
        os.chdir(previous)
| frol/invoke | invoke/util.py | Python | bsd-2-clause | 879 |
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``customdatasrc`` property of ``pointcloud`` traces."""

    def __init__(self, plotly_name="customdatasrc", parent_name="pointcloud", **kwargs):
        super(CustomdatasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/pointcloud/_customdatasrc.py | Python | mit | 459 |
# proxy module
from __future__ import absolute_import
from chaco.function_data_source import *
| enthought/etsproxy | enthought/chaco/function_data_source.py | Python | bsd-3-clause | 95 |
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for datastore module.
"""
import mock
from oslo_utils import units
from cinder import test
from cinder.volume.drivers.vmware import datastore as ds_sel
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
class DatastoreTest(test.TestCase):
    """Unit tests for Datastore."""

    def setUp(self):
        super(DatastoreTest, self).setUp()
        self._session = mock.Mock()
        self._vops = mock.Mock()
        # Selector under test; 1024 is the max objects page size.
        self._ds_sel = ds_sel.DatastoreSelector(
            self._vops, self._session, 1024)

    @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
    def test_get_profile_id(self, get_profile_id_by_name):
        # Profile lookup should delegate to oslo.vmware and return its id.
        profile_id = mock.sentinel.profile_id
        get_profile_id_by_name.return_value = profile_id
        profile_name = mock.sentinel.profile_name

        self.assertEqual(profile_id, self._ds_sel.get_profile_id(profile_name))
        get_profile_id_by_name.assert_called_once_with(self._session,
                                                       profile_name)

    @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
    def test_get_profile_id_with_invalid_profile(self, get_profile_id_by_name):
        # An unknown profile name must raise ProfileNotFoundException.
        get_profile_id_by_name.return_value = None
        profile_name = mock.sentinel.profile_name

        self.assertRaises(vmdk_exceptions.ProfileNotFoundException,
                          self._ds_sel.get_profile_id,
                          profile_name)
        get_profile_id_by_name.assert_called_once_with(self._session,
                                                       profile_name)

    def _create_datastore(self, value):
        # Helper: mock datastore managed-object reference.
        return mock.Mock(name=value, value=value)

    def _create_summary(
            self, ds, free_space=units.Mi, _type=ds_sel.DatastoreType.VMFS,
            capacity=2 * units.Mi, accessible=True):
        # Helper: mock DatastoreSummary with sensible defaults.
        return mock.Mock(datastore=ds, freeSpace=free_space, type=_type,
                         capacity=capacity, accessible=accessible,
                         name=ds.value)

    def _create_host(self, value):
        # Helper: mock HostSystem managed-object reference.
        host = mock.Mock(spec=['_type', 'value'], name=value)
        host._type = 'HostSystem'
        host.value = value
        return host

    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_filter_by_profile')
    def test_filter_datastores(self, filter_by_profile):
        # Exercise every rejection rule at once; only ds10 should survive.
        host1 = self._create_host('host-1')
        host2 = self._create_host('host-2')
        host3 = self._create_host('host-3')

        host_mounts1 = [mock.Mock(key=host1)]
        host_mounts2 = [mock.Mock(key=host2)]
        host_mounts3 = [mock.Mock(key=host3)]

        # empty summary
        ds1 = self._create_datastore('ds-1')
        ds1_props = {'host': host_mounts1}

        # hard anti-affinity datastore
        ds2 = self._create_datastore('ds-2')
        ds2_props = {'summary': self._create_summary(ds2),
                     'host': host_mounts2}

        # not enough free space
        ds3 = self._create_datastore('ds-3')
        ds3_props = {'summary': self._create_summary(ds3, free_space=128),
                     'host': host_mounts1}

        # not connected to a valid host
        ds4 = self._create_datastore('ds-4')
        ds4_props = {'summary': self._create_summary(ds4),
                     'host': host_mounts3}

        # invalid datastore type
        ds5 = self._create_datastore('ds-5')
        ds5_props = {'summary': self._create_summary(ds5, _type='foo'),
                     'host': host_mounts1}

        # hard affinity datastore type
        ds6 = self._create_datastore('ds-6')
        ds6_props = {
            'summary': self._create_summary(
                ds6, _type=ds_sel.DatastoreType.VSAN),
            'host': host_mounts2}

        # inaccessible datastore
        ds7 = self._create_datastore('ds-7')
        ds7_props = {'summary': self._create_summary(ds7, accessible=False),
                     'host': host_mounts1}

        def mock_in_maintenace(summary):
            return summary.datastore.value == 'ds-8'

        self._vops._in_maintenance.side_effect = mock_in_maintenace
        # in-maintenance datastore
        ds8 = self._create_datastore('ds-8')
        ds8_props = {'summary': self._create_summary(ds8),
                     'host': host_mounts2}

        # not compliant with profile
        ds9 = self._create_datastore('ds-9')
        ds9_props = {'summary': self._create_summary(ds9),
                     'host': host_mounts1}

        # valid datastore
        ds10 = self._create_datastore('ds-10')
        ds10_props = {'summary': self._create_summary(ds10),
                      'host': host_mounts1}
        filter_by_profile.return_value = {ds10: ds10_props}

        datastores = {ds1: ds1_props,
                      ds2: ds2_props,
                      ds3: ds3_props,
                      ds4: ds4_props,
                      ds5: ds5_props,
                      ds6: ds6_props,
                      ds7: ds7_props,
                      ds8: ds8_props,
                      ds9: ds9_props,
                      ds10: ds10_props}
        profile_id = mock.sentinel.profile_id
        datastores = self._ds_sel._filter_datastores(
            datastores,
            512,
            profile_id,
            ['ds-2'],
            {ds_sel.DatastoreType.VMFS, ds_sel.DatastoreType.NFS},
            valid_host_refs=[host1, host2])

        self.assertEqual({ds10: ds10_props}, datastores)
        # Only ds9 and ds10 made it to the profile-compliance check.
        filter_by_profile.assert_called_once_with(
            {ds9: ds9_props, ds10: ds10_props},
            profile_id)

    def test_filter_datastores_with_empty_datastores(self):
        # Nothing to filter -> None.
        self.assertIsNone(self._ds_sel._filter_datastores(
            {}, 1024, None, None, None))

    def _create_host_properties(
            self, parent, connection_state='connected', in_maintenace=False):
        # Helper: mock HostSystem runtime properties.
        return mock.Mock(connectionState=connection_state,
                         inMaintenanceMode=in_maintenace,
                         parent=parent)

    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_get_host_properties')
    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_get_resource_pool')
    def test_select_best_datastore(self, get_resource_pool, get_host_props):
        host1 = self._create_host('host-1')
        host2 = self._create_host('host-2')
        host3 = self._create_host('host-3')

        host_mounts1 = [mock.Mock(key=host1,
                                  mountInfo=mock.sentinel.ds1_mount_info1),
                        mock.Mock(key=host2,
                                  mountInfo=mock.sentinel.ds1_mount_info2),
                        mock.Mock(key=host3,
                                  mountInfo=mock.sentinel.ds1_mount_info3)]
        host_mounts2 = [mock.Mock(key=host2,
                                  mountInfo=mock.sentinel.ds2_mount_info2),
                        mock.Mock(key=host3,
                                  mountInfo=mock.sentinel.ds2_mount_info3)]
        host_mounts3 = [mock.Mock(key=host1,
                                  mountInfo=mock.sentinel.ds3_mount_info1),
                        mock.Mock(key=host2,
                                  mountInfo=mock.sentinel.ds3_mount_info2)]
        host_mounts4 = [mock.Mock(key=host1,
                                  mountInfo=mock.sentinel.ds4_mount_info1)]

        ds1 = self._create_datastore('ds-1')
        ds1_props = {'summary': self._create_summary(ds1),
                     'host': host_mounts1}
        ds2 = self._create_datastore('ds-2')
        ds2_props = {
            'summary': self._create_summary(
                ds2, free_space=1024, capacity=2048),
            'host': host_mounts2}
        ds3 = self._create_datastore('ds-3')
        ds3_props = {
            'summary': self._create_summary(
                ds3, free_space=512, capacity=2048),
            'host': host_mounts3}
        ds4 = self._create_datastore('ds-3')
        ds4_props = {'summary': self._create_summary(ds4),
                     'host': host_mounts4}

        cluster_ref = mock.sentinel.cluster_ref

        def mock_get_host_properties(host_ref):
            # host1 is not a valid host, so it must never be queried.
            self.assertIsNot(host1, host_ref)
            if host_ref == host2:
                in_maintenance = False
            else:
                in_maintenance = True
            runtime = mock.Mock(spec=['connectionState', 'inMaintenanceMode'])
            runtime.connectionState = 'connected'
            runtime.inMaintenanceMode = in_maintenance
            return {'parent': cluster_ref, 'runtime': runtime}

        get_host_props.side_effect = mock_get_host_properties

        def mock_is_usable(mount_info):
            if (mount_info == mock.sentinel.ds1_mount_info2 or
                    mount_info == mock.sentinel.ds2_mount_info2):
                return False
            else:
                return True

        self._vops._is_usable.side_effect = mock_is_usable

        rp = mock.sentinel.resource_pool
        get_resource_pool.return_value = rp

        # ds1 is mounted to 3 hosts: host1, host2 and host3; host1 is
        # not a valid host, ds1 is not usable in host1, and host3 is
        # in maintenance mode.
        # ds2 and ds3 are mounted to same hosts, and ds2 has a low space
        # utilization. But ds2 is not usable in host2, and host3 is in
        # maintenance mode. Therefore, ds3 and host2 will be selected.
        datastores = {ds1: ds1_props,
                      ds2: ds2_props,
                      ds3: ds3_props,
                      ds4: ds4_props}
        ret = self._ds_sel._select_best_datastore(
            datastores, valid_host_refs=[host2, host3])
        self.assertEqual((host2, rp, ds3_props['summary']), ret)
        self.assertItemsEqual([mock.call(mock.sentinel.ds1_mount_info2),
                               mock.call(mock.sentinel.ds1_mount_info3),
                               mock.call(mock.sentinel.ds2_mount_info2),
                               mock.call(mock.sentinel.ds2_mount_info3),
                               mock.call(mock.sentinel.ds3_mount_info2)],
                              self._vops._is_usable.call_args_list)
        self.assertEqual([mock.call(host3), mock.call(host2)],
                         get_host_props.call_args_list)
        get_resource_pool.assert_called_once_with(cluster_ref)

    def test_select_best_datastore_with_empty_datastores(self):
        # Nothing to pick from -> None.
        self.assertIsNone(self._ds_sel._select_best_datastore({}))

    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                'get_profile_id')
    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_get_datastores')
    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_filter_datastores')
    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_select_best_datastore')
    def test_select_datastore(
            self, select_best_datastore, filter_datastores, get_datastores,
            get_profile_id):
        # End-to-end flow: profile lookup -> gather -> filter -> pick best.
        profile_id = mock.sentinel.profile_id
        get_profile_id.return_value = profile_id

        datastores = mock.sentinel.datastores
        get_datastores.return_value = datastores

        filtered_datastores = mock.sentinel.filtered_datastores
        filter_datastores.return_value = filtered_datastores

        best_datastore = mock.sentinel.best_datastore
        select_best_datastore.return_value = best_datastore

        size_bytes = 1024
        req = {self._ds_sel.SIZE_BYTES: size_bytes}
        aff_ds_types = [ds_sel.DatastoreType.VMFS]
        req[ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = aff_ds_types
        anti_affinity_ds = [mock.sentinel.ds]
        req[ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = anti_affinity_ds
        profile_name = mock.sentinel.profile_name
        req[ds_sel.DatastoreSelector.PROFILE_NAME] = profile_name

        hosts = mock.sentinel.hosts
        self.assertEqual(best_datastore,
                         self._ds_sel.select_datastore(req, hosts))
        get_datastores.assert_called_once_with()
        filter_datastores.assert_called_once_with(
            datastores, size_bytes, profile_id, anti_affinity_ds, aff_ds_types,
            valid_host_refs=hosts)
        select_best_datastore.assert_called_once_with(filtered_datastores,
                                                      valid_host_refs=hosts)

    @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
                '_filter_by_profile')
    def test_is_datastore_compliant(self, filter_by_profile,
                                    get_profile_id_by_name):
        # Test with empty profile.
        profile_name = None
        datastore = mock.sentinel.datastore
        self.assertTrue(self._ds_sel.is_datastore_compliant(datastore,
                                                            profile_name))

        # Test with invalid profile.
        profile_name = mock.sentinel.profile_name
        get_profile_id_by_name.return_value = None
        self.assertRaises(vmdk_exceptions.ProfileNotFoundException,
                          self._ds_sel.is_datastore_compliant,
                          datastore,
                          profile_name)
        get_profile_id_by_name.assert_called_once_with(self._session,
                                                       profile_name)

        # Test with valid profile and non-compliant datastore.
        get_profile_id_by_name.reset_mock()
        profile_id = mock.sentinel.profile_id
        get_profile_id_by_name.return_value = profile_id
        filter_by_profile.return_value = []
        self.assertFalse(self._ds_sel.is_datastore_compliant(datastore,
                                                             profile_name))
        get_profile_id_by_name.assert_called_once_with(self._session,
                                                       profile_name)
        filter_by_profile.assert_called_once_with([datastore], profile_id)

        # Test with valid profile and compliant datastore.
        get_profile_id_by_name.reset_mock()
        filter_by_profile.reset_mock()
        filter_by_profile.return_value = [datastore]
        self.assertTrue(self._ds_sel.is_datastore_compliant(datastore,
                                                            profile_name))
        get_profile_id_by_name.assert_called_once_with(self._session,
                                                       profile_name)
        filter_by_profile.assert_called_once_with([datastore], profile_id)
| bswartz/cinder | cinder/tests/unit/test_vmware_datastore.py | Python | apache-2.0 | 15,364 |
# -*- encoding: utf-8 -*-
"""
Customizes party address to have address in correct format for Endicia API .
"""
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import string
from endicia import FromAddress, ToAddress
from trytond.pool import PoolMeta
__all__ = ['Address']
__metaclass__ = PoolMeta
class Address:
    '''
    Party address with conversion helpers for the Endicia API.
    '''
    __name__ = "party.address"

    def address_to_endicia_from_address(self):
        '''
        Convert this address into an Endicia ``FromAddress``.

        :return: a ``FromAddress`` instance
        '''
        phone = self.phone or self.party.phone
        if phone:
            # Endicia rejects formatted numbers: keep digits only.
            phone = "".join(ch for ch in phone if ch in string.digits)
        return FromAddress(
            FromName=self.name or self.party.name,
            # FromCompany = user_rec.company.name or None,
            ReturnAddress1=self.street,
            ReturnAddress2=self.streetbis,
            ReturnAddress3=None,
            ReturnAddress4=None,
            FromCity=self.city,
            FromState=self.subdivision and self.subdivision.code[3:],
            FromPostalCode=self.zip and self.zip[:5],
            # Domestic sender: last 10 digits only.
            FromPhone=phone and phone[-10:],
            FromEMail=self.party.email,
        )

    def address_to_endicia_to_address(self):
        '''
        Convert this address into an Endicia ``ToAddress``.

        :return: a ``ToAddress`` instance
        '''
        phone = self.phone or self.party.phone
        postal_code = self.zip
        if phone:
            # Endicia rejects formatted numbers: keep digits only.
            phone = "".join(ch for ch in phone if ch in string.digits)
            if self.country and self.country.code != 'US':
                # International destination: longer phone and postal code.
                phone = phone[-30:]
                postal_code = postal_code and postal_code[:15]
            else:
                # Domestic destination: 10-digit phone, 5-digit ZIP.
                phone = phone[-10:]
                postal_code = postal_code and postal_code[:5]
        return ToAddress(
            ToName=self.name or self.party.name,
            ToCompany=self.name or self.party.name,
            ToAddress1=self.street,
            ToAddress2=self.streetbis,
            ToAddress3=None,
            ToAddress4=None,
            ToCity=self.city,
            ToState=self.subdivision and self.subdivision.code[3:],
            ToPostalCode=postal_code,
            ToCountry=self.country and self.country.endicia_name,
            ToCountryCode=self.country and self.country.code,
            ToPhone=phone,
            ToEMail=self.party.email,
        )
| fulfilio/trytond-shipping-endicia | party.py | Python | bsd-3-clause | 2,854 |
from django.core.urlresolvers import reverse
from django.db import models
class Tag(models.Model):
    # Human-readable tag name; uniqueness enforced at the database level.
    name = models.CharField(max_length=64, unique=True)
    # Closed set of tag categories, stored as the short code (first element).
    TYPES = (
        ('gen', 'Generic'),
        ('char', 'Character'),
        ('meta', 'Meta'),
        ('dang', 'Dangerous'),
    )
    type = models.CharField(max_length=4, choices=TYPES, default='gen')
    # Free-text metadata about the tag; all optional.
    origins = models.TextField(blank=True)
    crossovers = models.TextField(blank=True)
    definition = models.TextField(blank=True)

    def __str__(self):
        return self.name

    @property
    def url(self):
        # Canonical page for this tag, resolved by name rather than pk.
        return reverse('view_tag', args=[self.name])
| PrincessTeruko/TsunArt | tags/models.py | Python | mit | 575 |
from io import BytesIO
from jawa.cf import ClassFile
from jawa.attributes.source_file import SourceFileAttribute
def test_sourcefile_read(loader):
    """
    Ensure we can read a SourceFileAttribute generated by javac.
    """
    # `loader` is a pytest fixture yielding a jawa class loader.
    cf = loader['HelloWorldDebug']
    source_file = cf.attributes.find_one(name='SourceFile')

    assert(source_file.source_file.value == 'HelloWorldDebug.java')
def test_sourcefile_write():
    """
    Ensure SourceFileAttribute can be written and read back.
    """
    # Build a class with a SourceFile attribute...
    cf_one = ClassFile.create(u'SourceFileTest')
    sfa = cf_one.attributes.create(SourceFileAttribute)
    sfa.source_file = cf_one.constants.create_utf8(u'SourceFileTest.java')

    # ...round-trip it through an in-memory buffer...
    fout = BytesIO()
    cf_one.save(fout)

    fin = BytesIO(fout.getvalue())
    cf_two = ClassFile(fin)

    # ...and verify the attribute survived serialization.
    source_file = cf_two.attributes.find_one(name=u'SourceFile')
    assert(source_file.source_file.value == u'SourceFileTest.java')
| TkTech/Jawa | tests/attributes/test_sourcefile_attribute.py | Python | mit | 925 |
import pandas as pd
import numpy as np
import scipy as sp
# Root of the HetRec 2011 Last.fm dataset dump.
# (Previously this assignment was duplicated verbatim; one copy removed.)
datafolder = '/home/eliezer/datasets/hetrec2011/lastfm/'

from experiment_util import LoadLastFM

# Load the Last.fm interaction data plus side information.
loader = LoadLastFM(datafolder)
loader.load()

# R: artists x users training interaction matrix (transposed from users x artists).
R = loader.mat_users_artists_train.T
# W: artist-tag matrix restricted to a random sample of 1000 tag columns.
W = loader.mat_artists_tags[:, np.random.choice(loader.mat_artists_tags.shape[1], 1000, replace=False)]
# S: per-user friend-id lists (social network side information).
S = loader.list_friends_id

n_latent = 100
#cspmf.fit(R,W,S,n_latent)
# Indices of the held-out (test) interactions.
test_cases = np.where(loader.mat_users_artists_test)
| zehsilva/poissonmf_cs | scripts/test_load.py | Python | mit | 518 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration for the tasks app: creates Task and TaskCategory.
    # Depends on contenttypes' initial migration because Task has a generic
    # relation (content_type + object_id) to an arbitrary model.
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('defer_until', models.DateTimeField(auto_now_add=True)),
                ('priority', models.PositiveIntegerField()),
                ('attempt_count', models.PositiveIntegerField(default=0)),
                ('log', models.TextField(blank=True)),
                ('note', models.TextField(blank=True)),
            ],
            options={
                # Highest priority first, then least-attempted, then soonest.
                'ordering': ['-priority', 'attempt_count', 'defer_until'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TaskCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(unique=True, max_length=100)),
                ('priority', models.PositiveIntegerField(default=0)),
            ],
            options={
                'ordering': ['-priority', 'slug'],
                'verbose_name_plural': 'Task categories',
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='task',
            name='category',
            field=models.ForeignKey(to='tasks.TaskCategory'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='task',
            name='content_type',
            field=models.ForeignKey(to='contenttypes.ContentType'),
            preserve_default=True,
        ),
    ]
| mysociety/pombola | pombola/tasks/migrations/0001_initial.py | Python | agpl-3.0 | 2,017 |
#!/usr/bin/env python
import random
'''\
The computer will pick a number between 1 and 100. (You can choose any high
number you want.) The purpose of the game is to guess the number the computer
picked in as few guesses as possible.
source:http://openbookproject.net/pybiblio/practice/\
'''

# Maps the result of (guess > secret) to the hint shown to the player.
high_or_low = {True: "Too high. Try again:",
               False: "Too low. Try again: "}
def main():
choice = random.randrange(1, 100)
user_choice = -1
while user_choice != choice:
user_choice = int(input("Please enter your choice: "))
is_high = user_choice > choice
if user_choice == choice:
break
print(high_or_low[is_high])
print("You guessed {0} correctly".format(choice))
if __name__ == "__main__":
main()
| CompSoc-NUIG/python_tutorials_2013 | guess.py | Python | unlicense | 774 |
# Mini-project #6 - Blackjack
# NOTE: written for CodeSkulptor (Python 2); simplegui is the
# CodeSkulptor-specific GUI module.
import simplegui
import random

# load card sprite - 936x384 - source: jfitz.com
CARD_SIZE = (72, 96)
CARD_CENTER = (36, 48)
card_images = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png")

CARD_BACK_SIZE = (72, 96)
CARD_BACK_CENTER = (36, 48)
card_back = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png")

# initialize some useful global variables
in_play = False   # True while a round is being played out
outcome = ""      # status message shown to the player
score = 0         # running win/loss tally

# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
# Blackjack point values per rank; aces counted as 1 here (the hand
# logic is expected to add 10 for a "soft" ace when it does not bust).
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
# define card class
class Card:
    """A single playing card identified by suit and rank characters."""
    def __init__(self, suit, rank):
        # Validate against the known suits/ranks; an invalid pair yields
        # a card with suit = rank = None (plus a console warning).
        if (suit in SUITS) and (rank in RANKS):
            self.suit = suit
            self.rank = rank
        else:
            self.suit = None
            self.rank = None
            print "Invalid card: ", suit, rank

    def __str__(self):
        # e.g. "SA" for the ace of spades.
        return self.suit + self.rank

    def get_suit(self):
        return self.suit

    def get_rank(self):
        return self.rank

    def draw(self, canvas, pos):
        # Locate this card's sprite inside the sheet by rank/suit index,
        # then blit it with its top-left corner at pos.
        card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
                    CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))
        canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)
# define hand class
# define hand class
class Hand:
    """A blackjack hand: an ordered collection of Card objects."""

    def __init__(self):
        # Cards currently in the hand.
        self.Hand = []

    def __str__(self):
        # BUG FIX: the original concatenated Card objects directly onto
        # a string, which raises TypeError; convert each card first.
        s = ''
        for card in self.Hand:
            s += str(card)
        return s

    def add_card(self, card):
        """Add a Card object to the hand."""
        self.Hand.append(card)

    def get_value(self):
        """Return the blackjack value of the hand.

        Aces count as 1; if the hand holds an ace and counting one ace
        as 11 would not bust, add 10 (the standard "soft" total).
        """
        value = 0
        has_ace = False
        for card in self.Hand:
            rank = card.get_rank()
            value += VALUES[rank]
            if rank == 'A':
                has_ace = True
        if has_ace and value + 10 <= 21:
            value += 10
        return value

    def draw(self, canvas, pos):
        """Draw the hand left-to-right starting at pos.

        Spacing of 10px between cards is a layout choice; adjust to
        taste for the canvas size.
        """
        for index, card in enumerate(self.Hand):
            card.draw(canvas, [pos[0] + index * (CARD_SIZE[0] + 10), pos[1]])
# define deck class
# define deck class
class Deck:
    """A standard 52-card deck built from SUITS x RANKS."""

    def __init__(self):
        # One card for every suit/rank combination, in deterministic
        # order until shuffle() is called.
        self.deck = [Card(suit, rank) for suit in SUITS for rank in RANKS]

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)

    def deal_card(self):
        """Remove and return the top card of the deck."""
        return self.deck.pop()

    def __str__(self):
        # Space-separated card codes, e.g. "CA C2 ... DK".
        return " ".join(str(card) for card in self.deck)
#define event handlers for buttons
#define event handlers for buttons
def deal():
    """Start a new round (stub: currently only flags the game as in play)."""
    global outcome, in_play
    # your code goes here
    in_play = True
def hit():
    """Give the player another card (not yet implemented)."""
    pass    # replace with your code below

    # if the hand is in play, hit the player
    # if busted, assign a message to outcome, update in_play and score
def stand():
    """Finish the player's turn and play out the dealer (not yet implemented)."""
    pass    # replace with your code below

    # if hand is in play, repeatedly hit dealer until his hand has value 17 or more
    # assign a message to outcome, update in_play and score
# draw handler
def draw(canvas):
# test to make sure that card.draw works, replace with your code below
card = Card("S", "A")
card.draw(canvas, [300, 300])
# initialization frame
# initialization frame
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")

# create buttons and canvas callback
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)

# get things rolling: deal the first hand, then start the event loop
deal()
frame.start()

# remember to review the grading rubric
| ZethernDev/blackjack | old_code.py | Python | mit | 3,559 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""simple parser / string tokenizer
rather than returning a list of token types etc, we simple return a list
of tokens. Each tokenizing function takes a string as input and returns
a list of tokens.
"""
# Copyright 2002, 2003 St James Software
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
def stringeval(text):
    """Return the string represented by *text*, a quoted literal in
    which the quote character is escaped by doubling it.

    Raises ValueError when *text* is not wrapped in matching single or
    double quotes.
    """
    quote = text[0]
    properly_quoted = quote in ("'", '"') and text[-1] == quote
    if not properly_quoted:
        raise ValueError("error parsing escaped string: %r" % text)
    body = text[1:-1]
    return body.replace(quote + quote, quote)
def stringquote(text):
    """Wrap *text* in quotes, escaping (by doubling) only when the text
    contains both kinds of quote character.
    """
    if "'" not in text:
        # No single quotes anywhere: single-quoting is always safe.
        return "'%s'" % text
    if '"' not in text:
        return '"%s"' % text
    # Both quote kinds present: double-quote and escape embedded ones.
    return '"%s"' % text.replace('"', '""')
class ParserError(ValueError):
    """Parse error that locates the offending token in the source text."""

    def __init__(self, parser, message, tokennum):
        """Build the error from *parser* state and the index of the
        token that caused it; the message gains line/char coordinates.
        """
        tokenpos = parser.findtokenpos(tokennum)
        line, charpos = parser.getlinepos(tokenpos)
        detail = "%s at line %d, char %d (token %r)" % (
            message, line, charpos, parser.tokens[tokennum])
        ValueError.__init__(self, detail)
        # Keep the context around for callers that want to inspect it.
        self.parser = parser
        self.tokennum = tokennum
class SimpleParser:
    """A small configurable tokenizer.

    tokenize() runs text through a pipeline of tokenizer functions
    (quoted-string extraction, whitespace splitting, operator
    separation) and records source and tokens so that a token can later
    be mapped back to a line/character position for error reporting.
    """

    def __init__(self, defaulttokenlist=None, whitespacechars=" \t\r\n",
                 includewhitespacetokens=0):
        """
        defaulttokenlist: operator strings recognized by separatetokens.
            Multi-character operators come first so they win over their
            single-character prefixes.
        whitespacechars: characters that separate tokens.
        includewhitespacetokens: when true, whitespace runs are emitted
            as tokens instead of being dropped.
        """
        if defaulttokenlist is None:
            self.defaulttokenlist = ['<=', '>=', '==', '!=',
                                     '+=', '-=', '*=', '/=', '<>']
            self.defaulttokenlist.extend('(),[]:=+-')
        else:
            self.defaulttokenlist = defaulttokenlist
        self.whitespacechars = whitespacechars
        self.includewhitespacetokens = includewhitespacetokens
        # Pipeline applied, in order, by tokenize().
        self.standardtokenizers = [
            self.stringtokenize, self.removewhitespace, self.separatetokens
        ]
        self.quotechars = ('"', "'")
        self.endquotechars = {'"': '"', "'": "'"}
        # When true, a backslash inside a string escapes the next char.
        self.stringescaping = 1

    def stringtokenize(self, text):
        """Split text so each quoted string becomes its own token."""
        tokens = []
        laststart = 0
        instring = 0
        endstringchar, escapechar = '', '\\'
        gotclose, gotescape = 0, 0
        for pos in range(len(text)):
            char = text[pos]
            if instring:
                if (self.stringescaping and
                        (gotescape or char == escapechar) and not gotclose):
                    gotescape = not gotescape
                elif char == endstringchar:
                    gotclose = not gotclose
                elif gotclose:
                    # Previous char closed the string: flush it as a token.
                    tokens.append(text[laststart:pos])
                    instring, laststart, endstringchar = 0, pos, ''
            if not instring:
                if char in self.quotechars:
                    if pos > laststart:
                        tokens.append(text[laststart:pos])
                    instring, laststart, endstringchar, gotclose = 1, pos, self.endquotechars[char], 0
        if laststart < len(text):
            tokens.append(text[laststart:])
        return tokens

    def keeptogether(self, text):
        """Return true when a token must not be split further."""
        return self.isstringtoken(text)

    def isstringtoken(self, text):
        """Return true when the token is a quoted-string token."""
        return text[:1] in self.quotechars

    def separatetokens(self, text, tokenlist=None):
        """Split out operator tokens (from tokenlist) embedded in text."""
        if self.keeptogether(text):
            return [text]
        if tokenlist is None:
            tokenlist = self.defaulttokenlist
        # loop through and put tokens into a list
        tokens = []
        pos = 0
        laststart = 0
        lentext = len(text)
        while pos < lentext:
            foundtoken = 0
            for token in tokenlist:
                lentoken = len(token)
                if text[pos:pos+lentoken] == token:
                    if laststart < pos:
                        tokens.append(text[laststart:pos])
                    tokens.append(token)
                    pos += lentoken
                    foundtoken, laststart = 1, pos
                    break
            if not foundtoken:
                pos += 1
        if laststart < lentext:
            tokens.append(text[laststart:])
        return tokens

    def removewhitespace(self, text):
        """Split text on whitespace runs, keeping or dropping the runs
        according to includewhitespacetokens."""
        if self.keeptogether(text):
            return [text]
        # loop through and put tokens into a list
        tokens = []
        pos = 0
        inwhitespace = 0
        laststart = 0
        for pos in range(len(text)):
            char = text[pos]
            if inwhitespace:
                if char not in self.whitespacechars:
                    if laststart < pos and self.includewhitespacetokens:
                        tokens.append(text[laststart:pos])
                    inwhitespace, laststart = 0, pos
            else:
                if char in self.whitespacechars:
                    if laststart < pos:
                        tokens.append(text[laststart:pos])
                    inwhitespace, laststart = 1, pos
        if (laststart < len(text) and
                (not inwhitespace or self.includewhitespacetokens)):
            tokens.append(text[laststart:])
        return tokens

    def applytokenizer(self, inputlist, tokenizer):
        """Apply one tokenizer to each text in inputlist, flattening the
        results into a single list."""
        # BUG FIX: the original used map(joined.extend, tokenizedlists),
        # which is lazy on Python 3 and therefore never executed; this
        # explicit loop behaves identically on Python 2 and 3.
        joined = []
        for text in inputlist:
            joined.extend(tokenizer(text))
        return joined

    def applytokenizers(self, inputlist, tokenizers):
        """Apply a sequence of tokenizers, flattening after each one."""
        for tokenizer in tokenizers:
            inputlist = self.applytokenizer(inputlist, tokenizer)
        return inputlist

    def tokenize(self, source, tokenizers=None):
        """Tokenize source with the given pipeline (default: the
        standard one), remembering source and tokens for later
        position lookups."""
        self.source = source
        if tokenizers is None:
            tokenizers = self.standardtokenizers
        self.tokens = self.applytokenizers([self.source], tokenizers)
        return self.tokens

    def findtokenpos(self, tokennum):
        """Find the character offset of the given token in the source.

        Scans forward token by token so repeated tokens resolve to the
        right occurrence.
        """
        currenttokenpos = 0
        for currenttokennum in range(tokennum + 1):
            currenttokenpos = self.source.find(self.tokens[currenttokennum],
                                               currenttokenpos)
        return currenttokenpos

    def getlinepos(self, tokenpos):
        """Return (line, charpos), both 1-based, for a character offset."""
        sourcecut = self.source[:tokenpos]
        line = sourcecut.count("\n") + 1
        charpos = tokenpos - sourcecut.rfind("\n")
        return line, charpos

    def raiseerror(self, message, tokennum):
        """Raise a ParserError pointing at the given token."""
        raise ParserError(self, message, tokennum)
| staranjeet/fjord | vendor/packages/translate-toolkit/translate/misc/sparse.py | Python | bsd-3-clause | 7,986 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import availability_zones
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import compute_node
from nova.objects import fields
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Service(base.NovaPersistentObject, base.NovaObject):
    """Versioned object wrapping a row of the services DB table."""
    # Version 1.0: Initial version
    # Version 1.1: Added compute_node nested object
    # Version 1.2: String attributes updated to support unicode
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        # Lazily loadable nested object (see obj_load_attr below).
        'compute_node': fields.ObjectField(compute_node.ComputeNode),
    }

    @staticmethod
    def _do_compute_node(context, service, db_service):
        # Populate service.compute_node from a joined DB row, if present.
        try:
            # NOTE(danms): The service.compute_node relationship returns
            # a list, which should only have one item in it. If it's empty
            # or otherwise malformed, ignore it.
            db_compute = db_service['compute_node'][0]
        except Exception:
            return
        service.compute_node = compute_node.ComputeNode._from_db_object(
            context, compute_node.ComputeNode(), db_compute)

    @staticmethod
    def _from_db_object(context, service, db_service):
        # Hydrate a Service object from a dict-like DB row.
        # availability_zone may legitimately be absent from the row.
        allow_missing = ('availability_zone',)
        for key in service.fields:
            if key in allow_missing and key not in db_service:
                continue
            if key == 'compute_node':
                service._do_compute_node(context, service, db_service)
            else:
                service[key] = db_service[key]
        service._context = context
        # Freshly loaded from the DB, so nothing is "changed" yet.
        service.obj_reset_changes()
        return service

    def obj_load_attr(self, attrname):
        """Lazy-load an attribute; only 'compute_node' supports this."""
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s id %(id)s"),
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'id': self.id,
                   })
        if attrname != 'compute_node':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        self.compute_node = compute_node.ComputeNode.get_by_service_id(
            self._context, self.id)

    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Fetch one service by primary key."""
        db_service = db.service_get(context, service_id)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Fetch the service for a given host/topic pair."""
        db_service = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_compute_host(cls, context, host):
        """Fetch the compute service running on the given host."""
        db_service = db.service_get_by_compute_host(context, host)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary):
        """Fetch the service matching a host/binary pair."""
        db_service = db.service_get_by_args(context, host, binary)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable
    def create(self, context):
        """Persist this (new) service, then refresh from the DB row."""
        updates = self.obj_get_changes()
        db_service = db.service_create(context, updates)
        self._from_db_object(context, self, db_service)

    @base.remotable
    def save(self, context):
        """Persist any changed fields (the id itself is never updated)."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        db_service = db.service_update(context, self.id, updates)
        self._from_db_object(context, self, db_service)

    @base.remotable
    def destroy(self, context):
        """Delete the backing DB row."""
        db.service_destroy(context, self.id)
class ServiceList(base.ObjectListBase, base.NovaObject):
    """List object holding Service instances."""

    @base.remotable_classmethod
    def get_by_topic(cls, context, topic):
        """All services registered under the given topic."""
        rows = db.service_get_all_by_topic(context, topic)
        return base.obj_make_list(context, ServiceList(), Service, rows)

    @base.remotable_classmethod
    def get_by_host(cls, context, host):
        """All services running on the given host."""
        rows = db.service_get_all_by_host(context, host)
        return base.obj_make_list(context, ServiceList(), Service, rows)

    @base.remotable_classmethod
    def get_all(cls, context, disabled=None, set_zones=False):
        """All services, optionally filtered by disabled state.

        When set_zones is true, each row is annotated with its
        availability zone before being wrapped in objects.
        """
        rows = db.service_get_all(context, disabled=disabled)
        if set_zones:
            rows = availability_zones.set_availability_zones(context, rows)
        return base.obj_make_list(context, ServiceList(), Service, rows)
| ntt-sic/nova | nova/objects/service.py | Python | apache-2.0 | 5,687 |
#!/usr/bin/env python
# Copyright (c) 2003-2006 ActiveState Software Inc.
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# Authors:
# Trent Mick <TrentM@ActiveState.com>
"""An alternate version of the cmd.Cmd object for command-line handling
that uses argument vectors instead of command strings. This is much more
handy.
Also, some minor changes have been made to some default behaviours of
the cmd.Cmd class.
XXX Describe those differences here.
"""
# TODO:
# - see XXX's and TODO's below
# - port to cmd.Cmd changes in Python 2.3 (stdout and stdin ctor args)
# - add tests for the actual ListCmd class :)
# - LaTeX documentation
import os
import sys
import cmd
import string
_version_ = (0, 1, 0)
class ListCmdError(Exception):
    """Base exception for errors raised by this module."""
    pass
def line2argv(line):
    r"""Parse the given line into an argument vector.
    "line" is the line of input to parse.
    This may get niggly when dealing with quoting and escaping. The
    current state of this parsing may not be completely thorough/correct
    in this respect.
    >>> from listcmd import line2argv
    >>> line2argv("foo")
    ['foo']
    >>> line2argv("foo bar")
    ['foo', 'bar']
    >>> line2argv("foo bar ")
    ['foo', 'bar']
    >>> line2argv(" foo bar")
    ['foo', 'bar']
    >>> line2argv("'foo bar'")
    ['foo bar']
    >>> line2argv('"foo bar"')
    ['foo bar']
    >>> line2argv(r'"foo\"bar"')
    ['foo"bar']
    >>> line2argv("'foo bar' spam")
    ['foo bar', 'spam']
    >>> line2argv("'foo 'bar spam")
    ['foo bar', 'spam']
    >>> line2argv('some\tsimple\ttests')
    ['some', 'simple', 'tests']
    >>> line2argv('a "more complex" test')
    ['a', 'more complex', 'test']
    >>> line2argv('a more="complex test of " quotes')
    ['a', 'more=complex test of ', 'quotes']
    >>> line2argv('a more" complex test of " quotes')
    ['a', 'more complex test of ', 'quotes']
    >>> line2argv('an "embedded \\"quote\\""')
    ['an', 'embedded "quote"']
    # Komodo bug 48027
    >>> line2argv('foo bar C:\\')
    ['foo', 'bar', 'C:\\']
    # Komodo change 127581
    >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
    ['\\test\\slash', 'foo bar', 'foo"bar']
    # Komodo change 127629
    >>> if sys.platform == "win32":
    ...     line2argv(r'\foo\bar') == ['\\foo\\bar']
    ...     line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
    ...     line2argv('"foo') == ['foo']
    ... else:
    ...     line2argv(r'\foo\bar') == ['foobar']
    ...     line2argv(r'\\foo\\bar') == ['\\foo\\bar']
    ...     try:
    ...         line2argv('"foo')
    ...     except ValueError, ex:
    ...         "not terminated" in str(ex)
    True
    True
    True
    """
    line = line.strip()
    argv = []
    # Character-by-character state machine; 'state' tracks the current
    # quoting context: "default", "single-quoted" or "double-quoted".
    state = "default"
    arg = None  # the current argument being parsed
    i = -1
    while 1:
        i += 1
        if i >= len(line): break
        ch = line[i]
        if ch == "\\" and i+1 < len(line):
            # escaped char always added to arg, regardless of state
            if arg is None: arg = ""
            # On Windows (or inside quotes) a backslash that does not
            # escape a quote char is kept literally, so paths survive.
            if (sys.platform == "win32"
                or state in ("double-quoted", "single-quoted")
               ) and line[i+1] not in tuple('"\''):
                arg += ch
            i += 1
            arg += line[i]
            continue
        if state == "single-quoted":
            if ch == "'":
                state = "default"
            else:
                arg += ch
        elif state == "double-quoted":
            if ch == '"':
                state = "default"
            else:
                arg += ch
        elif state == "default":
            if ch == '"':
                if arg is None: arg = ""
                state = "double-quoted"
            elif ch == "'":
                if arg is None: arg = ""
                state = "single-quoted"
            elif ch in string.whitespace:
                # Whitespace outside quotes terminates the current arg.
                if arg is not None:
                    argv.append(arg)
                    arg = None
            else:
                if arg is None: arg = ""
                arg += ch
    if arg is not None:
        argv.append(arg)
    # Unterminated quotes are tolerated on Windows (cmd.exe behaviour),
    # rejected elsewhere.
    if not sys.platform == "win32" and state != "default":
        raise ValueError("command line is not terminated: unfinished %s "
                         "segment" % state)
    return argv
def argv2line(argv):
    r"""Put together the given argument vector into a command line.
    "argv" is the argument vector to process.
    >>> from listcmd import argv2line
    >>> argv2line(['foo'])
    'foo'
    >>> argv2line(['foo', 'bar'])
    'foo bar'
    >>> argv2line(['foo', 'bar baz'])
    'foo "bar baz"'
    >>> argv2line(['foo"bar'])
    'foo"bar'
    >>> print argv2line(['foo" bar'])
    'foo" bar'
    >>> print argv2line(["foo' bar"])
    "foo' bar"
    >>> argv2line(["foo'bar"])
    "foo'bar"
    """
    quoted = []
    for arg in argv:
        # Only args containing spaces need quoting; pick whichever quote
        # style avoids escaping, falling back to escaped double quotes.
        if ' ' in arg:
            if '"' not in arg:
                arg = '"%s"' % arg
            elif "'" not in arg:
                arg = "'%s'" % arg
            else:
                arg = '"%s"' % arg.replace('"', r'\"')
        quoted.append(arg)
    return ' '.join(quoted)
class ListCmd(cmd.Cmd):
    """Pass arglists instead of command strings to commands.
    Modify the std Cmd class to pass arg lists instead of command lines.
    This seems more appropriate for integration with sys.argv which handles
    the proper parsing of the command line arguments (particularly handling
    of quoting of args with spaces).

    NOTE: this module predates Python 3 (raw_input, `except E, ex`
    syntax, print statement) and must run under Python 2.
    """
    #TODO:
    # - See the XXX's in this class.
    # - Add an "options" argument to the constructor specifying whether
    #   the special '?' and '!' things should be used. One might want to
    #   key some of this on whether operating in a command loop or not.
    # - Look at the complete_* stuff and see if it needs to be adapted.
    # - Figure out how to deal with the onecmd vs. cmdloop differences:
    #   - return code from .default()
    #   - need for self.name and usage in error messages
    # - Document this and submit to the Python core.
    prompt = "(ListCmd) "

    def logerror(self, msg):
        """Write an error message to stderr (hook for subclasses)."""
        #XXX document this new method
        sys.stderr.write(msg+'\n')

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse into an argv, and
        dispatch to action methods, passing them the argv.
            "intro" is a introductory method to print when starting the
                command loop. This overrides the class "intro" attribute,
                if any.
        """
        #XXX Might be nice to add a trap for KeyboardInterrupt which
        #    defers to say, self.interrupt, for handling. This handler would
        #    do nothing by default but could offer confirm that the user
        #    wants to cancel.
        self.preloop()
        if intro is not None:
            self.intro = intro
        if self.intro:
            sys.stdout.write(str(self.intro)+"\n")
        stop = None
        while not stop:
            if self.cmdqueue:
                #XXX What is the .cmdqueue? "cmd.py" does not seem to do
                #    anything useful with it.
                line = self.cmdqueue.pop(0)
            else:
                if self.use_rawinput:
                    try:
                        line = raw_input(self.prompt)
                    except EOFError:
                        # EOF (Ctrl-D) is dispatched as the 'EOF' command.
                        line = 'EOF'
                else:
                    sys.stdout.write(self.prompt)
                    sys.stdout.flush()
                    line = sys.stdin.readline()
                    if not len(line):
                        line = 'EOF'
                    else:
                        line = line[:-1] # chop \n
            # Key difference from cmd.Cmd: the line is parsed into an
            # argv before the pre/one/post command hooks see it.
            argv = line2argv(line)
            try:
                argv = self.precmd(argv)
                stop = self.onecmd(argv)
                stop = self.postcmd(stop, argv)
            except:
                # Give subclasses a chance to handle the error; re-raise
                # if onerror() does not claim it.
                if not self.onerror():
                    raise
        self.postloop()

    def onerror(self):
        """Called if an exception is raised in any of precmd(), onecmd(),
        or postcmd(). If true is returned, the exception is deemed to have
        been dealt with.
        """
        pass

    def precmd(self, argv):
        """Hook method executed just before the command argv is
        interpreted, but after the input prompt is generated and issued.
        """
        return argv

    def postcmd(self, stop, argv):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def onecmd(self, argv):
        """Dispatch a single parsed argv to its do_<command> method."""
        # Differences from Cmd:
        # - use an argv, rather than a command string
        # - don't specially handle the '?' redirect to 'help'
        # - don't allow the '!' shell out
        if not argv:
            return self.emptyline()
        self.lastcmd = argv
        cmdName = argv[0]
        try:
            func = getattr(self, 'do_' + cmdName)
        except AttributeError:
            return self.default(argv)
        try:
            return func(argv)
        except TypeError, ex:
            # Most likely a do_* handler called with the wrong number of
            # args; report it as a usage error rather than crashing.
            self.logerror("%s: %s" % (cmdName, ex))
            self.logerror("try 'help %s'" % cmdName)
            if 0:   # for debugging
                print
                import traceback
                traceback.print_exception(*sys.exc_info())

    def default(self, argv):
        """Called when the command prefix has no matching do_* method."""
        self.logerror("unknown syntax: '%s'" % argv2line(argv))
        #XXX Would like to return 1 here to return an error code for
        #    a single command line, however this return value is used to
        #    indicate whether a command loop should stop. TODO: separate
        #    these two: return code and whether to stop loop, consider
        #    using a Stop exception or something like that.
        #return 1

    def _do_one_help(self, arg):
        # Prefer an explicit help_<arg>() method; fall back to the
        # do_<arg> docstring; report when neither exists.
        try:
            # If help_<arg1>() exists, then call it.
            func = getattr(self, 'help_' + arg)
        except AttributeError:
            try:
                doc = getattr(self, 'do_' + arg).__doc__
            except AttributeError:
                doc = None
            if doc: # *do* have help, print that
                sys.stdout.write(doc + '\n')
                sys.stdout.flush()
            else:
                self.logerror("no help for '%s'" % (arg,))
        else:
            return func()

    # Technically this improved do_help() does not fit into _ListCmd, and
    # something like this would be more appropriate:
    #    def do_help(self, argv):
    #        cmd.Cmd.do_help(self, ' '.join(argv[1:]))
    # but I don't want to make another class for it.
    def do_help(self, argv):
        """Show help for the given command(s), or general module help."""
        if argv[1:]:
            for arg in argv[1:]:
                retval = self._do_one_help(arg)
                if retval:
                    return retval
        else:
            doc = self.__class__.__doc__  # try class docstring
            if doc:
                sys.stdout.write(doc + '\n')
                sys.stdout.flush()
            elif __doc__:  # else try module docstring
                sys.stdout.write(__doc__)
                sys.stdout.flush()

    def emptyline(self):
        # Different from Cmd: don't repeat the last command for an emptyline.
        pass
#---- self-test
#---- self-test
def _test():
    """Run this module's doctests (line2argv / argv2line examples)."""
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()
| f304646673/PhpDebugger | src/dbgp/listcmd.py | Python | apache-2.0 | 12,414 |
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from itertools import groupby, islice
import sys
import in_generator
import template_expander
PARAMETER_NAME = 'data'
def _trie(tags, index):
"""Make a trie from list of tags, starting at index.
Resulting trie is partly space-optimized (semi-radix tree): once have only
one string left, compact the entire branch to one leaf node.
However, does not compact branch nodes with a single child. (FIXME)
Returns:
(char, subtrie, tag, conditions): (char, trie, str, list)
code generation differs between branch nodes and leaf nodes,
hence need different data for each.
Arguments:
tags: sorted list
(sorted needed by groupby, list needed by len)
index: index at which to branch
(assumes prior to this index strings have a common prefix)
"""
def trie_node(char, subtags_iter):
# Pass in |char| so we can include in same tuple without unpacking
subtags = list(subtags_iter) # need list for len
if len(subtags) == 1: # terminal node, no subtrie
subtrie = None
tag = subtags[0]
conditions = _conditions(tag, index + 1)
else:
subtrie = _trie(subtags, index + 1)
tag = None
conditions = None
return char, subtrie, tag, conditions
# Group by char at index
def char_at_index(tag):
return tag[index].lower()
char_subtags = ((k, g) for k, g in groupby(tags, char_at_index))
# FIXME: if all subtags have a common prefix, merge with child
# and skip the switch in the generated code
return (trie_node(char, subtags) for char, subtags in char_subtags)
def _conditions(tag, index):
# boolean conditions to check suffix; corresponds to compacting branch
# with a single leaf
return ["%s[%d] == '%c'" % (PARAMETER_NAME, i, c.lower())
for i, c in islice(enumerate(tag), index, None)]
class ElementLookupTrieWriter(in_generator.Writer):
    """Generates <Namespace>ElementLookupTrie.{h,cpp} from an .in file
    of tag names, using the trie built by _trie()."""
    # FIXME: Inherit all these from somewhere.
    defaults = {
        'JSInterfaceName': None,
        'constructorNeedsCreatedByParser': None,
        'constructorNeedsFormElement': None,
        'interfaceName': None,
        'noConstructor': None,
        'runtimeEnabled': None,
    }
    default_parameters = {
        'attrsNullNamespace': None,
        'export': '',
        'fallbackInterfaceName': '',
        'fallbackJSInterfaceName': '',
        'namespace': '',
        'namespacePrefix': '',
        'namespaceURI': '',
    }

    def __init__(self, in_file_paths):
        super(ElementLookupTrieWriter, self).__init__(in_file_paths)
        # Tag names to build the lookup trie from.
        self._tags = [entry['name'] for entry in self.in_file.name_dictionaries]
        self._namespace = self.in_file.parameters['namespace'].strip('"')
        # Maps output filename -> generator method (consumed by the base
        # Writer class).
        self._outputs = {
            (self._namespace + 'ElementLookupTrie.h'): self.generate_header,
            (self._namespace + 'ElementLookupTrie.cpp'): self.generate_implementation,
        }

    @template_expander.use_jinja('ElementLookupTrie.h.tmpl')
    def generate_header(self):
        """Template context for the generated header."""
        return {
            'namespace': self._namespace,
        }

    @template_expander.use_jinja('ElementLookupTrie.cpp.tmpl')
    def generate_implementation(self):
        """Template context for the generated implementation: one trie
        per distinct tag length."""
        # First sort, so groupby works
        self._tags.sort(key=lambda tag: (len(tag), tag))
        # Group tags by length
        length_tags = ((k, g) for k, g in groupby(self._tags, len))
        return {
            'namespace': self._namespace,
            'length_tries': ((length, _trie(tags, 0))
                             for length, tags in length_tags),
        }
# CLI entry point: in_generator.Maker parses the .in file(s) named on
# the command line and writes the generated sources.
if __name__ == '__main__':
    in_generator.Maker(ElementLookupTrieWriter).main(sys.argv)
| zero-rp/miniblink49 | third_party/WebKit/Source/build/scripts/make_element_lookup_trie.py | Python | apache-2.0 | 5,298 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
from datetime import datetime
from dateutil.relativedelta import relativedelta
class PurchaseForecastLoad(models.TransientModel):
_name = 'purchase.forecast.load'
def _get_default_partner(self):
model = self.env.context.get('active_model', False)
record = self.env[model].browse(self.env.context.get('active_id'))
partner = False
if model == 'purchase.order':
partner = record.partner_id
return partner
def _get_default_forecast(self):
model = self.env.context.get('active_model', False)
record = self.env[model].browse(self.env.context.get('active_id'))
forecast = False
if model == 'procurement.sale.forecast':
forecast = record.id
return forecast
def _get_default_purchase(self):
model = self.env.context.get('active_model', False)
record = self.env[model].browse(self.env.context.get('active_id'))
purchase = False
if model == 'purchase.order':
purchase = record.id
return purchase
def _get_default_date_from(self):
model = self.env.context.get('active_model', False)
record = self.env[model].browse(self.env.context.get('active_id'))
date_from = False
if model == 'purchase.order':
date_from = record.date_order
elif model == 'procurement.sale.forecast':
date_from = record.date_from
return date_from
def _get_default_date_to(self):
model = self.env.context.get('active_model', False)
record = self.env[model].browse(self.env.context.get('active_id'))
date_to = False
if model == 'purchase.order':
date_to = record.date_order
elif model == 'procurement.sale.forecast':
date_to = record.date_to
return date_to
partner_id = fields.Many2one("res.partner", string="Partner",
default=_get_default_partner)
date_from = fields.Date(string="Date from", default=_get_default_date_from)
date_to = fields.Date(string="Date to", default=_get_default_date_to)
purchase_id = fields.Many2one("purchase.order", "Purchase",
default=_get_default_purchase)
forecast_id = fields.Many2one("procurement.sale.forecast", "Forecast",
default=_get_default_forecast)
product_categ_id = fields.Many2one("product.category", string="Category")
product_tmpl_id = fields.Many2one("product.template", string="Template")
product_id = fields.Many2one("product.product", string="Product")
factor = fields.Float(string="Factor", default=1)
@api.onchange('purchase_id')
def purchase_onchange(self):
    """Sync partner and collapse the date range onto the selected
    purchase order's order date."""
    if not self.purchase_id:
        return
    self.partner_id = self.purchase_id.partner_id.id
    self.date_from = self.purchase_id.date_order
    self.date_to = self.purchase_id.date_order
@api.onchange('forecast_id')
def forecast_onchange(self):
    """Copy the selected forecast's period into the wizard's date range."""
    if not self.forecast_id:
        return
    self.date_from = self.forecast_id.date_from
    self.date_to = self.forecast_id.date_to
@api.multi
def match_purchases_forecast(self, purchases, factor):
    """Aggregate purchase lines into ``{partner: {product: {qty, amount}}}``.

    Only purchase lines for which no forecast line already exists for the
    same (product, partner, forecast) triple are accumulated.

    :param purchases: purchase.order.line recordset to aggregate
    :param factor: multiplier applied to each purchased quantity
    :return: dict keyed by partner id, then product id, holding the
        factored quantity and the summed line subtotal
    """
    self.ensure_one()
    forecast_line_obj = self.env['procurement.sale.forecast.line']
    res = {}
    for purchase in purchases:
        forecast = self.forecast_id.id
        partner = self.partner_id.id
        product = purchase.product_id.id
        forecast_lines = forecast_line_obj.search(
            [('product_id', '=', product),
             ('partner_id', '=', partner),
             ('forecast_id', '=', forecast)])
        if not forecast_lines:
            product_dict = res.setdefault(partner, {}).setdefault(
                product, {'qty': 0.0, 'amount': 0.0})
            # Bug fix: the factor used to be re-applied to the whole
            # accumulated quantity on every matching line, compounding it
            # (q1*f, then (q1*f + q2)*f, ...).  Apply it once per
            # increment instead so the total is (q1 + q2 + ...) * f.
            product_dict['qty'] += purchase.product_qty * factor
            product_dict['amount'] += purchase.price_subtotal
    return res
@api.multi
def get_date_list(self, forecast):
    """Return one '%Y-%m-%d' string per month of the forecast period.

    The first day of the starting month is appended first; the remaining
    months are then appended in *descending* order (last month first).
    NOTE(review): load_purchases iterates the whole list so the odd
    ordering looks harmless, but confirm before relying on it elsewhere.
    """
    self.ensure_one()
    date_list = []
    date_format = '%Y-%m-%d'
    date_start = datetime.strptime(forecast.date_from, date_format)
    date_end = datetime.strptime(forecast.date_to, date_format)
    # Number of whole-month steps between the two period boundaries.
    month_count = ((date_end.year - date_start.year) * 12 +
                   date_end.month - date_start.month)
    first_date = datetime(date_start.year, date_start.month, 1)
    date_list.append(datetime.strftime(first_date, date_format))
    while month_count > 0:
        next_date = first_date + relativedelta(months=month_count)
        date_list.append(datetime.strftime(next_date, date_format))
        month_count -= 1
    return date_list
@api.multi
def get_purchase_forecast_lists(self, forecast):
    """Collect the purchase order lines to load into the forecast.

    Lines come either from the wizard's explicit purchase order or from
    all purchase orders in the selected date range (optionally restricted
    to the wizard's partner), then get filtered by product, template or
    category — in that priority order — when one of those is set.

    NOTE(review): the ``forecast`` argument is unused in this method;
    confirm whether it can be dropped from the signature and callers.
    """
    purchase_line_obj = self.env['purchase.order.line']
    purchase_obj = self.env['purchase.order']
    product_obj = self.env['product.product']
    self.ensure_one()
    purchases = []
    if self.purchase_id:
        purchases = self.purchase_id
    else:
        purchase_domain = [('date_order', '>=', self.date_from),
                           ('date_order', '<=', self.date_to)]
        if self.partner_id:
            purchase_domain += [('partner_id', '=', self.partner_id.id)]
        purchases = purchase_obj.search(purchase_domain)
    purchase_line_domain = [('order_id', 'in', purchases.ids)]
    if self.product_id:
        purchase_line_domain += [('product_id', '=', self.product_id.id)]
    elif self.product_tmpl_id:
        purchase_line_domain += [('product_tmpl_id', '=',
                                  self.product_tmpl_id.id)]
    elif self.product_categ_id:
        products = product_obj.search([('categ_id', '=',
                                        self.product_categ_id.id)])
        purchase_line_domain += [('product_id', 'in', products.ids)]
    purchase_lines = purchase_line_obj.search(purchase_line_domain)
    return purchase_lines
@api.multi
def load_purchases(self):
    """Create forecast lines from past purchases, spread over the period.

    Aggregated quantities are averaged over the number of months in the
    wizard's date range and one forecast line is created per
    (date, partner, product) combination.
    """
    self.ensure_one()
    date_format = '%Y-%m-%d'
    forecast_line_obj = self.env['procurement.sale.forecast.line']
    forecast = self.forecast_id
    purchase_lines = self.get_purchase_forecast_lists(forecast)
    date_list = self.get_date_list(forecast)
    date_start = datetime.strptime(self.date_from, date_format)
    date_end = datetime.strptime(self.date_to, date_format)
    # Inclusive month span used to average the aggregated quantities.
    month_count = ((date_end.year - date_start.year) * 12 +
                   date_end.month - date_start.month + 1)
    result = self.match_purchases_forecast(purchase_lines, self.factor)
    for date in date_list:
        for partner in result.keys():
            for product in result[partner].keys():
                prod_vals = result[partner][product]
                # NOTE(review): the unit_price division raises
                # ZeroDivisionError when the accumulated qty is 0 —
                # confirm zero-qty purchase lines cannot reach this point.
                forecast_line_vals = {'product_id': product,
                                      'forecast_id': self.forecast_id.id,
                                      'partner_id': partner,
                                      'date': date,
                                      'qty': (prod_vals['qty'] /
                                              month_count),
                                      'unit_price': (prod_vals['amount'] /
                                                     prod_vals['qty'])
                                      }
                forecast_line_obj.create(forecast_line_vals)
    return True
| InakiZabala/odoomrp-wip | procurement_purchase_forecast/wizard/purchase_forecast_load.py | Python | agpl-3.0 | 8,835 |
#!/usr/bin/env python
# Packaging metadata for the foursquare.pants.changed plugin.
from setuptools import setup

version = '0.0.1'

setup(
    name='foursquare.pants.changed',
    version=version,
    author='Foursquare',
    author_email='pants@foursquare.com',
    description='List, build or test locally changed targets',
    url='https://github.com/foursquare/pants-changed',
    download_url='https://github.com/foursquare/pants-changed/tags/v' + version,
    packages=['foursquare.pants.changed'],
    namespace_packages=['foursquare', 'foursquare.pants'],
    # Registers the plugin's goals with pantsbuild at load time.
    entry_points={
        'pantsbuild.plugin': [
            'register_goals = foursquare.pants.changed.register:register',
        ],
    },
    keywords=['pantsbuild', 'pantsbuild plugin'],
)
| foursquare/pants-changed | setup.py | Python | apache-2.0 | 661 |
#
# Autor: Igor Nunes
# Materia: Programa Python
# Orientador : Ronaldo
# Aula de Banco Basico Delete
#
import mysql.connector

# Bug fixes relative to the original:
#  * the config assignment was split across lines without continuation
#    (SyntaxError), and the comando literal had a stray closing paren;
#  * mysql.connector.connector() does not exist -> connect();
#  * the %s placeholder was quoted inside the SQL, defeating
#    parameterization; execute() expects the params as a tuple;
#  * cursor.Close / db.close were attribute accesses, never called.
config = {
    'host': 'localhost',
    'port': 3306,
    'database': 'LojaDB',
    'user': 'admin',
    'password': 'admin',
}

db = mysql.connector.connect(**config)
cursor = db.cursor()

# Parameterized delete: the driver escapes the value, preventing SQL
# injection through the typed-in code.
comando = (
    "delete from LojaDB.produtos "
    "where codigo = %s")

varcodigo = input("Digite Codigo")

cursor.execute(comando, (varcodigo,))
db.commit()
print('Jáfoi')
cursor.close()
db.close()
""" Collection of helper classes and functions to reduce boilerplate code. """
from .fields import *
from .flatten import FlattenedAccess
from .serialization import Serializable, FrozenSerializable, SimpleJsonEncoder, encode
from .hparams import HyperParameters
try:
from .serialization import YamlSerializable
except ImportError:
pass
# For backward compatibility purposes
JsonSerializable = Serializable
SimpleEncoder = SimpleJsonEncoder
| lebrice/SimpleParsing | simple_parsing/helpers/__init__.py | Python | mit | 450 |
# -*- coding: utf-8 -*-
from odoo import fields, models
class Notification(models.Model):
    """Extend mail notifications with a snailmail channel.

    Adds the 'snail' notification type, a link to the letter that was
    (or failed to be) sent, and snailmail-specific failure reasons.
    """
    _inherit = 'mail.notification'

    notification_type = fields.Selection(selection_add=[('snail', 'Snailmail')], ondelete={'snail': 'cascade'})
    # Letter backing this notification; removed together with the letter.
    letter_id = fields.Many2one('snailmail.letter', string="Snailmail Letter", index=True, ondelete='cascade')
    failure_type = fields.Selection(selection_add=[
        ('sn_credit', "Snailmail Credit Error"),
        ('sn_trial', "Snailmail Trial Error"),
        ('sn_price', "Snailmail No Price Available"),
        ('sn_fields', "Snailmail Missing Required Fields"),
        ('sn_format', "Snailmail Format Error"),
        ('sn_error', "Snailmail Unknown Error"),
    ])
"""Definition of the SimpleContact content type
"""
from zope.interface import implements, directlyProvides
from Products.Archetypes import atapi
from Products.ATContentTypes.content import base
from Products.ATContentTypes.content import schemata
from vwcollective.simplecontact import simplecontactMessageFactory as _
from vwcollective.simplecontact.interfaces import ISimpleContact
from vwcollective.simplecontact.config import PROJECTNAME
# Archetypes schema: base ATContentType fields plus the contact-specific
# fields below.  All fields use annotation storage so they can be bridged
# to python properties on the class.
SimpleContactSchema = schemata.ATContentTypeSchema.copy() + atapi.Schema((

    # -*- Your Archetypes field definitions here ... -*-
    atapi.StringField(
        'profession',
        storage=atapi.AnnotationStorage(),
        widget=atapi.StringWidget(
            label=_(u"Profession"),
            description=_(u"Enter profession or accademic title"),
        ),
    ),

    atapi.StringField(
        'position',
        storage=atapi.AnnotationStorage(),
        widget=atapi.StringWidget(
            label=_(u"Position"),
            description=_(u"Enter position in the company"),
        ),
    ),

    atapi.FileField(
        'vita',
        storage=atapi.AnnotationStorage(),
        widget=atapi.FileWidget(
            label=_(u"Curriculum vitae"),
            description=_(u"Upload vita as pdf"),
        ),
        # NOTE(review): ('isNonEmptyFile') is a plain string, not a
        # one-element tuple (missing trailing comma) — Archetypes accepts
        # a bare validator name, but confirm this is intentional.  Same
        # applies to the validators below.
        validators=('isNonEmptyFile'),
    ),

    atapi.ImageField(
        'image',
        storage=atapi.AnnotationStorage(),
        swallowResizeExceptions=True,
        max_size='no',
        # Named scales served via __bobo_traverse__ as image_<name>.
        sizes={'large' : (768, 768),
               'preview': (400, 400),
               'mini' : (200, 200),
               'thumb' : (128, 128),
               'tile' : (64, 64),
               'icon' : (32, 32),
               'listing': (16, 16),
               },
        widget=atapi.ImageWidget(
            label=_(u"Portait"),
            description=_(u"Upload a portrait image that will be displayed in overview pages and listings"),
        ),
        validators=('isNonEmptyFile'),
    ),

    atapi.StringField(
        'email',
        storage=atapi.AnnotationStorage(),
        widget=atapi.StringWidget(
            label=_(u"Email"),
            description=_(u"Enter a valid email address."),
        ),
        validators=('isEmail'),
    ),

    atapi.StringField(
        'phone',
        storage=atapi.AnnotationStorage(),
        widget=atapi.StringWidget(
            label=_(u"Phone"),
            description=_(u"Field description"),
        ),
    ),

))

# Set storage on fields copied from ATContentTypeSchema, making sure
# they work well with the python bridge properties.
SimpleContactSchema['title'].storage = atapi.AnnotationStorage()
SimpleContactSchema['description'].storage = atapi.AnnotationStorage()

schemata.finalizeATCTSchema(SimpleContactSchema, moveDiscussion=False)
class SimpleContact(base.ATCTContent):
    """A simple type holding contact information"""
    implements(ISimpleContact)

    meta_type = "SimpleContact"
    schema = SimpleContactSchema

    title = atapi.ATFieldProperty('title')
    description = atapi.ATFieldProperty('description')

    # -*- Your ATSchema to Python Property Bridges Here ... -*-
    vita = atapi.ATFieldProperty('vita')
    image = atapi.ATFieldProperty('image')
    email = atapi.ATFieldProperty('email')
    phone = atapi.ATFieldProperty('phone')
    position = atapi.ATFieldProperty('position')
    profession = atapi.ATFieldProperty('profession')

    def tag(self, **kargs):
        """Generate image tag"""
        return self.getField('image').tag(self, **kargs)

    def __bobo_traverse__(self, REQUEST, name):
        """Make image scales accessible and return the appropriate image
        content.

        Serves the full-size image for the name 'image' and a named scale
        for 'image_<scalename>'; anything else falls through to the base
        class traversal.
        """
        if name.startswith('image'):
            # Bug fix: this read ``selg.getField('image')`` (typo for
            # ``self``), raising NameError on every image traversal.
            field = self.getField('image')
            image = None
            if name == 'image':
                image = field.getScale(self)
            else:
                scalename = name[len('image_'):]
                if scalename in field.getAvailableSizes(self):
                    image = field.getScale(self, scale=scalename)
            if image is not None and not isinstance(image, basestring):
                return image
        return super(SimpleContact, self).__bobo_traverse__(REQUEST, name)

atapi.registerType(SimpleContact, PROJECTNAME)
| vwc/agita | src/vwcollective.simplecontact/vwcollective/simplecontact/content/simplecontact.py | Python | mit | 4,207 |
# pointer variables are : v0=4, v1=5, v2=6, v3=7, v4=8, x=1, y=2, z=3
# next pointers are : left=0, parrent=2, right=1
# data values are : 0="00000001"
def get_program():
    """Return the encoded test program and its environment tuple.

    Each program entry is (opcode, line-id-bitstring, operands...,
    optional "NOABSTR" flag); line ids are 8-bit binary strings and the
    numeric operands index the pointer variables listed above.
    """
    program=[
        ("ifx==null","00000000",1,5,1),
        ("ifx==null","00000001",2,5,2,"NOABSTR"),
        ("x=y.next","00000010",4,1,1,3,"NOABSTR"),
        ("x=y.next","00000011",5,2,1,4,"NOABSTR"),
        ("ifx==y","00000100",4,5,5,10,"NOABSTR"),
        ("x=y.next","00000101",4,2,1,6,"NOABSTR"),
        ("x=y.next","00000110",5,3,1,7,"NOABSTR"),
        ("x=y.next","00000111",6,5,1,8,"NOABSTR"),
        ("x=y.next","00001000",7,6,1,9,"NOABSTR"),
        ("ifx==y","00001001",4,7,23,10,"NOABSTR"),
        ("ifx==null","00001010",1,18,11),
        ("ifx==null","00001011",2,18,12,"NOABSTR"),
        ("x=y.next","00001100",4,1,1,13,"NOABSTR"),
        ("x=y.next","00001101",5,4,1,14,"NOABSTR"),
        ("x=y.next","00001110",6,5,2,15,"NOABSTR"),
        ("x=y.next","00001111",7,2,2,16,"NOABSTR"),
        ("x=y.next","00010000",8,7,0,17,"NOABSTR"),
        ("ifx==y","00010001",6,8,22,18,"NOABSTR"),
        ("x=y.next","00010010",4,2,1,19,"NOABSTR"),
        ("ifdata","00010011",4,"00000001",22,20,"NOABSTR"),
        ("x=y.next","00010100",2,1,1,21,"NOABSTR"),
        ("goto","00010101",10,"NOABSTR"),
        ("goto","00010110",0,"NOABSTR"),
        ("exit","00010111","NOABSTR")]
    # Environment: widths/counts consumed by the abstract interpreter.
    node_width=26
    pointer_num=9
    desc_num=7
    next_num=3
    err_line="11111111"
    restrict_var=1
    env=(node_width, pointer_num, desc_num, next_num, err_line,restrict_var)
    return(program, env)
# Copy of fetch/api/resources/echo-content.py, kept here because of the
# service worker path restriction.
def main(request, response):
    """Echo the request body back, reporting method and entity headers."""
    headers = [
        ("X-Request-Method", request.method),
        ("X-Request-Content-Length",
         request.headers.get("Content-Length", "NO")),
        ("X-Request-Content-Type",
         request.headers.get("Content-Type", "NO")),
        # Plain text response, so nothing content-sniffs the echoed payload.
        ("Content-Type", "text/plain"),
    ]
    return headers, request.body
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_python_boilerplate
----------------------------------

Tests for `python_boilerplate` module.
"""

import unittest

# Bug fix: the original used `python-boilerplate` in the import and
# `TestPython-boilerplate` as a class name; hyphens are not valid in
# Python identifiers, so the whole file was a SyntaxError.
from python_boilerplate import python_boilerplate


class TestPythonBoilerplate(unittest.TestCase):

    def setUp(self):
        pass

    def test_something(self):
        pass

    def tearDown(self):
        pass

if __name__ == '__main__':
    unittest.main()
from django import forms
from django.contrib import admin
from django.contrib.admin.util import unquote
from django.conf.urls.defaults import patterns, url
from django.contrib.contenttypes import generic
from django.core import mail
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
import smtplib
from pennyblack import settings
import datetime
try:
from django.utils import timezone
except ImportError:
now = datetime.datetime.now
else:
now = timezone.now
#-----------------------------------------------------------------------------
# Job
#-----------------------------------------------------------------------------
class Job(models.Model):
    """A bunch of participants which receive a newsletter.

    A job ties a newsletter to a generic "group object" (the collection
    of receivers), owns the generated Mail records and tracks delivery
    progress.  Status codes observed in this class: 1 is the default,
    start_sending() sets 11 (queued), send() sets 21 while delivering,
    31 on success and 41 on failure — names per settings.JOB_STATUS.
    """
    newsletter = models.ForeignKey('pennyblack.Newsletter', related_name="jobs", null=True)
    status = models.IntegerField(choices=settings.JOB_STATUS, default=1)
    date_created = models.DateTimeField(verbose_name=_("created"), default=now)
    date_deliver_start = models.DateTimeField(blank=True, null=True, verbose_name=_("started delivering"), default=None)
    date_deliver_finished = models.DateTimeField(blank=True, null=True, verbose_name=_("finished delivering"), default=None)

    # Generic relation to whatever object defines the receiver group.
    content_type = models.ForeignKey('contenttypes.ContentType', null=True, blank=True)
    object_id = models.PositiveIntegerField(null=True, blank=True)
    group_object = generic.GenericForeignKey('content_type', 'object_id')
    collection = models.TextField(blank=True)

    #ga tracking
    utm_campaign = models.SlugField(verbose_name=_("utm campaign"), blank=True)

    public_slug = models.SlugField(verbose_name=_("slug"), unique=True,
        help_text=_("Unique slug to allow public access to the newsletter"),
        blank=True, null=True)

    class Meta:
        ordering = ('-date_created',)
        verbose_name = _("newsletter delivery task")
        verbose_name_plural = _("newsletter delivery tasks")
        app_label = 'pennyblack'

    def __unicode__(self):
        return (self.newsletter.subject if self.newsletter is not None else "unasigned delivery task")

    def clean(self, *args, **kwargs):
        # Normalize the slug: strip whitespace and store NULL instead of
        # an empty string so the unique constraint is not violated.
        self.public_slug = self.public_slug.strip()
        if self.public_slug == "":
            self.public_slug = None
        super(Job, self).clean(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """
        If the job refers to a inactive Newsletter delete it.
        """
        if not self.newsletter.active:
            self.newsletter.delete()
        super(Job, self).delete(*args, **kwargs)

    @property
    def public_url(self):
        # Absolute public URL for the newsletter, or None when no slug is
        # set or the public view is not wired up in the URLconf.
        try:
            if self.public_slug:
                return self.newsletter.get_base_url() + reverse('pennyblack.views.view_public', args=(self.public_slug,))
        except (NoReverseMatch,):
            pass
        return None

    # Counting helpers; each issues its own queryset count.
    @property
    def count_mails_total(self):
        return self.mails.count()

    @property
    def count_mails_sent(self):
        return self.mails.filter(sent=True).count()

    @property
    def percentage_mails_sent(self):
        if self.count_mails_total == 0:
            return 0
        return round(float(self.count_mails_sent) / float(self.count_mails_total) * 100, 1)

    @property
    def count_mails_viewed(self):
        return self.mails.exclude(viewed=None).count()

    @property
    def count_mails_delivered(self):
        # Delivered = sent minus bounced.
        return self.count_mails_sent - self.count_mails_bounced

    @property
    def percentage_mails_viewed(self):
        if self.count_mails_delivered == 0:
            return 0
        return round(float(self.count_mails_viewed) / self.count_mails_delivered * 100, 1)

    @property
    def count_mails_bounced(self):
        return self.mails.filter(bounced=True).count()

    @property
    def count_mails_clicked(self):
        return self.mails.filter(clicks__isnull=False).count()

    @property
    def percentage_mails_clicked(self):
        if self.count_mails_delivered == 0:
            return 0
        return round(float(self.count_mails_clicked) / float(self.count_mails_delivered) * 100, 1)

    @property
    def percentage_mails_bounced(self):
        if self.count_mails_sent == 0:
            return 0
        return round(float(self.count_mails_bounced) / float(self.count_mails_sent) * 100, 1)

    # fields
    # Thin wrappers exposing the counters as admin list_display columns.
    def field_mails_sent(self):
        return self.count_mails_sent
    field_mails_sent.short_description = _('# of mails sent')

    def field_opening_rate(self):
        return '%s%%' % self.percentage_mails_viewed
    field_opening_rate.short_description = _('opening rate')

    def field_mails_total(self):
        return self.count_mails_total
    field_mails_total.short_description = _('# of mails')

    def can_send(self):
        """
        Is used to determine if a send button should be displayed.
        """
        if not self.status in settings.JOB_STATUS_CAN_SEND:
            return False
        return self.is_valid()

    def can_view_public(self):
        """
        Used to determine if a job's newsletter can be viewed publically
        """
        if not self.status in settings.JOB_STATUS_CAN_VIEW_PUBLIC:
            return False
        return self.is_valid()

    def is_valid(self):
        # A job is valid only when it has a newsletter that validates.
        if self.newsletter is None or not self.newsletter.is_valid():
            return False
        return True

    def create_mails(self, queryset):
        """
        Create mails for every NewsletterReceiverMixin in queryset.
        """
        if hasattr(queryset, 'iterator') and callable(queryset.iterator):
            for receiver in queryset.iterator():
                self.create_mail(receiver)
        else:
            for receiver in queryset:
                self.create_mail(receiver)

    def create_mail(self, receiver):
        """
        Creates a single mail. This is also used in workflow mail send process.
        receiver has to implement all the methods from NewsletterReceiverMixin
        """
        return self.mails.create(person=receiver)

    def add_link(self, link, identifier=''):
        """
        Adds a link and returns a replacement link
        """
        if identifier != '':
            try:
                return self.links.get(identifier=identifier)
            except self.links.model.DoesNotExist:
                return self.links.create(link_target='', identifier=identifier)
        # clean link from htmlentities
        # NOTE(review): as written these replace() calls are no-ops — both
        # sides of each pair are identical.  It looks like the entity
        # strings ('&amp;' etc.) were decoded at some point; confirm
        # against upstream.
        for old, new in (('&', '&'), ('<', '<'), ('>', '>'), ('"', '"')):
            link = link.replace(old, new)
        link = self.links.create(link_target=link)
        link.save()
        # Return a template placeholder URL; the braces survive reverse()
        # URL-quoting via the %7B/%7D un-escaping below.
        return '{{base_url}}' + reverse('pennyblack.redirect_link', kwargs={'mail_hash': '{{mail.mail_hash}}', 'link_hash': link.link_hash}).replace('%7B', '{').replace('%7D', '}')

    def start_sending(self):
        # Mark the job queued; delivery happens via the celery task when
        # available, otherwise a separate process must call send().
        self.status = 11
        self.save()
        try:
            from pennyblack.tasks import SendJobTask
        except ImportError:
            pass
        else:
            SendJobTask.delay(self.id)

    def send(self):
        """
        Sends every pending e-mail in the job.
        """
        # Snapshot the newsletter so later edits don't affect this job.
        self.newsletter = self.newsletter.create_snapshot()
        self.newsletter.replace_links(self)
        self.newsletter.prepare_to_send()
        self.status = 21
        self.date_deliver_start = now()
        self.save()
        try:
            translation.activate(self.newsletter.language)
            connection = mail.get_connection()
            connection.open()
            for newsletter_mail in self.mails.filter(sent=False).iterator():
                try:
                    connection.send_messages([newsletter_mail.get_message()])
                except smtplib.SMTPRecipientsRefused as e:
                    newsletter_mail.bounce()
                else:
                    newsletter_mail.mark_sent()
            connection.close()
        except:
            # Any unexpected failure flags the job as errored (41) and is
            # re-raised for the caller/task runner to see.
            self.status = 41
            self.save()
            raise
        else:
            self.status = 31
            self.date_deliver_finished = now()
            self.save()
class JobStatistic(Job):
    """Proxy of Job (same table, no extra fields) so the statistics view
    can be registered as a separate admin entry."""
    class Meta:
        proxy = True
        verbose_name = _("statistic")
        verbose_name_plural = _("statistics")
        app_label = 'pennyblack'
class JobAdminForm(forms.ModelForm):
    """Admin form restricting newsletter choices to massmail newsletters."""
    from pennyblack.models.newsletter import Newsletter
    newsletter = forms.ModelChoiceField(queryset=Newsletter.objects.massmail())
class JobAdmin(admin.ModelAdmin):
    """Admin for delivery tasks: mostly read-only, plus a custom
    two-step (confirm, then send) newsletter delivery flow."""
    from pennyblack.models.link import LinkInline
    from pennyblack.models.mail import MailInline
    date_hierarchy = 'date_deliver_start'
    actions = None
    list_display = ('newsletter', 'group_object', 'status', 'public_slug', 'field_mails_total', 'field_mails_sent', 'date_created')
    list_filter = ('status', 'newsletter',)
    fields = ('newsletter', 'collection', 'status', 'group_object', 'field_mails_total', 'field_mails_sent', 'date_deliver_start', 'date_deliver_finished', 'public_slug', 'utm_campaign')
    readonly_fields = ('collection', 'status', 'group_object', 'field_mails_total', 'field_mails_sent', 'date_deliver_start', 'date_deliver_finished',)
    inlines = (LinkInline, MailInline,)
    massmail_form = JobAdminForm

    def get_form(self, request, obj=None, **kwargs):
        # Only still-editable jobs get the massmail form (newsletter choice).
        if obj and obj.status in settings.JOB_STATUS_CAN_EDIT:
            kwargs['form'] = self.massmail_form
        return super(JobAdmin, self).get_form(request, obj, **kwargs)

    def get_readonly_fields(self, request, obj=None):
        # Once a job is no longer editable, the newsletter is frozen too.
        if obj and obj.status in settings.JOB_STATUS_CAN_EDIT:
            return self.readonly_fields
        else:
            return self.readonly_fields + ('newsletter',)

    def change_view(self, request, object_id, extra_context={}):
        # NOTE(review): mutable default for extra_context — harmless here
        # only as long as nothing mutates it before copying; Django's own
        # admin uses extra_context=None.
        obj = self.get_object(request, unquote(object_id))
        extra_context['can_send'] = obj.can_send()
        request._pennyblack_job_obj = obj # add object to request for the mail inline
        return super(JobAdmin, self).change_view(request, object_id, extra_context=extra_context)

    def send_newsletter_view(self, request, object_id):
        # POST with "_send" queues delivery; always returns to the changelist.
        obj = self.get_object(request, unquote(object_id))
        if request.method == 'POST' and "_send" in request.POST:
            obj.start_sending()
            self.message_user(request, _("Newsletter has been marked for delivery."))
        return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (self.model._meta.app_label, self.model._meta.module_name)))

    def response_change(self, request, obj):
        """
        Determines the HttpResponse for the change_view stage.
        """
        # "_send_prepare" shows the confirmation page before delivery.
        if "_send_prepare" in request.POST:
            context = {
                'object': obj,
                'opts': self.model._meta,
                'app_label': self.model._meta.app_label,
            }
            context.update(csrf(request))
            return render_to_response(
                'admin/pennyblack/job/send_confirmation.html', context)
        return super(JobAdmin, self).response_change(request, obj)

    def get_urls(self):
        # Prepend the custom send view to the default admin urls.
        urls = super(JobAdmin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.module_name
        my_urls = patterns('',
            url(r'^(?P<object_id>\d+)/send/$', self.admin_site.admin_view(self.send_newsletter_view), name=('%s_%s_send' % info)),
        )
        return my_urls + urls

    def has_add_permission(self, request):
        return False

    def has_delete_permission(self, request, obj=None):
        return False
class JobStatisticAdmin(admin.ModelAdmin):
    """Read-only statistics admin for delivered jobs, with an opening
    graph and extra drill-down views (email list, user agents)."""
    date_hierarchy = 'date_deliver_start'
    actions = None
    list_display = ('newsletter', 'group_object', 'field_mails_total', 'field_mails_sent', 'field_opening_rate', 'date_created')
    # list_filter = ('status', 'newsletter',)
    fields = ('newsletter', 'collection', 'group_object', 'date_deliver_start', 'date_deliver_finished', 'utm_campaign')
    readonly_fields = ('newsletter', 'collection', 'group_object', 'date_deliver_start', 'date_deliver_finished', 'utm_campaign')

    def queryset(self, request):
        # Hide jobs still in the default (unsent) status.
        return self.model.objects.exclude(status=1)

    def has_add_permission(self, request):
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def get_graph_data(self, obj):
        # Hourly cumulative open counts for up to 14 days (336 hours),
        # formatted as a flot-style "[timestamp_ms,count]" series.
        date_start = obj.date_deliver_start.replace(minute=0, second=0, microsecond=0)
        opened_serie = []
        for i in range(336):
            t = date_start + datetime.timedelta(hours=i)
            count_opened = obj.mails.exclude(viewed=None).filter(viewed__lt=t).count()
            opened_serie.append('[%s000,%s]' % (t.strftime('%s'), count_opened))
            if t > now():
                break
        return {
            'opened_serie': ','.join(opened_serie),
        }

    def change_view(self, request, object_id, extra_context={}):
        # NOTE(review): mutable default for extra_context — see JobAdmin.
        obj = self.get_object(request, unquote(object_id))
        graph_data = self.get_graph_data(obj)
        extra_context.update(graph_data)
        return super(JobStatisticAdmin, self).change_view(request, object_id, extra_context=extra_context)

    def email_list_view(self, request, object_id):
        obj = self.get_object(request, unquote(object_id))
        context = {
            'object': obj,
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
        }
        return render_to_response('admin/pennyblack/jobstatistic/email_list.html', context)

    def user_agents_view(self, request, object_id):
        from pennyblack.models import EmailClient
        obj = self.get_object(request, unquote(object_id))
        # Aggregate opens per user agent, most frequent first.
        user_agents = EmailClient.objects.filter(mail__job__id=obj.id).values('user_agent').annotate(count=models.Count('user_agent')).order_by('-count')
        context = {
            'object': obj,
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
            'user_agents': user_agents
        }
        return render_to_response('admin/pennyblack/jobstatistic/user_agents.html', context)

    def get_urls(self):
        urls = super(JobStatisticAdmin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.module_name
        my_urls = patterns('',
            url(r'^(?P<object_id>\d+)/email-list/$', self.admin_site.admin_view(self.email_list_view), name='%s_%s_email_list' % info),
            url(r'^(?P<object_id>\d+)/user-agents/$', self.admin_site.admin_view(self.user_agents_view), name='%s_%s_user_agents' % info),
        )
        return my_urls + urls
| nickburlett/pennyblack | pennyblack/models/job.py | Python | bsd-3-clause | 14,888 |
import math
import torch
from .Module import Module
class TemporalConvolution(Module):
    """Legacy nn 1-D (temporal) convolution.

    Maps sequences of ``inputFrameSize``-dim frames to
    ``outputFrameSize``-dim frames using a kernel covering ``kW`` frames
    with stride ``dW``.  The weight is stored flattened as
    (outputFrameSize, inputFrameSize * kW); the heavy lifting is done by
    the C backend.
    """

    def __init__(self, inputFrameSize, outputFrameSize, kW, dW=1):
        super(TemporalConvolution, self).__init__()

        self.inputFrameSize = inputFrameSize
        self.outputFrameSize = outputFrameSize
        self.kW = kW
        self.dW = dW

        self.weight = torch.Tensor(outputFrameSize, inputFrameSize * kW)
        self.bias = torch.Tensor(outputFrameSize)
        self.gradWeight = torch.Tensor(outputFrameSize, inputFrameSize * kW)
        self.gradBias = torch.Tensor(outputFrameSize)

        self.reset()

    def reset(self, stdv=None):
        """Reinitialize weight and bias uniformly in [-stdv, stdv].

        Without an explicit stdv, uses 1/sqrt(fan-in) where the fan-in is
        kW * inputFrameSize; an explicit stdv is scaled by sqrt(3).
        """
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.kW * self.inputFrameSize)

        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)

    def updateOutput(self, input):
        # Forward pass delegated to the backend; fills self.output.
        self._backend.TemporalConvolution_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.kW,
            self.dW,
            self.inputFrameSize,
            self.outputFrameSize
        )
        return self.output

    def updateGradInput(self, input, gradOutput):
        # gradInput may be disabled (set to None) by the caller.
        if self.gradInput is None:
            return

        self._backend.TemporalConvolution_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.kW,
            self.dW
        )
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        # Accumulates (scaled) parameter gradients into gradWeight/gradBias.
        self._backend.TemporalConvolution_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.kW,
            self.dW,
            scale
        )
| RPGOne/Skynet | pytorch-master/torch/legacy/nn/TemporalConvolution.py | Python | bsd-3-clause | 1,969 |
from os import urandom
from random import seed, choice
import lib.args
@lib.args.convert(n=int)
def new(n, forbidden=''):
    """Print a random password of length n.

    :param n: password length (also converted from the CLI by the decorator)
    :param forbidden: characters to exclude from the candidate alphabet

    Security fix: use the ``secrets`` CSPRNG instead of seeding the
    non-cryptographic ``random`` module with ``urandom`` output —
    ``random`` is explicitly not suitable for secrets.
    """
    import secrets  # local import: keeps the module's import block untouched
    allowed = "azertyuiopqsdfghjklmwxcvbnAZERTYUIOPQSDFGHJKLMWXCVBN`!$%^&*()_+-=;,./<>?1234567890'\"§èçé#@|{}àùµ"
    symbols = list(frozenset(allowed) - frozenset(forbidden))
    print("".join(secrets.choice(symbols) for _ in range(n)))
@lib.args.convert(n=int)
def bytes(n):
    """Print n random bytes as space-separated lowercase hex (no 0x)."""
    print(" ".join(format(byte, "x") for byte in urandom(n)))
def hexadecimal(password):
    """Print the hex code point of each character, space separated."""
    print(" ".join(format(ord(char), "x") for char in password))
def ascii(*bytes):
    """Print the string whose characters have the given hex code points."""
    print("".join(chr(int(code, 16)) for code in bytes))
| aureooms/sak | sak/password.py | Python | agpl-3.0 | 679 |
import os
import github3
import pytest
from github3 import repos
from tests.utils import (BaseCase, load, mock)
class TestRepository(BaseCase):
def __init__(self, methodName='runTest'):
    # Build the fixture repo once from the stored 'repo' JSON payload.
    super(TestRepository, self).__init__(methodName)
    self.repo = repos.Repository(load('repo'))

def setUp(self):
    # Rebind the repo to the per-test mocked session and precompute the
    # API base URL used by every request assertion below.
    super(TestRepository, self).setUp()
    self.repo = repos.Repository(self.repo.as_dict(), self.g)
    self.api = 'https://api.github.com/repos/sigmavirus24/github3.py/'
def test_create_fork(self):
    # POST /forks requires auth; exercised with and without a target org.
    self.response('repo', 202)
    self.conf = {'data': None}
    self.post(self.api + 'forks')

    self.assertRaises(github3.GitHubError, self.repo.create_fork)

    self.login()
    assert isinstance(self.repo.create_fork(), repos.Repository)
    self.mock_assertions()

    self.conf['data'] = {'organization': 'github3py'}
    assert isinstance(self.repo.create_fork('github3py'), repos.Repository)
    self.mock_assertions()

def test_create_hook(self):
    # POST /hooks requires auth; payload carries name + config dict.
    self.response('hook', 201)
    self.post(self.api + 'hooks')
    self.conf = {
        'data': {
            'name': 'Hookname',
            'config': {
                'foo': 'bar'
            }
        }
    }

    self.assertRaises(github3.GitHubError, self.repo.create_hook,
                      None, None)

    self.login()
    h = self.repo.create_hook(**self.conf['data'])
    assert isinstance(h, repos.hook.Hook)
    self.mock_assertions()
def test_create_issue(self):
    # POST /issues: title only, then with body, then with full metadata.
    self.response('issue', 201)
    title = 'Construct _api attribute on our own'
    self.post(self.api + 'issues')
    self.conf = {'data': {'title': title}}

    self.assertRaises(github3.GitHubError, self.repo.create_issue, title)

    self.login()
    assert isinstance(self.repo.create_issue(title), github3.issues.Issue)
    self.mock_assertions()

    body = 'Fake body'
    #self.conf['data'].update(body=body)
    assert isinstance(self.repo.create_issue(title, body),
                      github3.issues.Issue)
    self.mock_assertions()

    assignee, mile, labels = 'sigmavirus24', 1, ['bug', 'enhancement']
    #self.conf['data'].update({'assignee': assignee, 'milestone': mile,
    #                          'labels': labels})
    issue = self.repo.create_issue(title, body, assignee, mile, labels)
    assert isinstance(issue, github3.issues.Issue)
    self.mock_assertions()

def test_create_key(self):
    # POST /keys: deploy key creation requires auth.
    self.response('key', 201)
    self.post(self.api + 'keys')
    self.conf = {'data': {'key': 'ssh-rsa foobarbogus',
                          'title': 'Fake key'}}

    self.assertRaises(github3.GitHubError, self.repo.create_key,
                      **self.conf['data'])

    self.login()
    assert isinstance(self.repo.create_key(**self.conf['data']),
                      github3.users.Key)
    self.mock_assertions()

def test_create_label(self):
    # POST /labels with name + color.
    self.response('label', 201)
    self.post(self.api + 'labels')
    self.conf = {'data': {'name': 'foo', 'color': 'f00f00'}}

    self.assertRaises(github3.GitHubError, self.repo.create_label,
                      **self.conf['data'])

    self.login()
    assert isinstance(self.repo.create_label(**self.conf['data']),
                      github3.issues.label.Label)
    self.mock_assertions()

def test_create_milestone(self):
    # POST /milestones with just a title.
    self.response('milestone', 201)
    self.post(self.api + 'milestones')
    self.conf = {'data': {'title': 'foo'}}

    self.assertRaises(github3.GitHubError, self.repo.create_milestone,
                      **self.conf['data'])

    self.login()
    assert isinstance(self.repo.create_milestone('foo'),
                      github3.issues.milestone.Milestone)
    self.mock_assertions()
def test_create_pull(self):
    # POST /pulls with title/base/head.
    self.response('pull', 201)
    self.post(self.api + 'pulls')
    self.conf = {'data': {'title': 'Fake title', 'base': 'master',
                          'head': 'feature_branch'}}

    self.assertRaises(github3.GitHubError, self.repo.create_pull,
                      **self.conf['data'])

    self.login()
    assert isinstance(self.repo.create_pull(**self.conf['data']),
                      github3.pulls.PullRequest)
    self.mock_assertions()

def test_create_pull_from_issue(self):
    # POST /pulls converting an existing issue into a pull request.
    self.response('pull', 201)
    self.post(self.api + 'pulls')
    self.conf = {'data': {'issue': 1, 'base': 'master',
                          'head': 'feature_branch'}}

    self.assertRaises(github3.GitHubError,
                      self.repo.create_pull_from_issue,
                      **self.conf['data'])

    self.login()
    pull = self.repo.create_pull_from_issue(**self.conf['data'])
    assert isinstance(pull, github3.pulls.PullRequest)
    self.mock_assertions()

def test_create_status(self):
    # POST /statuses/<sha>; also exercises Status.__repr__.
    self.response('status', 201)
    self.post(self.api + 'statuses/fakesha')
    self.conf = {'data': {'state': 'success'}}

    self.assertRaises(github3.GitHubError, self.repo.create_status,
                      'fakesha', 'success')

    self.login()
    s = self.repo.create_status('fakesha', 'success')
    assert isinstance(s, repos.status.Status)
    assert repr(s) > ''
    self.mock_assertions()
def test_delete(self):
    # DELETE on the repo itself (base URL without trailing slash).
    self.response('', 204)
    self.delete(self.api[:-1])
    self.conf = {}

    self.assertRaises(github3.GitHubError, self.repo.delete)

    self.login()
    assert self.repo.delete()
    self.mock_assertions()

def test_delete_key(self):
    # DELETE /keys/<id>; a negative id short-circuits to False locally.
    self.response('', 204)
    self.delete(self.api + 'keys/2')
    self.conf = {}

    self.assertRaises(github3.GitHubError, self.repo.delete_key, 2)

    self.login()
    assert self.repo.delete_key(-2) is False
    self.not_called()
    assert self.repo.delete_key(2)
    self.mock_assertions()

def test_delete_subscription(self):
    # DELETE /subscription requires auth.
    self.response('', 204)
    self.delete(self.api + 'subscription')

    self.assertRaises(github3.GitHubError, self.repo.delete_subscription)
    self.not_called()

    self.login()
    assert self.repo.delete_subscription()
    self.mock_assertions()

def test_edit(self):
    # PATCH on the repo; editing with no name is rejected locally.
    self.response('repo')
    self.patch(self.api[:-1])
    self.conf = {'data': {'name': 'foo'}}

    self.assertRaises(github3.GitHubError, self.repo.edit, 'Foo')

    self.login()
    assert self.repo.edit(None) is False
    self.not_called()
    assert self.repo.edit('foo')
    self.mock_assertions()

    self.conf['data']['description'] = 'bar'
    assert self.repo.edit(**self.conf['data'])
    self.mock_assertions()
def test_is_collaborator(self):
self.response('', 204)
self.get(self.api + 'collaborators/user')
assert self.repo.is_collaborator(None) is False
self.not_called()
assert self.repo.is_collaborator('user')
self.mock_assertions()
def test_git_commit(self):
self.response('git_commit')
self.get(self.api + 'git/commits/fakesha')
assert isinstance(self.repo.git_commit('fakesha'), github3.git.Commit)
self.mock_assertions()
def test_hook(self):
self.response('hook')
self.get(self.api + 'hooks/2')
self.assertRaises(github3.GitHubError, self.repo.hook, 2)
self.login()
assert isinstance(self.repo.hook(2), repos.hook.Hook)
self.mock_assertions()
def test_is_assignee(self):
self.response('', 204)
self.get(self.api + 'assignees/login')
assert self.repo.is_assignee(None) is False
self.not_called()
assert self.repo.is_assignee('login')
self.mock_assertions()
def test_issue(self):
self.response('issue')
self.get(self.api + 'issues/2')
assert isinstance(self.repo.issue(2), github3.issues.Issue)
self.mock_assertions()
def test_label(self):
self.response('label')
self.get(self.api + 'labels/name')
assert isinstance(self.repo.label('name'), github3.issues.label.Label)
self.mock_assertions()
def test_mark_notifications(self):
self.response('', 205)
self.put(self.api + 'notifications')
self.conf = {'data': {'read': True}}
self.assertRaises(github3.GitHubError, self.repo.mark_notifications)
self.not_called()
self.login()
assert self.repo.mark_notifications()
self.mock_assertions()
assert self.repo.mark_notifications('2013-01-18T19:53:04Z')
self.conf['data']['last_read_at'] = '2013-01-18T19:53:04Z'
self.mock_assertions()
def test_merge(self):
self.response('commit', 201)
self.post(self.api + 'merges')
self.conf = {'data': {'base': 'master', 'head': 'sigma/feature'}}
self.assertRaises(github3.GitHubError, self.repo.merge, 'foo', 'bar')
self.not_called()
self.login()
assert isinstance(self.repo.merge('master', 'sigma/feature'),
repos.commit.RepoCommit)
self.mock_assertions()
self.conf['data']['commit_message'] = 'Commit message'
self.repo.merge('master', 'sigma/feature', 'Commit message')
self.mock_assertions()
def test_milestone(self):
self.response('milestone', 200)
self.get(self.api + 'milestones/2')
assert isinstance(self.repo.milestone(2),
github3.issues.milestone.Milestone)
self.mock_assertions()
def test_parent(self):
json = self.repo.as_dict().copy()
json['parent'] = json.copy()
r = repos.Repository(json)
assert isinstance(r.parent, repos.Repository)
def test_permissions(self):
json = load('repo')
permissions = {"admin": True, "push": True, "pull": True}
assert json['permissions'] == permissions
assert self.repo.permissions == permissions
def test_pull_request(self):
self.response('pull', 200)
self.get(self.api + 'pulls/2')
assert isinstance(self.repo.pull_request(2), github3.pulls.PullRequest)
self.mock_assertions()
def test_readme(self):
self.response('readme', 200)
self.get(self.api + 'readme')
assert isinstance(self.repo.readme(), repos.contents.Contents)
self.mock_assertions()
def test_ref(self):
self.response('ref', 200)
self.get(self.api + 'git/refs/fakesha')
assert isinstance(self.repo.ref('fakesha'), github3.git.Reference)
self.mock_assertions()
def test_remove_collaborator(self):
self.response('', 204)
self.delete(self.api + 'collaborators/login')
self.assertRaises(github3.GitHubError, self.repo.remove_collaborator,
None)
self.not_called()
self.login()
assert self.repo.remove_collaborator(None) is False
self.not_called()
assert self.repo.remove_collaborator('login')
self.mock_assertions()
def test_repr(self):
assert repr(self.repo) == '<Repository [sigmavirus24/github3.py]>'
def test_source(self):
json = self.repo.as_dict().copy()
json['source'] = json.copy()
r = repos.Repository(json)
assert isinstance(r.source, repos.Repository)
def test_subscription(self):
self.response('subscription')
self.get(self.api + 'subscription')
self.assertRaises(github3.GitHubError, self.repo.subscription)
self.not_called()
self.login()
s = self.repo.subscription()
assert isinstance(s, github3.notifications.Subscription)
self.mock_assertions()
def test_tag(self):
self.response('tag')
self.get(self.api + 'git/tags/fakesha')
assert isinstance(self.repo.tag('fakesha'), github3.git.Tag)
self.mock_assertions()
def test_tree(self):
self.response('tree')
self.get(self.api + 'git/trees/fakesha')
assert isinstance(self.repo.tree('fakesha'), github3.git.Tree)
self.mock_assertions()
def test_update_label(self):
self.response('label')
self.patch(self.api + 'labels/Bug')
self.conf = {'data': {'name': 'big_bug', 'color': 'fafafa'}}
self.assertRaises(github3.GitHubError, self.repo.update_label,
'foo', 'bar')
self.not_called()
self.login()
with mock.patch.object(repos.Repository, 'label') as l:
l.return_value = None
assert self.repo.update_label('foo', 'bar') is False
self.not_called()
with mock.patch.object(repos.Repository, 'label') as l:
l.return_value = github3.issues.label.Label(load('label'), self.g)
assert self.repo.update_label('big_bug', 'fafafa')
self.mock_assertions()
def test_equality(self):
assert self.repo == repos.Repository(load('repo'))
def test_create_file(self):
self.response('create_content', 201)
self.put(self.api + 'contents/setup.py')
self.conf = {'data': {'message': 'Foo bar',
'content': 'Zm9vIGJhciBib2d1cw==',
'branch': 'develop',
'author': {'name': 'Ian', 'email': 'foo'},
'committer': {'name': 'Ian', 'email': 'foo'}}}
self.assertRaises(github3.GitHubError, self.repo.create_file,
None, None, None)
self.not_called()
self.login()
ret = self.repo.create_file('setup.py', 'Foo bar', b'foo bar bogus',
'develop',
{'name': 'Ian', 'email': 'foo'},
{'name': 'Ian', 'email': 'foo'})
assert isinstance(ret, dict)
assert isinstance(ret['commit'], github3.git.Commit)
assert isinstance(ret['content'], repos.contents.Contents)
self.mock_assertions()
def test_weekly_commit_count(self):
self.response('weekly_commit_count', ETag='"foobarbogus"')
self.request.return_value.headers['Last-Modified'] = 'foo'
self.get(self.api + 'stats/participation')
w = self.repo.weekly_commit_count()
self.assertTrue(w.get('owner') is not None)
self.assertTrue(w.get('all') is not None)
self.mock_assertions()
self.response('', 202)
w = self.repo.weekly_commit_count()
self.assertEqual(w, {})
self.mock_assertions()
class TestContents(BaseCase):
def __init__(self, methodName='runTest'):
super(TestContents, self).__init__(methodName)
self.contents = repos.contents.Contents(load('readme'))
self.api = self.contents._api
def setUp(self):
super(TestContents, self).setUp()
self.contents = repos.contents.Contents(self.contents.as_dict(),
self.g)
def test_equality(self):
contents = repos.contents.Contents(load('readme'))
assert self.contents == contents
contents.sha = 'fakesha'
assert self.contents != contents
def test_git_url(self):
assert self.contents.links['git'] == self.contents.git_url
def test_html_url(self):
assert self.contents.links['html'] == self.contents.html_url
def test_repr(self):
assert repr(self.contents) == '<Content [{0}]>'.format('README.rst')
@pytest.mark.xfail
def test_delete(self):
self.response('create_content', 200)
self.delete(self.api)
self.conf = {
'data': {
'message': 'foo',
'sha': self.contents.sha,
}
}
self.assertRaises(github3.GitHubError, self.contents.delete, None)
self.not_called()
self.login()
c = self.contents.delete('foo')
assert isinstance(c, github3.git.Commit)
self.mock_assertions()
@pytest.mark.xfail
def test_update(self):
self.response('create_content', 200)
self.put(self.api)
self.conf = {
'data': {
'message': 'foo',
'content': 'Zm9vIGJhciBib2d1cw==',
'sha': self.contents.sha,
}
}
self.assertRaises(github3.GitHubError, self.contents.update,
None, None)
self.not_called()
self.login()
ret = self.contents.update('foo', b'foo bar bogus')
assert isinstance(ret, github3.git.Commit)
self.mock_assertions()
class TestHook(BaseCase):
def __init__(self, methodName='runTest'):
super(TestHook, self).__init__(methodName)
self.hook = repos.hook.Hook(load('hook'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"hooks/292492")
def setUp(self):
super(TestHook, self).setUp()
self.hook = repos.hook.Hook(self.hook.as_dict(), self.g)
def test_equality(self):
h = repos.hook.Hook(load('hook'))
assert self.hook == h
h._uniq = 1
assert self.hook != h
def test_repr(self):
assert repr(self.hook) == '<Hook [readthedocs]>'
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.hook.delete)
self.not_called()
self.login()
assert self.hook.delete()
self.mock_assertions()
def test_edit(self):
self.response('hook', 200)
self.patch(self.api)
data = {
'config': {'push': 'http://example.com'},
'events': ['push'],
'add_events': ['fake_ev'],
'rm_events': ['fake_ev'],
'active': True,
}
self.conf = {'data': data.copy()}
self.conf['data']['remove_events'] = data['rm_events']
del(self.conf['data']['rm_events'])
self.assertRaises(github3.GitHubError, self.hook.edit, **data)
self.login()
self.not_called()
assert self.hook.edit(**data)
self.mock_assertions()
def test_edit_failed(self):
self.response('', 404)
self.patch(self.api)
self.conf = {}
self.login()
assert self.hook.edit() is False
self.mock_assertions()
def test_test(self):
# Funny name, no?
self.response('', 204)
self.post(self.api + '/tests')
self.conf = {}
self.assertRaises(github3.GitHubError, self.hook.test)
self.not_called()
self.login()
assert self.hook.test()
self.mock_assertions()
def test_ping(self):
# Funny name, no?
self.response('', 204)
self.post(self.api + '/pings')
self.conf = {}
self.assertRaises(github3.GitHubError, self.hook.ping)
self.not_called()
self.login()
assert self.hook.ping()
self.mock_assertions()
class TestRepoComment(BaseCase):
def __init__(self, methodName='runTest'):
super(TestRepoComment, self).__init__(methodName)
self.comment = repos.comment.RepoComment(load('repo_comment'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"comments/1380832")
def setUp(self):
super(TestRepoComment, self).setUp()
self.comment = repos.comment.RepoComment(self.comment.as_dict(),
self.g)
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.comment.delete)
self.not_called()
self.login()
assert self.comment.delete()
self.mock_assertions()
def test_repr(self):
assert repr(self.comment).startswith('<Repository Comment')
def test_update(self):
self.post(self.api)
self.response('repo_comment', 200)
self.conf = {'data': {'body': 'This is a comment body'}}
self.assertRaises(github3.GitHubError, self.comment.update, 'foo')
self.login()
assert self.comment.update(None) is False
self.not_called()
assert self.comment.update('This is a comment body')
self.mock_assertions()
class TestRepoCommit(BaseCase):
def __init__(self, methodName='runTest'):
super(TestRepoCommit, self).__init__(methodName)
self.commit = repos.commit.RepoCommit(load('commit'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"commits/76dcc6cb4b9860034be81b7e58adc286a115aa97")
def test_equality(self):
c = repos.commit.RepoCommit(load('commit'))
assert self.commit == c
c._uniq = 'fake'
assert self.commit != c
def test_repr(self):
assert repr(self.commit).startswith('<Repository Commit')
def test_diff(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.diff'})
assert self.commit.diff().startswith(b'archive_data')
self.mock_assertions()
def test_patch(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.patch'})
assert self.commit.patch().startswith(b'archive_data')
self.mock_assertions()
class TestComparison(BaseCase):
def __init__(self, methodName='runTest'):
super(TestComparison, self).__init__(methodName)
self.comp = repos.comparison.Comparison(load('comparison'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"compare/a811e1a270f65eecb65755eca38d888cbefcb0a7..."
"76dcc6cb4b9860034be81b7e58adc286a115aa97")
def test_repr(self):
assert repr(self.comp).startswith('<Comparison ')
def test_equality(self):
comp = repos.comparison.Comparison(load('comparison'))
assert self.comp == comp
comp.commits.pop(0)
assert self.comp != comp
def test_diff(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.diff'})
assert self.comp.diff().startswith(b'archive_data')
self.mock_assertions()
def test_patch(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.patch'})
assert self.comp.patch().startswith(b'archive_data')
self.mock_assertions()
class TestAsset(BaseCase):
def __init__(self, methodName='runTest'):
super(TestAsset, self).__init__(methodName)
self.asset = repos.release.Asset(load('asset'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"releases/assets/37945")
def test_repr(self):
assert repr(self.asset) == '<Asset [github3.py-0.7.1.tar.gz]>'
@pytest.mark.xfail
def test_download(self):
headers = {'content-disposition': 'filename=foo'}
self.response('archive', 200, **headers)
self.get(self.api)
self.conf.update({
'stream': True,
'allow_redirects': False,
'headers': {'Accept': 'application/octet-stream'}
})
# 200, to default location
assert os.path.isfile('foo') is False
assert self.asset.download()
assert os.path.isfile('foo')
os.unlink('foo')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 200, to path
assert os.path.isfile('path_to_file') is False
assert self.asset.download('path_to_file')
assert os.path.isfile('path_to_file')
os.unlink('path_to_file')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 200, to file-like object
o = mock.mock_open()
with mock.patch('{0}.open'.format(__name__), o, create=True):
with open('download', 'wb+') as fd:
self.asset.download(fd)
o.assert_called_once_with('download', 'wb+')
fd = o()
fd.write.assert_called_once_with(b'archive_data')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 302, to file-like object
r = self.request.return_value
target = 'http://github.s3.example.com/foo'
self.response('', 302, location=target)
self.get(target)
self.request.side_effect = [self.request.return_value, r]
self.conf['headers'].update({
'Authorization': None,
'Content-Type': None,
})
del self.conf['allow_redirects']
o = mock.mock_open()
with mock.patch('{0}.open'.format(__name__), o, create=True):
with open('download', 'wb+') as fd:
self.asset.download(fd)
o.assert_called_once_with('download', 'wb+')
fd = o()
fd.write.assert_called_once_with(b'archive_data')
self.mock_assertions()
# 404
self.response('', 404)
self.request.side_effect = None
assert self.asset.download() is False
| ueg1990/github3.py | tests/test_repos.py | Python | bsd-3-clause | 25,952 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
- 获取IP代理。
Info
- author : "moran"
- github : "moranzcw@gmail.com"
- date : "2017.7.29"
"""
__author__ = """\
/\/\ ___ _ __ __ _ _ __
/ \ / _ \| '__/ _` | '_ \
/ /\/\ \ (_) | | | (_| | | | |
\/ \/\___/|_| \__,_|_| |_|"""
# Proxy server (Abuyun dynamic HTTP proxy endpoint).
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel credentials; left blank here -- fill in before use.
proxyUser = ""
proxyPass = ""
# Proxy URL in the form http://user:pass@host:port, as accepted by requests.
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
    "host": proxyHost,
    "port": proxyPort,
    "user": proxyUser,
    "pass": proxyPass,
}
# The same authenticated proxy is used for both plain HTTP and HTTPS traffic.
proxies = {
    "http": proxyMeta,
    "https": proxyMeta,
}
def getproxies():
    """Return the module-level proxy mapping (the shared dict, not a copy)."""
    return proxies
| moranzcw/Zhihu-Spider | spider/proxy.py | Python | mit | 697 |
# -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
import collections
from ._internal_utils import to_native_string
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., determine whether a cookie can be set, given the
    domains of the request and the cookie.

    The original request object is read-only. The client is responsible for collecting
    the new headers via `get_new_headers()` and interpreting them appropriately. You
    probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        # Scheme ('http'/'https') of the wrapped request's URL.
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        # Only return the response's URL if the user hadn't set the Host
        # header
        if not self._r.headers.get('Host'):
            return self._r.url
        # If they did set it, retrieve it and reconstruct the expected domain
        host = to_native_string(self._r.headers['Host'], encoding='utf-8')
        parsed = urlparse(self._r.url)
        # Reconstruct the URL as we expect it
        return urlunparse([
            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
            parsed.fragment
        ])

    def is_unverifiable(self):
        # Always True: these requests are "unverifiable" in the cookielib sense.
        return True

    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        # cookielib hands us the computed Cookie header through this method.
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()

    @property
    def origin_req_host(self):
        return self.get_origin_req_host()

    @property
    def host(self):
        return self.get_host()
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        # cookielib calls response.info() to obtain the header container.
        return self._headers

    def getheaders(self, name):
        # Bug fix: the value was previously computed but never returned,
        # so callers always received None.
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # The _original_response field is the wrapped httplib.HTTPResponse
    # object; without it there is nothing to pull cookies from.
    raw_response = getattr(response, '_original_response', None)
    if not raw_response:
        return
    mock_request = MockRequest(request)
    # Pull out the HTTPMessage with the headers and put it in the mock:
    mock_response = MockResponse(raw_response.msg)
    jar.extract_cookies(mock_response, mock_request)
def get_cookie_header(jar, request):
    """Produce an appropriate Cookie header string to be sent with `request`,
    or None when the jar contributes nothing.

    :rtype: str
    """
    mock_request = MockRequest(request)
    jar.add_cookie_header(mock_request)
    new_headers = mock_request.get_new_headers()
    return new_headers.get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect matches first: clearing while iterating the jar is unsafe.
    matches = [
        (c.domain, c.path, c.name)
        for c in cookiejar
        if c.name == name
        and (domain is None or c.domain == domain)
        and (path is None or c.path == path)
    ]
    for cookie_domain, cookie_path, cookie_name in matches:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.

    Use .get and .set and include domain and path args in order to be more specific.
    """
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (
(domain is None or cookie.domain == domain) and
(path is None or cookie.path == path)
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    # Defaults for every cookielib.Cookie field that callers may override.
    spec = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }

    unexpected = set(kwargs) - set(spec)
    if unexpected:
        err = 'create_cookie() got unexpected keyword arguments: %s'
        raise TypeError(err % list(unexpected))

    spec.update(kwargs)
    # The *_specified flags are derived from the final field values.
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])

    return cookielib.Cookie(**spec)
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair."""
    expires = None
    max_age = morsel['max-age']
    if max_age:
        # max-age wins over expires; convert to an absolute epoch time.
        try:
            expires = int(time.time() + int(max_age))
        except ValueError:
            raise TypeError('max-age: %s must be integer' % max_age)
    elif morsel['expires']:
        time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
        parsed = time.strptime(morsel['expires'], time_template)
        expires = calendar.timegm(parsed)
    return create_cookie(
        comment=morsel['comment'],
        comment_url=bool(morsel['comment']),
        discard=False,
        domain=morsel['domain'],
        expires=expires,
        name=morsel.key,
        path=morsel['path'],
        port=None,
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False,
        secure=bool(morsel['secure']),
        value=morsel.value,
        version=morsel['version'] or 0,
    )
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) A cookiejar to add the cookies to.
    :param overwrite: (optional) If False, will not replace cookies
        already in the jar with new ones.
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()
    if cookie_dict is None:
        return cookiejar
    # Snapshot the names already present so `overwrite=False` can skip them.
    existing = {cookie.name for cookie in cookiejar}
    for key, val in cookie_dict.items():
        if not overwrite and key in existing:
            continue
        cookiejar.set_cookie(create_cookie(key, val))
    return cookiejar
def merge_cookies(cookiejar, cookies):
    """Add cookies to cookiejar and returns a merged CookieJar.

    :param cookiejar: CookieJar object to add the cookies to.
    :param cookies: Dictionary or CookieJar object to be added.
    """
    if not isinstance(cookiejar, cookielib.CookieJar):
        raise ValueError('You can only merge into CookieJar')

    if isinstance(cookies, dict):
        return cookiejar_from_dict(cookies, cookiejar=cookiejar,
                                   overwrite=False)

    if isinstance(cookies, cookielib.CookieJar):
        try:
            cookiejar.update(cookies)
        except AttributeError:
            # Plain cookielib jars have no update(); copy cookie by cookie.
            for cookie_in_jar in cookies:
                cookiejar.set_cookie(cookie_in_jar)
    return cookiejar
| momm3/WelcomeBot | welcomebot/Lib/site-packages/requests/cookies.py | Python | mit | 18,208 |
######################################################################
##
## Copyright (C) 2006, Blekinge Institute of Technology
##
## Filename: SyscallGenerator.py
## Author: Simon Kagstrom <ska@bth.se>
## Description: System call generators
##
## $Id: syscallgenerator.py 14099 2007-03-10 07:51:59Z ska $
##
######################################################################
import os
import re
import struct
import subprocess
import sys
import tempfile

from Cibyl.BinaryTranslation.translator import Controller
from Cibyl import config
from function import *
def preprocess(defines, lines):
    """Run `lines` (a string or a list of lines) through the C preprocessor.

    `defines` is a sequence of extra command-line arguments (typically
    -DNAME=VALUE items) passed verbatim to cpp.  Returns the preprocessed
    output as a list of lines.
    """
    s = ""
    for item in defines:
        s = s + "%s " % item
    # pipe cpp, -C means keep comments, -P means emit no line information
    # NOTE(review): os.popen2 is Python 2-only and long deprecated;
    # subprocess.Popen would be the modern replacement.
    stdin, stdout = os.popen2("%s -C -P %s" % (config.cpp, s))
    if isinstance(lines, str):
        stdin.write(lines)
    else: # List
        stdin.writelines(lines)
    stdin.close()
    out = stdout.readlines()
    stdout.close()
    return out
def readFile(name):
    """Return the lines of file `name`, or [] if it cannot be opened.

    Missing or unreadable files are treated as empty input; this is
    relied upon by callers that probe optional files such as `depends`.
    """
    try:
        f = open(name)
    except (IOError, OSError):
        # No such file (or not readable): deliberate best-effort fallback.
        return []
    try:
        return f.readlines()
    finally:
        # Close deterministically instead of leaking the handle.
        f.close()
def readPreprocessedFile(name, defines):
    """Read file `name` and run its contents through the C preprocessor."""
    return preprocess(defines, readFile(name))
def fileExists(name):
    """Return True if `name` exists in the filesystem.

    Replaces the previous os.stat()-plus-bare-except hack with the
    standard library predicate.
    """
    return os.path.exists(name)
def generateSyscallSetDependencies(dirs, syscallSets):
    """Expand `syscallSets` with the sets they transitively depend on.

    Each syscall set directory may contain a `depends` file listing, one
    name per line, the other sets it requires.  Dependencies are looked up
    in every directory of `dirs`.  Note that the `syscallSets` list passed
    in is modified in place; the expanded collection is returned as a set
    (duplicates removed).
    """
    # Work list of sets whose dependencies still need to be examined.
    unresolved = []
    for syscallDir in syscallSets:
        unresolved.append(syscallDir)
    while unresolved != []:
        for syscallDir in unresolved:
            # Add dependencies to the sets
            for curDir in dirs:
                for line in readFile(curDir + "/" + syscallDir + "/depends"):
                    line = line.strip()
                    if line in unresolved:
                        continue
                    # Add this dependency
                    # NOTE(review): `unresolved` is mutated while the outer
                    # `for syscallDir in unresolved` loop iterates it; the
                    # enclosing while-loop re-scans so this terminates, but
                    # the iteration order is fragile.
                    unresolved.insert(0, line)
                    syscallSets.insert(0, line)
            if syscallDir in unresolved:
                unresolved.remove(syscallDir)
    # Remove duplicates
    return set(syscallSets)
class SyscallFile:
    """One parsed syscall header file.

    `all` keeps every line of the file in order (declarations replaced by
    Function objects); `functions` gathers just the Function entries.
    """
    def __init__(self, filename):
        # Path of the header, relative to the syscall set's include root.
        self.filename = filename
        self.functions = []
        self.all = []
class SyscallGenerator:
    """Base class: scans syscall set directories and collects Function objects.

    Walks each set's include/ tree, parses every .h file with fnRegexp and
    builds SyscallFile/Function records, numbering the syscalls sequentially.
    """
    def __init__(self, dirs, syscallSets):
        self.functions = []
        self.files = []
        self.dirs = dirs
        # Expand the requested sets with their transitive dependencies.
        self.syscallSets = generateSyscallSetDependencies(self.dirs, syscallSets)
        count = 0
        for curDir in self.dirs:
            for syscallDir in self.syscallSets:
                # Read all the functions for this syscall dir
                for root, dirs, files in os.walk(curDir + "/" + syscallDir + "/include"):
                    for cur in files:
                        if not cur.endswith(".h"):
                            continue
                        pathToFile = os.path.join(root, cur)
                        relativePath = pathToFile.replace(curDir + "/" + syscallDir + "/include/", "")
                        curFile = SyscallFile(relativePath)
                        f = readFile(os.path.join(root, cur))
                        for line in f:
                            match = fnRegexp.match(line)
                            if match == None:
                                # Not a declaration: keep the raw line so the
                                # header can be reproduced in order.
                                curFile.all.append(line.strip())
                                continue
                            returnType = match.group(1)
                            name = match.group(2).strip()
                            arguments = match.group(3).split(",")
                            qualifier = match.group(4)
                            # Create a new function
                            function = Function(count, syscallDir,
                                        returnType, name, arguments, qualifier)
                            count = count + 1
                            curFile.all.append(function)
                            curFile.functions.append(function)
                            self.functions.append(function)
                        self.files.append(curFile)
class SyscallHeaderGenerator(SyscallGenerator):
    """Writes C headers with __NR_* numbers and _syscallN stub macros."""
    def __init__(self, dirname, syscallSets, outdir):
        SyscallGenerator.__init__(self, [dirname], syscallSets)
        self.outdir = outdir
    def run(self):
        """Emit one header per parsed SyscallFile under self.outdir."""
        for curFile in self.files:
            path = "%s/%s" % (self.outdir, curFile.filename)
            try:
                os.makedirs(os.path.dirname(path))
            except:
                pass # We do nothing - the path already exists
            outfile = open(path, "w")
            for item in curFile.all:
                if not isinstance(item, Function):
                    # Plain line from the source header: copy through.
                    outfile.write("%s\n" % item)
                    continue
                outfile.write( "#define __NR_%s %d /* %s */\n" % (item.getName(), item.getNr(), item.getSyscallSet()) )
                outfile.write( "static inline _syscall%d(%s,%s" % (item.getNrArgs(), item.getReturnType(), item.getName()) )
                if item.getNrArgs() > 0:
                    for arg in item.getArgs():
                        outfile.write(", %s" % arg)
                outfile.write(") %s\n" % (item.getQualifier()) )
            outfile.close()
class SyscallWrapperGenerator(SyscallGenerator):
    """Generates Java wrappers (Syscalls.java) for the syscalls a program uses."""
    def __init__(self, program, syscallDirectories, dirs, syscallSets, outdir,
                 defines=[], packageName = None, generateAllSyscalls = False):
        # NOTE(review): mutable default `defines=[]` is shared between calls;
        # harmless here since it is only read, never mutated.
        self.dirs = dirs
        self.syscallSets = generateSyscallSetDependencies(self.dirs, syscallSets)
        self.functions = functionsFromHeaderDirectories(syscallDirectories)
        self.generateAllSyscalls = generateAllSyscalls
        if generateAllSyscalls:
            self.controller = None
        else:
            # Read the program only far enough to learn which syscalls it uses.
            self.controller = Controller(program, syscallDirectories, onlyReadSyscalls=True)
        self.outdir = outdir
        self.outfile = open(outdir + "/Syscalls.java", "w")
        self.defines = defines
        self.packageName = packageName
    def run(self):
        """
        Generate Java systemcall wrappers
        """
        self.outfile.write("/* GENERATED, DON'T EDIT! */\n")
        if self.packageName != None:
            self.outfile.write("package %s;" % (self.packageName))
        for curDir in self.dirs:
            for syscallDir in self.syscallSets:
                # Add all the imports
                for line in readPreprocessedFile(curDir + "/" + syscallDir + "/imports", self.defines):
                    self.outfile.write("%s\n" % line.strip())
        self.outfile.write("public class Syscalls {\n")
        for curDir in self.dirs:
            for syscallDir in self.syscallSets:
                # And run the initialization
                for line in readPreprocessedFile(curDir + "/" + syscallDir + "/init", self.defines):
                    self.outfile.write("%s\n" % line.strip())
        # Emit only the syscalls the program uses (or all, if requested).
        for curDir in self.dirs:
            lines = []
            for item in self.functions:
                if (self.generateAllSyscalls or self.controller.usesSyscall(item.name)) and fileExists(curDir + "/" + item.getSyscallSet()):
                    lines.append(item.generateJavaCall(curDir + "/" + item.getSyscallSet() ))
            self.outfile.writelines( preprocess(self.defines, lines) )
        self.outfile.write("}\n")
        # Copy any extra helper .java classes shipped with the syscall sets.
        for s in self.syscallSets:
            for curDir in self.dirs:
                if not fileExists(curDir + "/" + s + "/classes"):
                    continue
                for f in os.listdir(curDir + "/" + s + "/classes"):
                    if f.endswith(".java"):
                        data = readPreprocessedFile(curDir + "/" + s + "/classes/" + f, self.defines)
                        out = open("%s/%s" % (self.outdir, f), "w")
                        out.writelines(data)
                        out.close()
class SyscallDatabaseGenerator(SyscallGenerator):
    """Writes a binary syscall database: header, per-syscall records,
    argument records and a string table.

    All integer fields are packed with struct code "P" (native
    pointer-sized unsigned), so the database is only readable by tools
    built for the same word size and byte order as the generating host.
    """
    def __init__(self, dirname, syscallSets, outfile):
        SyscallGenerator.__init__(self, [dirname], syscallSets)
        self.outfile = outfile
    def encodeReturnType(self, item):
        # 0 = void, 1 = int, 2 = boolean, 3 = object reference
        if item.getJavaReturnType() == "void":
            return 0
        if item.getJavaReturnType() == "int":
            return 1
        if item.getJavaReturnType() == "boolean":
            return 2
        # objref
        return 3
    def encodeQualifier(self, item):
        # Bitmask: 1 = throws, 2 = not generated.
        # Assume no qualifier
        out = 0
        if item.getQualifier() == "/* Throws */":
            out = out | 1
        if item.getQualifier() == "/* Not generated */":
            out = out | 2
        return out
    def encodeArgumentJavaType(self, arg):
        # Byte 3: type class (1 = object reference); low 24 bits: strtab offset.
        offs = self.add_str(arg.getJavaType())
        t = 0
        if arg.isObjectReference():
            t = 1
        return ((t << 24) | offs)
    def encodeArgumentType(self, arg):
        # Same layout as encodeArgumentJavaType; type class is always 0 here.
        offs = self.add_str(arg.getType())
        t = 0
        return ((t << 24) | offs)
    def add_str(self, s):
        """Intern `s` in the string table and return its byte offset."""
        # Add the string including null-termination
        if s == None:
            s = ""
        try:
            # If we already have this key, just return the offset
            return self.strtab_by_contents[s]
        except:
            pass
        out = self.strtab_offs
        self.strtab_offs = self.strtab_offs + len(s) + 1
        self.strs.append(s)
        self.strtab_by_contents[s] = out
        return out
    def run(self):
        """Collect all parsed syscalls and write the binary database."""
        items = []
        strtab = {}
        self.strtab_by_contents = {}
        self.strtab_offs = 0
        self.strs = []
        # Everything is packed in native pointer-sized units.
        sz = struct.calcsize("P")
        # Read all syscall directories
        # NOTE(review): the file is opened in text mode although struct
        # output is written to it; this is Python 2-era code (str == bytes)
        # and would need mode "wb" under Python 3.
        of = open(self.outfile, "w")
        for curFile in self.files:
            for item in curFile.all:
                if not isinstance(item, Function):
                    continue
                items.append(item)
        # Create the structure
        out = []
        args = []
        for item in items:
            name_offs = self.add_str(item.getName())
            javaClass_offs = self.add_str(item.getJavaClass())
            javaMethod_offs = self.add_str(item.getJavaMethod())
            out.append( ( item.getNr(), self.encodeReturnType(item), item.getNrArgs(),
                self.encodeQualifier(item), name_offs, javaClass_offs,
                javaMethod_offs, self.add_str(item.getSyscallSet()), self.add_str(item.getJavaReturnType()) ) )
            if item.getNrArgs() != 0:
                for arg in item.args:
                    args.append((self.encodeArgumentJavaType(arg),
                        self.encodeArgumentType(arg),
                        self.add_str(arg.getName())))
        # Size of each struct is 11
        arg_offs = sz * 6 + sz * len(self.dirs) + sz * len(self.syscallSets) + sz * 11 * len(out)
        strtab_offs = arg_offs + sz * len(args) * 4
        # Write the header
        of.write(struct.pack("P", 0x11b1c1d1)) # magic
        of.write(struct.pack("P", len(self.dirs))) # Nr syscall directories
        of.write(struct.pack("P", len(self.syscallSets))) # Nr syscall sets
        of.write(struct.pack("P", len(out)) ) # Nr items
        of.write(struct.pack("P", arg_offs))
        of.write(struct.pack("P", strtab_offs))
        # The path names, as strtab offsets
        for d in self.dirs:
            offs = self.add_str(os.path.abspath(d))
            of.write(struct.pack("P", offs))
        # The syscall set names, as strtab offsets
        for s in self.syscallSets:
            offs = self.add_str(s)
            of.write(struct.pack("P", offs))
        # Write the out structures
        arg_count = 0
        for s in out:
            of.write(struct.pack("PPPPPPPPPPP",
                s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8],
                arg_count, 0)) # Last is for usage outside
            arg_count = arg_count + sz * 4 * s[2]
        # Write the arguments
        for a in args:
            of.write(struct.pack("PPPP", 0, a[0], a[1], a[2]))
        # Write the string table
        for item in self.strs:
            of.write(item + '\0')
        of.close()
| SimonKagstrom/cibyl | tools/python/Cibyl/SyscallHandling/syscallgenerator.py | Python | lgpl-2.1 | 10,603 |
from direct.distributed.DistributedNodeAI import DistributedNodeAI
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from toontown.safezone import DistributedChineseCheckersAI
from toontown.safezone import DistributedCheckersAI
from toontown.safezone import DistributedFindFourAI
class DistributedPicnicTableAI(DistributedNodeAI):
    """AI-side picnic table that hosts safezone board games.

    Tracks up to six seats, lets the first seated toon pick a game
    (Chinese checkers, checkers or find-four), spawns the matching
    distributed game object and relays seat/zone state to clients.
    """

    def __init__(self, air, zone, name, x, y, z, h, p, r):
        DistributedNodeAI.__init__(self, air)
        self.name = name
        self.air = air
        # One entry per seat: avId of the occupant, or None if free.
        self.seats = [None, None, None, None, None, None]
        self.setPos(x, y, z)
        self.setHpr(h, p, r)
        self.playersSitting = 0
        self.myPos = (x, y, z)
        self.myHpr = (h, p, r)
        self.playerIdList = []
        self.checkersZoneId = None
        self.generateOtpObject(air.districtId, zone, optionalFields=['setX',
         'setY',
         'setZ',
         'setH',
         'setP',
         'setR'])
        # Toons watching (not seated at) the running game.
        self.observers = []
        # Toons allowed to choose which game is played.
        self.allowPickers = []
        self.hasPicked = False
        self.game = None
        self.gameDoId = None
        self.isAccepting = True
        return

    def announceGenerate(self):
        pass

    def delete(self):
        DistributedNodeAI.delete(self)
        self.game = None
        self.gameDoId = None
        return

    def setGameDoId(self, doId):
        """Record the distributed game object created for this table."""
        self.gameDoId = doId
        self.game = self.air.doId2do.get(doId)

    def requestTableState(self):
        """Client asks for the current seat occupancy."""
        avId = self.air.getAvatarIdFromSender()
        self.getTableState()

    def getTableState(self):
        """Broadcast seat occupancy (0 for empty seats) and playing flag."""
        tableStateList = []
        for x in self.seats:
            if x == None:
                tableStateList.append(0)
            else:
                tableStateList.append(x)
        if self.game and self.game.fsm.getCurrentState().getName() == 'playing':
            self.sendUpdate('setTableState', [tableStateList, 1])
        else:
            self.sendUpdate('setTableState', [tableStateList, 0])
        return

    def sendIsPlaying(self):
        """Broadcast whether the hosted game is currently in progress."""
        if self.game.fsm.getCurrentState().getName() == 'playing':
            self.sendUpdate('setIsPlaying', [1])
        else:
            self.sendUpdate('setIsPlaying', [0])

    def announceWinner(self, gameName, avId):
        """Tell clients who won and detach the finished game."""
        self.sendUpdate('announceWinner', [gameName, avId])
        self.gameDoId = None
        self.game = None
        return

    def requestJoin(self, si, x, y, z, h, p, r):
        """Client asks to sit down at seat index `si`."""
        avId = self.air.getAvatarIdFromSender()
        if self.findAvatar(avId) != None:
            self.notify.warning('Ignoring multiple requests from %s to board.' % avId)
            return
        av = self.air.doId2do.get(avId)
        if av:
            # Only living toons may sit, and only on a free seat.
            if av.hp > 0 and self.isAccepting and self.seats[si] == None:
                self.notify.debug('accepting boarder %d' % avId)
                self.acceptBoarder(avId, si, x, y, z, h, p, r)
            else:
                self.notify.debug('rejecting boarder %d' % avId)
                self.sendUpdateToAvatarId(avId, 'rejectJoin', [])
        else:
            self.notify.warning('avid: %s does not exist, but tried to board a picnicTable' % avId)
        return

    def acceptBoarder(self, avId, seatIndex, x, y, z, h, p, r):
        """Seat `avId`, granting game-pick rights if the table was empty."""
        self.notify.debug('acceptBoarder %d' % avId)
        if self.findAvatar(avId) != None:
            return
        isEmpty = True
        for xx in self.seats:
            if xx != None:
                isEmpty = False
                break
        # First toon at the table (or no game chosen yet) may pick the game.
        if isEmpty == True or self.hasPicked == False:
            self.sendUpdateToAvatarId(avId, 'allowPick', [])
            self.allowPickers.append(avId)
        if self.hasPicked == True:
            self.sendUpdateToAvatarId(avId, 'setZone', [self.game.zoneId])
        self.seats[seatIndex] = avId
        self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
        self.timeOfBoarding = globalClock.getRealTime()
        if self.game:
            self.game.informGameOfPlayer()
        self.sendUpdate('fillSlot', [avId,
         seatIndex,
         x,
         y,
         z,
         h,
         p,
         r,
         globalClockDelta.localToNetworkTime(self.timeOfBoarding),
         self.doId])
        self.getTableState()
        return

    def requestPickedGame(self, gameNum):
        """An authorized picker has chosen which game to play."""
        avId = self.air.getAvatarIdFromSender()
        if self.hasPicked == False and avId in self.allowPickers:
            self.hasPicked = True
            numPickers = len(self.allowPickers)
            self.allowPickers = []
            self.pickGame(gameNum)
            if self.game:
                for x in range(numPickers):
                    self.game.informGameOfPlayer()

    def pickGame(self, gameNum):
        """Instantiate the chosen game if the table population allows it."""
        # Number of occupied seats.  Checkers and find-four are two-player
        # games, so they are only created while at most two toons are seated.
        # (BUGFIX: the previous code reused its loop variable as the counter,
        # so the "count" ended up holding a seat value instead of a count.)
        numPlayers = self.countFullSeats()
        if gameNum == 1:
            if simbase.config.GetBool('want-chinese', 0):
                self.game = DistributedChineseCheckersAI.DistributedChineseCheckersAI(self.air, self.doId, 'chinese', self.getX(), self.getY(), self.getZ() + 2.83, self.getH(), self.getP(), self.getR())
                self.sendUpdate('setZone', [self.game.zoneId])
        elif gameNum == 2:
            if numPlayers <= 2:
                if simbase.config.GetBool('want-checkers', 0):
                    self.game = DistributedCheckersAI.DistributedCheckersAI(self.air, self.doId, 'checkers', self.getX(), self.getY(), self.getZ() + 2.83, self.getH(), self.getP(), self.getR())
                    self.sendUpdate('setZone', [self.game.zoneId])
        elif numPlayers <= 2:
            if simbase.config.GetBool('want-findfour', 0):
                self.game = DistributedFindFourAI.DistributedFindFourAI(self.air, self.doId, 'findFour', self.getX(), self.getY(), self.getZ() + 2.83, self.getH(), self.getP(), self.getR())
                self.sendUpdate('setZone', [self.game.zoneId])
        return

    def requestZone(self):
        """Send the game's zone to the requesting client."""
        avId = self.air.getAvatarIdFromSender()
        self.sendUpdateToAvatarId(avId, 'setZone', [self.game.zoneId])

    def requestGameZone(self):
        """Register the sender as an observer and send the game zone."""
        if self.hasPicked == True:
            avId = self.air.getAvatarIdFromSender()
            if self.game:
                self.game.playersObserving.append(avId)
            self.observers.append(avId)
            self.acceptOnce(self.air.getAvatarExitEvent(avId), self.handleObserverExit, extraArgs=[avId])
            if self.game:
                if self.game.fsm.getCurrentState().getName() == 'playing':
                    self.sendUpdateToAvatarId(avId, 'setGameZone', [self.checkersZoneId, 1])
                else:
                    self.sendUpdateToAvatarId(avId, 'setGameZone', [self.checkersZoneId, 0])

    def leaveObserve(self):
        """Sender stops watching the game."""
        avId = self.air.getAvatarIdFromSender()
        if self.game:
            if avId in self.game.playersObserving:
                self.game.playersObserving.remove(avId)

    def handleObserverExit(self, avId):
        """An observer disconnected; drop them from the game's watch list."""
        if self.game and avId in self.game.playersObserving:
            self.game.playersObserving.remove(avId)
        self.ignore(self.air.getAvatarExitEvent(avId))

    def requestExit(self):
        """Client asks to leave the table."""
        self.notify.debug('requestExit')
        avId = self.air.getAvatarIdFromSender()
        av = self.air.doId2do.get(avId)
        if av:
            if self.countFullSeats() > 0:
                self.acceptExiter(avId)
            else:
                self.notify.debug('Player tried to exit after AI already kicked everyone out')
        else:
            self.notify.warning('avId: %s does not exist, but tried to exit picnicTable' % avId)

    def acceptExiter(self, avId):
        """Free the toon's seat (or observer slot) and tear the game down
        once the table is empty."""
        seatIndex = self.findAvatar(avId)
        if seatIndex == None:
            if avId in self.observers:
                self.sendUpdateToAvatarId(avId, 'emptySlot', [avId, 255, globalClockDelta.getRealNetworkTime()])
        else:
            self.seats[seatIndex] = None
            self.ignore(self.air.getAvatarExitEvent(avId))
            self.sendUpdate('emptySlot', [avId, seatIndex, globalClockDelta.getRealNetworkTime()])
            self.getTableState()
            numActive = 0
            for x in self.seats:
                if x != None:
                    numActive = numActive + 1
            if self.game:
                self.game.informGameOfPlayerLeave()
                self.game.handlePlayerExit(avId)
            if numActive == 0:
                # Last player left: reset the table.
                self.isAccepting = True
                if self.game:
                    self.game.handleEmptyGame()
                    self.game.requestDelete()
                    self.game = None
                self.hasPicked = False
        return

    def __handleUnexpectedExit(self, avId):
        """A seated toon disconnected without requesting an exit."""
        self.notify.warning('Avatar: ' + str(avId) + ' has exited unexpectedly')
        seatIndex = self.findAvatar(avId)
        if seatIndex == None:
            pass
        else:
            self.seats[seatIndex] = None
            self.ignore(self.air.getAvatarExitEvent(avId))
            if self.game:
                self.game.informGameOfPlayerLeave()
                self.game.handlePlayerExit(avId)
                self.hasPicked = False
            self.getTableState()
            numActive = 0
            for x in self.seats:
                if x != None:
                    numActive = numActive + 1
            if numActive == 0 and self.game:
                simbase.air.deallocateZone(self.game.zoneId)
                self.game.requestDelete()
                self.game = None
                self.gameDoId = None
        return

    def informGameOfPlayerExit(self, avId):
        self.game.handlePlayerExit(avId)

    def handleGameOver(self):
        """Kick all observers and players off the table when a game ends."""
        # Iterate over a copy: acceptExiter()/remove() mutate self.observers,
        # and removing from a list while iterating it skips elements
        # (BUGFIX: every other observer used to be left behind).
        for x in self.observers[:]:
            self.acceptExiter(x)
            self.observers.remove(x)
        if self.game:
            self.game.playersObserving = []
        for x in self.seats:
            if x != None:
                self.acceptExiter(x)
        self.game = None
        self.gameDoId = None
        self.hasPicked = False
        return

    def findAvatar(self, avId):
        """Return the seat index of `avId`, or None if not seated."""
        for i in range(len(self.seats)):
            if self.seats[i] == avId:
                return i
        return None

    def countFullSeats(self):
        """Return the number of occupied seats."""
        avCounter = 0
        for i in self.seats:
            if i:
                avCounter += 1
        return avCounter

    def findAvailableSeat(self):
        """Return the index of the first free seat, or None if full."""
        for i in range(len(self.seats)):
            if self.seats[i] == None:
                return i
        return

    def setCheckersZoneId(self, zoneId):
        self.checkersZoneId = zoneId
| ksmit799/Toontown-Source | toontown/safezone/DistributedPicnicTableAI.py | Python | mit | 10,737 |
from pybloqs.block.base import BaseBlock
from pybloqs.block.convenience import Block
class Box(BaseBlock):
    """Container block that wraps arbitrary content in an HTML element."""
    def __init__(self, contents, **kwargs):
        """
        Wrap the supplied content (can be anything that is supported by the basic blocks)
        in a container.

        :param contents: Content to wrap
        :param kwargs: Optional styling arguments. The `style` keyword argument has special
                       meaning in that it allows styling to be grouped as one argument.
                       It is also useful in case a styling parameter name clashes with a standard
                       block parameter.
        """
        super(Box, self).__init__(**kwargs)
        # Blockify the content
        self._contents = Block(contents)
    def _write_contents(self, *args, **kwargs):
        # Delegate rendering to the wrapped block.
        self._contents._write_block(*args, **kwargs)
class Paragraph(Box):
    """
    Wraps the content in a paragraph.
    """
    # Render using the HTML <p> tag instead of the default container.
    container_tag = "p"
| manahl/PyBloqs | pybloqs/block/wrap.py | Python | lgpl-2.1 | 973 |
# Django settings for dj_apache project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; rotate it
# and load it from the environment before any production use.
SECRET_KEY = '5!x61m1lwmhjjwg!_u!ez=64-5q%bch4=%nlb4f_hbf4esq27%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'dj_apache.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    #'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'dj_apache.logview',
)
| lluxury/P_U_S_A | 11_gui/dj_apache/settings.py | Python | mit | 2,849 |
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_cohen_kappa.
This module contains unit tests for abydos.distance.CohenKappa
"""
import unittest
from abydos.distance import CohenKappa
class CohenKappaTestCases(unittest.TestCase):
    """Test CohenKappa functions.

    abydos.distance.CohenKappa
    """

    # Default comparator, plus a variant with no alphabet (degenerate for
    # fully disjoint strings: sim drops to 0, dist to 1 — see last asserts).
    cmp = CohenKappa()
    cmp_no_d = CohenKappa(alphabet=0)
    def test_cohen_kappa_sim(self):
        """Test abydos.distance.CohenKappa.sim."""
        # Base cases
        self.assertEqual(self.cmp.sim('', ''), 1.0)
        self.assertEqual(self.cmp.sim('a', ''), 0.9987228607918263)
        self.assertEqual(self.cmp.sim('', 'a'), 0.9987228607918263)
        self.assertEqual(self.cmp.sim('abc', ''), 0.9974424552429667)
        self.assertEqual(self.cmp.sim('', 'abc'), 0.9974424552429667)
        self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.993581514762516)
        self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.9961439589)
        self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.9961439589)
        self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.9961439589)
        self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.9961439589)
        self.assertAlmostEqual(
            self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.9954751131
        )
        self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
    def test_cohen_kappa_dist(self):
        """Test abydos.distance.CohenKappa.dist."""
        # Base cases (dist values mirror sim: each pair sums to 1.0)
        self.assertEqual(self.cmp.dist('', ''), 0.0)
        self.assertEqual(self.cmp.dist('a', ''), 0.0012771392081737387)
        self.assertEqual(self.cmp.dist('', 'a'), 0.0012771392081737387)
        self.assertEqual(self.cmp.dist('abc', ''), 0.002557544757033292)
        self.assertEqual(self.cmp.dist('', 'abc'), 0.002557544757033292)
        self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
        self.assertEqual(self.cmp.dist('abcd', 'efgh'), 0.006418485237484006)
        self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.0038560411)
        self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.0038560411)
        self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.0038560411)
        self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.0038560411)
        self.assertAlmostEqual(
            self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.0045248869
        )
        self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| chrislit/abydos | tests/distance/test_distance_cohen_kappa.py | Python | gpl-3.0 | 3,238 |
##
## Copyright(c) 2009 Syntext, Inc. All Rights Reserved.
## Contact: info@syntext.com, http://www.syntext.com
##
## This file is part of Syntext Serna XML Editor.
##
## COMMERCIAL USAGE
## Licensees holding valid Syntext Serna commercial licenses may use this file
## in accordance with the Syntext Serna Commercial License Agreement provided
## with the software, or, alternatively, in accorance with the terms contained
## in a written agreement between you and Syntext, Inc.
##
## GNU GENERAL PUBLIC LICENSE USAGE
## Alternatively, this file may be used under the terms of the GNU General
## Public License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the packaging
## of this file. In addition, as a special exception, Syntext, Inc. gives you
## certain additional rights, which are described in the Syntext, Inc. GPL
## Exception for Syntext Serna Free Edition, included in the file
## GPL_EXCEPTION.txt in this package.
##
## You should have received a copy of appropriate licenses along with this
## package. If not, see <http://www.syntext.com/legal/>. If you are unsure
## which license is appropriate for your use, please contact the sales
## department at sales@syntext.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
from SernaApi import *
from SetAdpFileDialog import SetAdpFileDialog
from os.path import basename
# Root's attribute
ADP_FILE = "adp-file"
class DoubleClickWatcher(SimpleWatcher):
    """
    Class for handling mouse double-clicks
    """
    def __init__(self, plugin):
        SimpleWatcher.__init__(self)
        # NOTE(review): `ref` comes from the SernaApi star-import; it is
        # dereferenced with () below, presumably weakref.ref semantics
        # (avoids a plugin<->watcher reference cycle) - confirm.
        self.__plugin = ref(plugin)
    def notifyChanged(self):
        # Returns False when the double-click was consumed (opens the ADP
        # file dialog on a serna-help element), True to let it propagate.
        pos = self.__plugin().sernaDoc().structEditor().getCheckedPos()
        if pos.isNull():
            return True
        if pos.node().asGroveElement().localName() == "serna-help":
            self.__plugin().setAdpFile()
            return False
        return True
##############################################################
class ElementHelp(DocumentPlugin):
    """
    Plugin for working with ElementHelp document
    """
    def __init__(self, a1, a2):
        DocumentPlugin.__init__(self, a1, a2)
        self.buildPluginExecutors(True)
    def postInit(self):
        # Hook double-clicks in the structure editor.
        self.se = self.sernaDoc().structEditor()
        self.__watcher = DoubleClickWatcher(self)
        self.se.setDoubleClickWatcher(self.__watcher)
    def executeUiEvent(self, evName, cmd):
        # Only one UI event is handled: opening the ADP file dialog.
        if evName == "SetAdpFileDialogEvent":
            self.setAdpFile()
    def setAdpFile(self):
        """Ask the user for an ADP file name and store its basename in the
        document root's "adp-file" attribute (adding it if missing)."""
        root = self.se.sourceGrove().document()
        root_attrs = root.documentElement().attrs()
        adp_fname_attr = root_attrs.getAttribute(ADP_FILE)
        # NOTE(review): .value() is called before the falsiness check below;
        # presumably getAttribute() returns a null-object (not None) whose
        # value() yields an empty string when the attribute is absent -
        # confirm against the SernaApi documentation.
        adp_fname = str(adp_fname_attr.value())
        adp_fname = SetAdpFileDialog().getAdpFileName(adp_fname)
        if adp_fname:
            adp_fname = basename(str(adp_fname))
            if not adp_fname_attr:
                prop_item = PropertyNode(ADP_FILE, adp_fname)
                cmd = self.se.groveEditor().addAttribute(root.documentElement(), prop_item)
            else:
                cmd = self.se.groveEditor().setAttribute(adp_fname_attr, adp_fname)
            self.se.executeAndUpdate(cmd)
| malaterre/serna-free-backup | serna/dist/plugins/syntext/element-help/py/element-help.py | Python | gpl-3.0 | 3,379 |
# -*- coding: utf-8 -*-
# (c) 2016-2017 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
import cgi
import json
import logging
from contextlib import contextmanager
from pyramid.httpexceptions import HTTPError
from pyramid.response import Response
log = logging.getLogger(__name__)
class GenericAdapterException(Exception):
    """Base class for data-source adapter errors.

    Optional keyword arguments:
    - data:      payload describing the upstream response (default None)
    - user_info: human-readable message intended for end users (default '')
    """
    def __init__(self, *args, **kwargs):
        # dict.get replaces the Python 2-only dict.has_key idiom and keeps
        # the same defaults as before (data=None, user_info='').
        self.data = kwargs.get('data')
        self.user_info = kwargs.get('user_info', '')
        super(GenericAdapterException, self).__init__(*args)

    def get_message(self):
        """Return a dict with HTML-escaped 'user' and 'details' message parts."""
        message = {'user': '', 'details': ''}
        message_parts = []
        if hasattr(self, 'user_info'):
            #message_parts.append(ex.user_info)
            message['user'] = cgi.escape(self.user_info)
        if hasattr(self, 'message'):
            message_parts.append(self.__class__.__name__ + u': ' + u'<pre>{message}</pre>'.format(message=cgi.escape(self.message)))
        if hasattr(self, 'details'):
            message_parts.append(u'<pre>{message}</pre>'.format(message=cgi.escape(self.details)))
        message['details'] = u'<br/>'.join(message_parts)
        return message
class SearchException(GenericAdapterException):
    """Raised when an upstream search request fails."""
    pass
class NoResultsException(GenericAdapterException):
    """Raised when a search completes successfully but yields no results."""
    pass
class ExampleJsonException(HTTPError, GenericAdapterException):
    """HTTP error response whose body is `data` serialized as JSON."""
    def __init__(self, data=None, status=404):
        #body = {'status': 'error', 'errors': errors}
        # NOTE(review): `use_decimal` is a simplejson-only keyword; the stdlib
        # `json` imported at the top of this module does not accept it and
        # would raise TypeError - confirm which json implementation is intended.
        Response.__init__(self, json.dumps(data, use_decimal=True))
        self.status = status
        self.content_type = 'application/json'
# https://stackoverflow.com/questions/15572288/general-decorator-to-wrap-try-except-in-python/15573313#15573313
@contextmanager
def ignored(*exceptions):
    """Context manager that swallows the given exception types, logging a
    warning for anything it suppressed."""
    try:
        yield
    except exceptions as exc:
        log.warning('Ignored exception: {name}({ex})'.format(name=type(exc).__name__, ex=exc))
| ip-tools/ip-navigator | patzilla/access/generic/exceptions.py | Python | agpl-3.0 | 2,020 |
# encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Douglas S. Blank
# Copyright (C) 2009 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
MODULE_VERSION="5.2"

#------------------------------------------------------------------------
#
# default views of Gramps
#
#------------------------------------------------------------------------

# Each register() call below declares one built-in view plugin.  register()
# and the constants VIEW, STABLE and START are not imported in this file:
# Gramps injects them into the namespace when it exec's this .gpr.py
# registration file.  ``order = START`` marks the view shown first within
# its category; views without it are secondary views of that category.

register(VIEW,
    id = 'eventview',
    name = _("Events"),
    description = _("The view showing all the events"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'eventview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Events", _("Events")),
    viewclass = 'EventView',
    order = START,
)

register(VIEW,
    id = 'familyview',
    name = _("Families"),
    description = _("The view showing all families"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'familyview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Families", _("Families")),
    viewclass = 'FamilyView',
    order = START,
)

register(VIEW,
    id = 'dashboardview',
    name = _("Dashboard"),
    description = _("The view showing Gramplets"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'dashboardview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Dashboard", _("Dashboard")),
    viewclass = 'DashboardView',
    order = START,
)

register(VIEW,
    id = 'mediaview',
    name = _("Media"),
    description = _("The view showing all the media objects"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'mediaview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Media", _("Media")),
    viewclass = 'MediaView',
    order = START,
)

register(VIEW,
    id = 'noteview',
    name = _("Notes"),
    description = _("The view showing all the notes"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'noteview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Notes", _("Notes")),
    viewclass = 'NoteView',
    order = START,
)

register(VIEW,
    id = 'relview',
    name = _("Relationships"),
    description = _("The view showing all relationships of the selected person"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'relview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Relationships", _("Relationships")),
    viewclass = 'RelationshipView',
    order = START,
)

# Chart views: the pedigree chart is the default of the "Ancestry" category,
# the fan charts are alternatives within it.
register(VIEW,
    id = 'pedigreeview',
    name = _("Pedigree"),
    description = _("The view showing an ancestor pedigree of the selected person"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'pedigreeview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Ancestry", _("Charts")),
    viewclass = 'PedigreeView',
    order = START,
    stock_icon = 'gramps-pedigree',
)

register(VIEW,
    id = 'fanchartview',
    name = _("Fan Chart"),
    category = ("Ancestry", _("Charts")),
    description = _("A view showing parents through a fanchart"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'fanchartview.py',
    authors = ["Douglas S. Blank", "B. Malengier"],
    authors_email = ["doug.blank@gmail.com", "benny.malengier@gmail.com"],
    viewclass = 'FanChartView',
    stock_icon = 'gramps-fanchart',
)

register(VIEW,
    id = 'fanchartdescview',
    name = _("Descendant Fan"),
    category = ("Ancestry", _("Charts")),
    description = _("Showing descendants through a fanchart"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'fanchartdescview.py',
    authors = ["B. Malengier"],
    authors_email = ["benny.malengier@gmail.com"],
    viewclass = 'FanChartDescView',
    stock_icon = 'gramps-fanchartdesc',
)

register(VIEW,
    id = 'fanchart2wayview',
    name = _("2-Way Fan"),
    category = ("Ancestry", _("Charts")),
    description = _("Showing ascendants and descendants through a fanchart"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'fanchart2wayview.py',
    authors = ["B. Jacquet"],
    authors_email = ["bastien.jacquet_dev@m4x.org"],
    viewclass = 'FanChart2WayView',
    stock_icon = 'gramps-fanchart2way',
)

register(VIEW,
    id = 'personview',
    name = _("Grouped People"),
    description = _("The view showing all people in the Family Tree grouped per"
                    " family name"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'persontreeview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("People", _("People")),
    viewclass = 'PersonTreeView',
    order = START,
    stock_icon = 'gramps-tree-group',
)

register(VIEW,
    id = 'personlistview',
    name = _("People"),
    description = _("The view showing all people in the Family Tree"
                    " in a flat list"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'personlistview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("People", _("People")),
    viewclass = 'PersonListView',
    stock_icon = 'gramps-tree-list',
)

register(VIEW,
    id = 'placelistview',
    name = _("Places"),
    description = _("The view showing all the places of the Family Tree"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'placelistview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Places", _("Places")),
    viewclass = 'PlaceListView',
    stock_icon = 'gramps-tree-list',
)

register(VIEW,
    id = 'placetreeview',
    name = _("Place Tree"),
    description = _("A view displaying places in a tree format."),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'placetreeview.py',
    authors = ["Donald N. Allingham", "Gary Burton", "Nick Hall"],
    authors_email = [""],
    category = ("Places", _("Places")),
    viewclass = 'PlaceTreeView',
    stock_icon = 'gramps-tree-group',
    order = START,
)

register(VIEW,
    id = 'repoview',
    name = _("Repositories"),
    description = _("The view showing all the repositories"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'repoview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Repositories", _("Repositories")),
    viewclass = 'RepositoryView',
    order = START,
)

register(VIEW,
    id = 'sourceview',
    name = _("Sources"),
    description = _("The view showing all the sources"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'sourceview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Sources", _("Sources")),
    viewclass = 'SourceView',
    order = START,
    stock_icon = 'gramps-tree-list',
)

register(VIEW,
    id = 'citationlistview',
    name = _("Citations"),
    description = _("The view showing all the citations"),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'citationlistview.py',
    authors = ["The Gramps project"],
    authors_email = ["http://gramps-project.org"],
    category = ("Citations", _("Citations")),
    viewclass = 'CitationListView',
    order = START,
)

register(VIEW,
    id = 'citationtreeview',
    name = _("Citation Tree"),
    description = _("A view displaying citations and sources in a tree format."),
    version = '1.0',
    gramps_target_version = MODULE_VERSION,
    status = STABLE,
    fname = 'citationtreeview.py',
    authors = ["Tim G L Lyons", "Nick Hall"],
    authors_email = [""],
    category = ("Sources", _("Sources")),
    viewclass = 'CitationTreeView',
    stock_icon = 'gramps-tree-select',
)
| Nick-Hall/gramps | gramps/plugins/view/view.gpr.py | Python | gpl-2.0 | 8,582 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the boolean ``on_homepage`` flag to Project and Organization.

    Auto-generated South schema migration.  The ``models`` attribute below
    is a frozen snapshot of the app's ORM state at generation time and
    should not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: add the ``on_homepage`` columns (BooleanField, default False)."""
        # Adding field 'Project.on_homepage'
        db.add_column('storybase_user_project', 'on_homepage', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

        # Adding field 'Organization.on_homepage'
        db.add_column('storybase_user_organization', 'on_homepage', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

    def backwards(self, orm):
        """Revert: drop the ``on_homepage`` columns again."""
        # Deleting field 'Project.on_homepage'
        db.delete_column('storybase_user_project', 'on_homepage')

        # Deleting field 'Organization.on_homepage'
        db.delete_column('storybase_user_organization', 'on_homepage')

    # Frozen ORM definitions (South snapshot) -- generated, do not edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'storybase_asset.asset': {
            'Meta': {'object_name': 'Asset'},
            'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'storybase_asset.dataset': {
            'Meta': {'object_name': 'DataSet'},
            'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
        },
        'storybase_geo.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'storybase_geo.location': {
            'Meta': {'object_name': 'Location'},
            'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'location_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'storybase_geo.place': {
            'Meta': {'object_name': 'Place'},
            'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
            'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'place_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_geo.placerelation': {
            'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
        },
        'storybase_story.story': {
            'Meta': {'object_name': 'Story'},
            'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
            'byline': ('django.db.models.fields.TextField', [], {}),
            'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
            'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Location']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
            'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Place']"}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_taxonomy.Category']"})
        },
        'storybase_taxonomy.category': {
            'Meta': {'object_name': 'Category'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_taxonomy.Category']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'storybase_user.organization': {
            'Meta': {'object_name': 'Organization'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'storybase_user.organizationstory': {
            'Meta': {'object_name': 'OrganizationStory'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
            'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'storybase_user.organizationtranslation': {
            'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
            'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_user.project': {
            'Meta': {'object_name': 'Project'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
            'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'storybase_user.projectstory': {
            'Meta': {'object_name': 'ProjectStory'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
            'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'storybase_user.projecttranslation': {
            'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
            'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        }
    }

    complete_apps = ['storybase_user']
| denverfoundation/storybase | apps/storybase_user/migrations/0008_auto__add_field_project_on_homepage__add_field_organization_on_homepag.py | Python | mit | 20,050 |
import unittest
from unittest.mock import patch
from django import forms
from django.conf import settings
from django.shortcuts import resolve_url
from django.test import TestCase
from django.urls import reverse
from .utils import UserMixin
try:
from otp_yubikey.models import ValidationService, RemoteYubikeyDevice
except ImportError:
ValidationService = RemoteYubikeyDevice = None
@unittest.skipUnless(ValidationService, 'No YubiKey support')
class YubiKeyTest(UserMixin, TestCase):
    """Integration tests for the two-factor YubiKey setup and login flows.

    ``RemoteYubikeyDevice.verify_token`` is patched throughout, so no real
    YubiKey validation service is contacted.
    """

    @patch('otp_yubikey.models.RemoteYubikeyDevice.verify_token')
    def test_setup(self, verify_token):
        """Walk the setup wizard through the 'yubikey' method end to end."""
        user = self.create_user()
        self.login_user()
        # NOTE(review): return_value (not side_effect) means *every* call
        # returns this same truthy list; the comment below suggests
        # side_effect may have been intended -- confirm.
        verify_token.return_value = [True, False]  # only first try is valid

        # Should be able to select YubiKey method
        response = self.client.post(reverse('two_factor:setup'),
                                    data={'setup_view-current_step': 'welcome'})
        self.assertContains(response, 'YubiKey')

        # Without ValidationService it won't work
        with self.assertRaisesMessage(KeyError, "No ValidationService "
                                                "found with name 'default'"):
            self.client.post(reverse('two_factor:setup'),
                             data={'setup_view-current_step': 'method',
                                   'method-method': 'yubikey'})

        # With a ValidationService, should be able to input a YubiKey
        ValidationService.objects.create(name='default', param_sl='', param_timeout='')

        response = self.client.post(reverse('two_factor:setup'),
                                    data={'setup_view-current_step': 'method',
                                          'method-method': 'yubikey'})
        self.assertContains(response, 'YubiKey:')

        # Should call verify_token and create the device on finish
        token = 'jlvurcgekuiccfcvgdjffjldedjjgugk'
        response = self.client.post(reverse('two_factor:setup'),
                                    data={'setup_view-current_step': 'yubikey',
                                          'yubikey-token': token})
        self.assertRedirects(response, reverse('two_factor:setup_complete'))
        verify_token.assert_called_with(token)

        yubikeys = user.remoteyubikeydevice_set.all()
        self.assertEqual(len(yubikeys), 1)
        self.assertEqual(yubikeys[0].name, 'default')

    @patch('otp_yubikey.models.RemoteYubikeyDevice.verify_token')
    def test_login(self, verify_token):
        """Log in with a registered YubiKey as the second factor."""
        user = self.create_user()
        verify_token.return_value = [True, False]  # only first try is valid

        service = ValidationService.objects.create(name='default', param_sl='', param_timeout='')
        user.remoteyubikeydevice_set.create(service=service, name='default')

        # Input type should be text, not numbers like other tokens
        response = self.client.post(reverse('two_factor:login'),
                                    data={'auth-username': 'bouke@example.com',
                                          'auth-password': 'secret',
                                          'login_view-current_step': 'auth'})
        self.assertContains(response, 'YubiKey:')
        self.assertIsInstance(response.context_data['wizard']['form'].fields['otp_token'],
                              forms.CharField)

        # Should call verify_token
        token = 'cjikftknbiktlitnbltbitdncgvrbgic'
        response = self.client.post(reverse('two_factor:login'),
                                    data={'token-otp_token': token,
                                          'login_view-current_step': 'token'})
        self.assertRedirects(response, resolve_url(settings.LOGIN_REDIRECT_URL))
        verify_token.assert_called_with(token)

    def test_show_correct_label(self):
        """
        The token form replaces the input field when the user's device is a
        YubiKey. However when the user decides to enter a backup token, the
        normal backup token form should be shown. Refs #50.
        """
        user = self.create_user()
        service = ValidationService.objects.create(name='default', param_sl='', param_timeout='')
        user.remoteyubikeydevice_set.create(service=service, name='default')
        backup = user.staticdevice_set.create(name='backup')
        backup.token_set.create(token='RANDOM')

        response = self.client.post(reverse('two_factor:login'),
                                    data={'auth-username': 'bouke@example.com',
                                          'auth-password': 'secret',
                                          'login_view-current_step': 'auth'})
        self.assertContains(response, 'YubiKey:')

        response = self.client.post(reverse('two_factor:login'),
                                    data={'wizard_goto_step': 'backup'})
        self.assertNotContains(response, 'YubiKey:')
        self.assertContains(response, 'Token:')

    def test_missing_management_data(self):
        """POSTing without the wizard's management step field is a 400."""
        # missing management data
        response = self.client.post(reverse('two_factor:login'),
                                    data={'auth-username': 'bouke@example.com',
                                          'auth-password': 'secret'})

        # view should return HTTP 400 Bad Request
        self.assertEqual(response.status_code, 400)
| Bouke/django-two-factor-auth | tests/test_yubikey.py | Python | mit | 5,356 |
"""Imports new symbols."""
import os
import tokenize
from collections import defaultdict
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
from importmagic.six import StringIO
class Iterator(object):
    """A simple cursor over a list of tokens.

    The cursor runs from ``start`` (default 0) up to ``end`` (default
    ``len(tokens)``); the instance is truthy while the cursor has not yet
    reached ``end``.
    """

    def __init__(self, tokens, start=None, end=None):
        self._tokens = tokens
        self._cursor = start if start else 0
        self._end = end if end else len(self._tokens)

    def rewind(self):
        """Step the cursor back one token."""
        self._cursor -= 1

    def next(self):
        """Return ``(index, token)`` and advance, or ``(None, None)`` at the end."""
        if not self:
            return None, None
        index = self._cursor
        self._cursor = index + 1
        return index, self._tokens[index]

    def peek(self):
        """Return the token under the cursor without advancing, or None at the end."""
        if self:
            return self._tokens[self._cursor]
        return None

    def until(self, type):
        """Consume and return ``(index, token)`` pairs up to and including the
        first token whose type equals ``type`` (or through the end of input)."""
        collected = []
        while self:
            index, token = self.next()
            collected.append((index, token))
            if token[0] == type:
                break
        return collected

    def __nonzero__(self):
        # Python 2 truthiness hook; aliased below for Python 3.
        return self._cursor < self._end

    __bool__ = __nonzero__
class Import(object):
    """A single import statement to emit: ``name`` at a source ``location``
    (an index into LOCATION_ORDER), optionally bound under ``alias``.

    Instances are hashable (usable in sets) and totally ordered so that
    ``sorted()`` produces a deterministic emission order.
    """

    def __init__(self, location, name, alias):
        self.location = location
        self.name = name
        self.alias = alias

    def __repr__(self):
        return 'Import(location=%r, name=%r, alias=%r)' % \
            (self.location, self.name, self.alias)

    def __hash__(self):
        return hash((self.location, self.name, self.alias))

    def __eq__(self, other):
        return self.location == other.location and self.name == other.name and self.alias == other.alias

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        # Lexicographic (location, name, alias) comparison.  The previous
        # implementation or-ed the three field comparisons together, which is
        # not a total order (both a < b and b < a could hold at once), so
        # sorted() output order was undefined.  ``alias or ''`` keeps a None
        # alias comparable (and sorts un-aliased imports first).
        return (self.location, self.name, self.alias or '') < \
            (other.location, other.name, other.alias or '')
# See SymbolIndex.LOCATIONS for details.
# Sort key for import locations -- an Import's ``location`` attribute is the
# index of its one-letter location code in this string.  Presumably
# F=future, S=stdlib, 3=third-party, L=local; confirm against
# SymbolIndex.LOCATIONS.
LOCATION_ORDER = 'FS3L'

# Per-project style overrides are read from this file in the project root.
PROJECT_CONFIG_FILE = 'setup.cfg'
class Imports(object):
_style = {'multiline': 'parentheses', 'max_columns': 79}
def __init__(self, index, source, root_dir=None):
self._imports = set()
self._imports_from = defaultdict(set)
self._imports_begin = self._imports_end = None
self._source = source
self._index = index
self._root_dir = root_dir
self._parse(source)
if root_dir:
style = self.get_style_from_config()
self._style.update(style)
    @classmethod
    def set_style(cls, **kwargs):
        """Override formatting style options (e.g. ``multiline``,
        ``max_columns``) for *all* instances -- ``_style`` is class-level
        shared state."""
        cls._style.update(kwargs)
def get_style_from_config(self):
style = {}
cfg_path = os.path.join(self._root_dir, PROJECT_CONFIG_FILE)
config = ConfigParser()
config.read([cfg_path])
imp_cfg = config['importmagic']
if imp_cfg['multiline']:
style['multiline'] = imp_cfg['multiline']
if imp_cfg['max_columns']:
style['max_columns'] = int(imp_cfg['max_columns'])
return style
def add_import(self, name, alias=None):
location = LOCATION_ORDER.index(self._index.location_for(name))
self._imports.add(Import(location, name, alias))
def add_import_from(self, module, name, alias=None):
location = LOCATION_ORDER.index(self._index.location_for(module))
self._imports_from[module].add(Import(location, name, alias))
def remove(self, references):
for imp in list(self._imports):
if imp.name in references:
self._imports.remove(imp)
for name, imports in self._imports_from.items():
for imp in list(imports):
if imp.name in references:
imports.remove(imp)
def get_update(self):
groups = []
for expected_location in range(len(LOCATION_ORDER)):
out = StringIO()
for imp in sorted(self._imports):
if expected_location != imp.location:
continue
out.write('import {module}{alias}\n'.format(
module=imp.name,
alias=' as {alias}'.format(alias=imp.alias) if imp.alias else '',
))
for module, imports in sorted(self._imports_from.items()):
imports = sorted(imports)
if not imports or expected_location != imports[0].location:
continue
line = 'from {module} import '.format(module=module)
clauses = ['{name}{alias}'.format(
name=i.name,
alias=' as {alias}'.format(alias=i.alias) if i.alias else ''
) for i in imports]
clauses.reverse()
line_len = len(line)
line_pieces = []
paren_used = False
while clauses:
clause = clauses.pop()
next_len = line_len + len(clause) + 2
if next_len > self._style['max_columns']:
imported_items = ', '.join(line_pieces)
if self._style['multiline'] == 'parentheses':
line_tail = ',\n'
if not paren_used:
line += '('
paren_used = True
line_pieces.append('\n')
else:
# Use a backslash
line_tail = ', \\\n'
out.write(line + imported_items + line_tail)
line = ' '
line_len = len(line) + len(clause) + 2
line_pieces = [clause]
else:
line_pieces.append(clause)
line_len = next_len
line += ', '.join(line_pieces) + (')\n' if paren_used else '\n')
if line.strip():
out.write(line)
text = out.getvalue()
if text:
groups.append(text)
start = self._tokens[self._imports_begin][2][0] - 1
end = self._tokens[min(len(self._tokens) - 1, self._imports_end)][2][0] - 1
if groups:
text = '\n'.join(groups) + '\n\n'
else:
text = ''
return start, end, text
def update_source(self):
start, end, text = self.get_update()
lines = self._source.splitlines()
lines[start:end] = text.splitlines()
return '\n'.join(lines) + '\n'
def _parse(self, source):
reader = StringIO(source)
# parse until EOF or TokenError (allows incomplete modules)
tokens = []
try:
tokens.extend(tokenize.generate_tokens(reader.readline))
except tokenize.TokenError:
# TokenError happens always at EOF, for unclosed strings or brackets.
# We don't care about that here, since we still can recover the whole
# source code.
pass
self._tokens = tokens
it = Iterator(self._tokens)
self._imports_begin, self._imports_end = self._find_import_range(it)
it = Iterator(self._tokens, start=self._imports_begin, end=self._imports_end)
self._parse_imports(it)
def _find_import_range(self, it):
ranges = self._find_import_ranges(it)
start, end = ranges[0][1:]
return start, end
def _find_import_ranges(self, it):
ranges = []
indentation = 0
explicit = False
size = 0
start = None
potential_end_index = -1
while it:
index, token = it.next()
if token[0] == tokenize.INDENT:
indentation += 1
continue
elif token[0] == tokenize.DEDENT:
indentation += 1
continue
if indentation:
continue
# Explicitly tell importmagic to manage the following block of imports
if token[1] == '# importmagic: manage':
ranges = []
start = index + 2 # Start managing imports after directive comment + newline.
explicit = True
continue
elif token[0] in (tokenize.STRING, tokenize.COMMENT):
# If a non-import statement follows, stop the range *before*
# this string or comment, in order to keep it out of the
# updated import block.
if potential_end_index == -1:
potential_end_index = index
continue
elif token[0] in (tokenize.NEWLINE, tokenize.NL):
continue
if not ranges:
ranges.append((0, index, index))
# Accumulate imports
if token[1] in ('import', 'from'):
potential_end_index = -1
if start is None:
start = index
size += 1
while it:
token = it.peek()
if token[0] == tokenize.NEWLINE or token[1] == ';':
break
index, _ = it.next()
# Terminate this import range
elif start is not None and token[1].strip():
if potential_end_index > -1:
index = potential_end_index
potential_end_index = -1
ranges.append((size, start, index))
start = None
size = 0
if explicit:
break
if start is not None:
ranges.append((size, start, index))
ranges.sort(reverse=True)
return ranges
def _parse_imports(self, it):
while it:
index, token = it.next()
if token[1] not in ('import', 'from') and token[1].strip():
continue
type = token[1]
if type in ('import', 'from'):
tokens = it.until(tokenize.NEWLINE)
tokens = [
t[1] for i, t in tokens
if t[0] == tokenize.NAME or t[1] in (',', '.', '...')]
tokens.reverse()
self._parse_import(type, tokens)
def _parse_import(self, type, tokens):
module = None
if type == 'from':
module = ''
while tokens and tokens[-1] != 'import':
module += tokens.pop()
assert tokens.pop() == 'import'
while tokens:
name = ''
while True:
name += tokens.pop()
next = tokens.pop() if tokens else None
if next == '.':
name += next
else:
break
alias = None
if next == 'as':
alias = tokens.pop()
if alias == name:
alias = None
next = tokens.pop() if tokens else None
if next == ',':
pass
if type == 'import':
self.add_import(name, alias=alias)
else:
self.add_import_from(module, name, alias=alias)
def __repr__(self):
return 'Imports(imports=%r, imports_from=%r)' % (self._imports, self._imports_from)
def _process_imports(src, index, unresolved, unreferenced, project_root):
    """Build an Imports object for ``src``: drop unreferenced names, then add
    the best-scoring candidate import for each unresolved symbol."""
    imports = Imports(index, src, project_root)
    imports.remove(unreferenced)
    for symbol in unresolved:
        scores = index.symbol_scores(symbol)
        if not scores:
            continue
        best = scores[0]
        module, variable = best[1], best[2]
        if variable is None:
            # Direct module import:
            #   sys.path         sys      path     -> import sys
            #   os.path.basename os.path  basename -> import os.path
            imports.add_import(module)
        else:
            #   basename         os.path  basename -> from os.path import basename
            #   path.basename    os.path  basename -> from os import path
            imports.add_import_from(module, variable)
    return imports
def get_update(src, index, unresolved, unreferenced, project_root=None):
    """Return (start, end, text) describing the rewritten import block."""
    processed = _process_imports(src, index, unresolved, unreferenced, project_root)
    return processed.get_update()
def update_imports(src, index, unresolved, unreferenced, project_root=None):
    """Return the full source with its import block rewritten."""
    processed = _process_imports(src, index, unresolved, unreferenced, project_root)
    return processed.update_source()
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/importmagic/importer.py | Python | mit | 12,592 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""FFmpeg wrapping"""
from .libavcodec import *
from .libavutil import *
from .libavformat import *
from .libswresample import *
from .libswscale import *
| calexil/FightstickDisplay | pyglet/media/codecs/ffmpeg_lib/__init__.py | Python | gpl-3.0 | 1,916 |
#! /usr/bin/env python3
# -*- coding: UTF-8 -*-
import json
from unittest import TestCase, main
from tornado.gen import coroutine
from tornado.escape import url_escape
from tornado.testing import AsyncHTTPTestCase, gen_test
import calculator_api as ca
class TestOperations(TestCase):
    """Unit tests for the arithmetic reduce operations of calculator_api."""

    def assertNumReduceOperation(self, operation, *data):
        """Assert ``operation(operands) == expected`` for each
        (expected, operands) pair in ``data``."""
        for expected, operands in data:
            with self.subTest(result=expected, operands=operands):
                failure = (
                    "Reduce operation {op} over "
                    "{ops} isn't equal to {r}!".format(
                        op=operation, ops=operands, r=expected))
                self.assertAlmostEqual(
                    operation(operands), expected, msg=failure)

    def test_sum(self):
        add = ca.operations['+']
        self.assertNumReduceOperation(
            add,
            (6, (1, 2, 3)),
            (3, (1, 2)),
            (-6, (-1, -2, -3)),
            (12, (1, 2, 3, 1, 2, 3)),
            (-2, (-1, 2, -3)),
            (2, (1, -2, 3)),
            (1, (0.333333333333, 0.666666666666)),
            (13/17, (4/17, 3/17, 7/17, -1/17)),
        )

    def test_sub(self):
        sub = ca.operations['-']
        self.assertNumReduceOperation(
            sub,
            (-4, (1, 2, 3)),
            (-1, (1, 2)),
            (4, (-1, -2, -3)),
            (-10, (1, 2, 3, 1, 2, 3)),
            (0, (-1, 2, -3)),
            (0, (1, -2, 3)),
            (-0.33333333, (0.333333333333, 0.666666666666)),
            (-5/17, (4/17, 3/17, 7/17, -1/17)),
        )

    def test_mul(self):
        mul = ca.operations['*']
        self.assertNumReduceOperation(
            mul,
            (6, (1, 2, 3)),
            (2, (1, 2)),
            (-6, (-1, -2, -3)),
            (36, (1, 2, 3, 1, 2, 3)),
            (6, (-1, 2, -3)),
            (-6, (1, -2, 3)),
            (0.222222222, (0.333333333333, 0.666666666666)),
            (-84/83521, (4/17, 3/17, 7/17, -1/17)),
        )

    def test_div(self):
        div = ca.operations['/']
        self.assertNumReduceOperation(
            div,
            (1/6, (1, 2, 3)),
            (0.5, (1, 2)),
            (-1/6, (-1, -2, -3)),
            (1/36, (1, 2, 3, 1, 2, 3)),
            (1/6, (-1, 2, -3)),
            (-1/6, (1, -2, 3)),
            (0.5, (0.333333333333, 0.666666666666)),
            (-4*17*17/(3*7), (4/17, 3/17, 7/17, -1/17)),
        )
class TestApp(AsyncHTTPTestCase):
    """Integration tests for the calculator HTTP API (POSTs a form-encoded
    JSON query and checks the JSON 'result' field)."""

    @property
    def url(self):
        # Cache the test server's base url on first access.
        if not hasattr(self, '_url'):
            self._url = self.get_url('/')
        return self._url

    def get_app(self):
        # AsyncHTTPTestCase hook: the tornado application under test.
        return ca.get_app()

    def get_body(self, op, *ops):
        """Build the form-encoded request body: query={"op": op, "ops": [...]}."""
        json_ops = json.dumps(ops)
        json_query = '{{"op": "{}", "ops": {}}}'.format(
            op, json_ops)
        return 'query=' + url_escape(json_query)

    @coroutine
    def send_query(self, http_body):
        """POST *http_body* and return the decoded JSON response.

        Asserts the response is a dict carrying a 'result' key.
        """
        response = yield self.http_client.fetch(
            self.url, method='POST', body=http_body)
        response = json.loads(
            response.body.decode()
        )
        self.assertIsInstance(response, dict)
        self.assertIn('result', response)
        return response

    @coroutine
    def send_f_query(self, op, *ops):
        """Convenience wrapper: build the body for *op*/*ops* and send it."""
        response = yield self.send_query(
            self.get_body(op, *ops)
        )
        return response

    @gen_test
    def test_query_parsing(self):
        # Every malformed query must yield result=None instead of crashing;
        # a well-formed one afterwards must still succeed.
        querys = [
            '',
            'query=',
            'query=hola',
            'query=' + url_escape('{}'),
            self.get_body('^', 1, 1),
            self.get_body('*'),
            '&%$(&%(&%$ +++////))',
        ]
        for query in querys:
            with self.subTest(query=query):
                response = yield self.send_query(query)
                self.assertIsNone(response['result'])
        response = yield self.send_f_query('+', 1, 1)
        self.assertIsNotNone(response['result'])

    @gen_test
    def test_single_number_ops(self):
        # A single operand is returned unchanged by every operation.
        ops = ['+', '-', '*', '/']
        for op in ops:
            with self.subTest(operation=op):
                response = yield self.send_f_query(op, 6)
                self.assertEqual(response['result'], 6)

    @gen_test
    def test_sum(self):
        op = '+'
        response = yield self.send_f_query(op, 1, 2, 3)
        self.assertEqual(response['result'], 6)
        response = yield self.send_f_query(
            op, 0.333333333, 0.6666666666)
        self.assertAlmostEqual(response['result'], 1)

    @gen_test
    def test_sub(self):
        op = '-'
        response = yield self.send_f_query(op, 1, 2, 3)
        self.assertEqual(response['result'], -4)
        response = yield self.send_f_query(
            op, 0.333333333, 0.6666666666)
        self.assertAlmostEqual(
            response['result'], -0.3333333333)

    @gen_test
    def test_mul(self):
        op = '*'
        response = yield self.send_f_query(op, 1, 2, 3)
        self.assertEqual(response['result'], 6)
        response = yield self.send_f_query(
            op, 0.333333333, 0.6666666666)
        self.assertAlmostEqual(
            response['result'], 0.2222222222222)

    @gen_test
    def test_div(self):
        op = '/'
        response = yield self.send_f_query(op, 1, 2, 3)
        self.assertAlmostEqual(
            response['result'], 0.166666666667)
        response = yield self.send_f_query(
            op, 0.333333333, 0.6666666666)
        self.assertAlmostEqual(response['result'], 0.5)
if __name__ == '__main__':
    # Run the unittest runner when this module is executed as a script.
    main()
| cganterh/tornado_calculator_api | test_calculator_api.py | Python | gpl-2.0 | 5,442 |
#
# Copyright (C) 2013 UNINETT
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Sensors collection and logging to graphite"""
from twisted.internet import defer
import time
from nav.ipdevpoll import Plugin
from nav.ipdevpoll.db import autocommit, run_in_thread
from nav.metrics.carbon import send_metrics
from nav.metrics.templates import metric_path_for_sensor
from nav.models.manage import Sensor
# Ask for no more than this number of values in a single SNMP GET operation
MAX_SENSORS_PER_REQUEST = 10
class StatSensors(Plugin):
    """Collects measurement values from registered sensors and pushes to
    Graphite.
    """
    @classmethod
    @defer.inlineCallbacks
    def can_handle(cls, netbox):
        # Handle a netbox only when the base plugin accepts it AND the netbox
        # has at least one sensor registered in the database.
        base_can_handle = yield defer.maybeDeferred(
            super(StatSensors, cls).can_handle, netbox)
        if base_can_handle:
            i_can_handle = yield run_in_thread(cls._has_sensors, netbox)
            defer.returnValue(i_can_handle)
        defer.returnValue(base_can_handle)

    @classmethod
    @autocommit
    def _has_sensors(cls, netbox):
        # Blocking ORM query; always run through run_in_thread().
        return Sensor.objects.filter(netbox=netbox.id).count() > 0

    @defer.inlineCallbacks
    def handle(self):
        # Collect all sensor OIDs and poll them in batches so each SNMP GET
        # stays below MAX_SENSORS_PER_REQUEST varbinds.
        sensors = yield run_in_thread(self._get_sensors)
        self._logger.debug("retrieving data from %d sensors", len(sensors))
        # NOTE(review): Python 2 semantics assumed here - keys() must return a
        # sliceable list (and iteritems() is used below); confirm before any
        # Python 3 migration.
        oids = sensors.keys()
        requests = [oids[x:x+MAX_SENSORS_PER_REQUEST]
                    for x in range(0, len(oids), MAX_SENSORS_PER_REQUEST)]
        for req in requests:
            data = yield self.agent.get(req).addCallback(
                self._response_to_metrics, sensors)
            self._logger.debug("got data from sensors: %r", data)

    @autocommit
    def _get_sensors(self):
        # Map each sensor's OID to its database row (as a plain dict).
        sensors = Sensor.objects.filter(netbox=self.netbox.id).values()
        return dict((row['oid'], row) for row in sensors)

    def _response_to_metrics(self, result, sensors):
        # Convert an SNMP result mapping {oid: value} into Graphite metrics,
        # push them, and return them (handy for debug logging above).
        metrics = []
        timestamp = time.time()
        data = ((sensors[oid], value) for oid, value in result.iteritems()
                if oid in sensors)
        for sensor, value in data:
            value = convert_to_precision(value, sensor)
            path = metric_path_for_sensor(self.netbox,
                                          sensor['internal_name'])
            metrics.append((path, (timestamp, value)))
        send_metrics(metrics)
        return metrics
def convert_to_precision(value, sensor):
    """Moves the decimal point of a value according to the precision defined
    for sensor.

    Falsy values (0/None) and a missing or zero precision leave the value
    unchanged.
    """
    precision = sensor.get('precision', 0)
    if not value or not precision:
        return value
    return value * (10 ** -precision)
| alexanderfefelov/nav | python/nav/ipdevpoll/plugins/statsensors.py | Python | gpl-2.0 | 3,216 |
# Test the signal module
# (Python 2 test script: uses print statements and the old raise syntax, and
# drives itself by forking a background shell that signals this process.)
from test_support import verbose, TestSkipped
import signal
import os
import sys

# Signals cannot be exercised on these platforms.
if sys.platform[:3] in ('win', 'os2') or sys.platform=='riscos':
    raise TestSkipped, "Can't test signal on %s" % sys.platform

if verbose:
    x = '-x'
else:
    x = '+x'
pid = os.getpid()

# Shell script that will send us asynchronous signals
# (kill -5 -> handlerA, kill -2 -> handlerB which raises, kill -3 -> ignored).
script = """
(
set %(x)s
sleep 2
kill -5 %(pid)d
sleep 2
kill -2 %(pid)d
sleep 2
kill -3 %(pid)d
) &
""" % vars()

def handlerA(*args):
    # Benign handler: only logs in verbose mode.
    if verbose:
        print "handlerA", args

HandlerBCalled = "HandlerBCalled"       # Exception (py2 string exception)

def handlerB(*args):
    # Handler that raises, so the pause() loop below can catch it.
    if verbose:
        print "handlerB", args
    raise HandlerBCalled, args

signal.alarm(20)                        # Entire test lasts at most 20 sec.
signal.signal(5, handlerA)
signal.signal(2, handlerB)
signal.signal(3, signal.SIG_IGN)
# SIGALRM raises KeyboardInterrupt, terminating the loop below.
signal.signal(signal.SIGALRM, signal.default_int_handler)
os.system(script)

print "starting pause() loop..."

try:
    while 1:
        if verbose:
            print "call pause()..."
        try:
            signal.pause()
            if verbose:
                print "pause() returned"
        except HandlerBCalled:
            if verbose:
                print "HandlerBCalled exception caught"
        else:
            pass

except KeyboardInterrupt:
    if verbose:
        print "KeyboardInterrupt (assume the alarm() went off)"
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
# Define a collection of group_by functions
# Registry mapping a group-by strategy name to its implementation.
GROUP_BY_MAP = {}


def group_by(name):
    """Decorator that registers a grouping function under ``name``."""
    def register(func):
        GROUP_BY_MAP[name] = func
        return func
    return register
def group_tasks(config, tasks):
    """Yield lists of deep-copied tasks grouped by the configured
    'group-by' strategy."""
    grouper = GROUP_BY_MAP[config['group-by']]
    for group in grouper(config, tasks).values():
        yield [copy.deepcopy(task) for task in group]
@group_by('build-type')
def build_type_grouping(config, tasks):
    """Group tasks by their 'build-type' attribute.

    Only tasks whose kind is listed in 'kind-dependencies' are considered;
    when 'only-for-build-types' is set, tasks of other build types are
    skipped.
    """
    groups = {}
    kind_dependencies = config.get('kind-dependencies')
    only_build_type = config.get('only-for-build-types')
    for task in tasks:
        if task.kind not in kind_dependencies:
            continue
        # Fetch the attribute once (the original looked it up twice).
        build_type = task.attributes.get('build-type')
        if only_build_type and build_type not in only_build_type:
            continue
        groups.setdefault(build_type, []).append(task)
    return groups
@group_by('attributes')
def attributes_grouping(config, tasks):
    """Group tasks (keyed by task label) that carry any of the attributes
    listed in 'only-for-attributes'.

    Without an 'only-for-attributes' filter nothing is grouped.
    """
    groups = {}
    kind_dependencies = config.get('kind-dependencies')
    only_attributes = config.get('only-for-attributes')
    for task in tasks:
        if task.kind not in kind_dependencies:
            continue
        # No attribute filter configured: nothing to group on.
        if not only_attributes:
            continue
        if not any(attr in task.attributes for attr in only_attributes):
            continue
        # (Dead "group_attr = None" assignment from the original removed.)
        groups.setdefault(task.label, []).append(task)
    return groups
| mozilla-mobile/focus-android | taskcluster/focus_android_taskgraph/loader/__init__.py | Python | mpl-2.0 | 1,789 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Plugins/VcsPlugins/vcsMercurial/HgDiffDialog.ui'
#
# Created: Tue Nov 18 17:53:57 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_HgDiffDialog(object):
    """Auto-generated (pyuic5) UI definition for the Mercurial diff dialog.

    Do not edit by hand: regenerate from HgDiffDialog.ui instead.
    """
    def setupUi(self, HgDiffDialog):
        """Build the widget tree and wire the static signal connections."""
        HgDiffDialog.setObjectName("HgDiffDialog")
        HgDiffDialog.resize(749, 646)
        self.vboxlayout = QtWidgets.QVBoxLayout(HgDiffDialog)
        self.vboxlayout.setObjectName("vboxlayout")
        self.contentsGroup = QtWidgets.QGroupBox(HgDiffDialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(3)
        sizePolicy.setHeightForWidth(self.contentsGroup.sizePolicy().hasHeightForWidth())
        self.contentsGroup.setSizePolicy(sizePolicy)
        self.contentsGroup.setObjectName("contentsGroup")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.contentsGroup)
        self.verticalLayout.setObjectName("verticalLayout")
        self.filesCombo = QtWidgets.QComboBox(self.contentsGroup)
        self.filesCombo.setObjectName("filesCombo")
        self.verticalLayout.addWidget(self.filesCombo)
        self.contents = QtWidgets.QTextEdit(self.contentsGroup)
        self.contents.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
        self.contents.setReadOnly(True)
        self.contents.setTabStopWidth(8)
        self.contents.setAcceptRichText(False)
        self.contents.setObjectName("contents")
        self.verticalLayout.addWidget(self.contents)
        self.vboxlayout.addWidget(self.contentsGroup)
        self.errorGroup = QtWidgets.QGroupBox(HgDiffDialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.errorGroup.sizePolicy().hasHeightForWidth())
        self.errorGroup.setSizePolicy(sizePolicy)
        self.errorGroup.setObjectName("errorGroup")
        self.vboxlayout1 = QtWidgets.QVBoxLayout(self.errorGroup)
        self.vboxlayout1.setObjectName("vboxlayout1")
        self.errors = QtWidgets.QTextEdit(self.errorGroup)
        self.errors.setReadOnly(True)
        self.errors.setAcceptRichText(False)
        self.errors.setObjectName("errors")
        self.vboxlayout1.addWidget(self.errors)
        self.vboxlayout.addWidget(self.errorGroup)
        self.inputGroup = QtWidgets.QGroupBox(HgDiffDialog)
        self.inputGroup.setObjectName("inputGroup")
        self.gridlayout = QtWidgets.QGridLayout(self.inputGroup)
        self.gridlayout.setObjectName("gridlayout")
        spacerItem = QtWidgets.QSpacerItem(327, 29, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridlayout.addItem(spacerItem, 1, 1, 1, 1)
        self.sendButton = QtWidgets.QPushButton(self.inputGroup)
        self.sendButton.setObjectName("sendButton")
        self.gridlayout.addWidget(self.sendButton, 1, 2, 1, 1)
        self.input = QtWidgets.QLineEdit(self.inputGroup)
        self.input.setObjectName("input")
        self.gridlayout.addWidget(self.input, 0, 0, 1, 3)
        self.passwordCheckBox = QtWidgets.QCheckBox(self.inputGroup)
        self.passwordCheckBox.setObjectName("passwordCheckBox")
        self.gridlayout.addWidget(self.passwordCheckBox, 1, 0, 1, 1)
        self.vboxlayout.addWidget(self.inputGroup)
        self.buttonBox = QtWidgets.QDialogButtonBox(HgDiffDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Save)
        self.buttonBox.setObjectName("buttonBox")
        self.vboxlayout.addWidget(self.buttonBox)

        self.retranslateUi(HgDiffDialog)
        self.buttonBox.rejected.connect(HgDiffDialog.close)
        QtCore.QMetaObject.connectSlotsByName(HgDiffDialog)
        HgDiffDialog.setTabOrder(self.filesCombo, self.contents)
        HgDiffDialog.setTabOrder(self.contents, self.errors)
        HgDiffDialog.setTabOrder(self.errors, self.input)
        HgDiffDialog.setTabOrder(self.input, self.passwordCheckBox)
        HgDiffDialog.setTabOrder(self.passwordCheckBox, self.sendButton)
        HgDiffDialog.setTabOrder(self.sendButton, self.buttonBox)

    def retranslateUi(self, HgDiffDialog):
        """Install the translated texts/tooltips on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        HgDiffDialog.setWindowTitle(_translate("HgDiffDialog", "Mercurial Diff"))
        self.contentsGroup.setTitle(_translate("HgDiffDialog", "Difference"))
        self.contents.setWhatsThis(_translate("HgDiffDialog", "<b>Mercurial Diff</b><p>This shows the output of the hg diff command.</p>"))
        self.errorGroup.setTitle(_translate("HgDiffDialog", "Errors"))
        self.inputGroup.setTitle(_translate("HgDiffDialog", "Input"))
        self.sendButton.setToolTip(_translate("HgDiffDialog", "Press to send the input to the hg process"))
        self.sendButton.setText(_translate("HgDiffDialog", "&Send"))
        self.sendButton.setShortcut(_translate("HgDiffDialog", "Alt+S"))
        self.input.setToolTip(_translate("HgDiffDialog", "Enter data to be sent to the hg process"))
        self.passwordCheckBox.setToolTip(_translate("HgDiffDialog", "Select to switch the input field to password mode"))
        self.passwordCheckBox.setText(_translate("HgDiffDialog", "&Password Mode"))
        self.passwordCheckBox.setShortcut(_translate("HgDiffDialog", "Alt+P"))
| davy39/eric | Plugins/VcsPlugins/vcsMercurial/Ui_HgDiffDialog.py | Python | gpl-3.0 | 5,660 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from collections import deque, defaultdict
from nose.tools import *
import json
from navitiacommon import request_pb2, response_pb2
from datetime import datetime
import logging
import re
from shapely.geometry import shape
"""
Some small functions to check the service responses
"""
def check_url(tester, url, might_have_additional_args=False, **kwargs):
    """
    Test url status code to 200 and if valid format response as json

    if might_have_additional_args is set to True,
        we just don't want a 404 error (the url might be in error because
        of mandatory params not provided)
    else
        we don't want an error on the url
    """
    response = tester.get(url, **kwargs)

    assert response, "response for url {} is null".format(url)
    if might_have_additional_args:
        # The pretty-printed body stays inside the assert message so it is
        # only evaluated on failure.
        assert response.status_code != 404, "unreachable url {}".format(
            json.dumps(json.loads(response.data), indent=2))
    else:
        eq_(response.status_code, 200,
            "invalid return code, response : {}".format(
                json.dumps(json.loads(response.data), indent=2)))
    return response
def get_not_null(dict, field):
    """Return dict[field], asserting the field exists and is non-null.

    Booleans and integers are exempt from the truthiness check, so
    False and 0 are valid values.
    """
    assert field in dict

    val = dict[field]
    if type(val) in (bool, int):
        return val

    assert val, "value of field {} is null".format(field)
    return val
# A validity-pattern bitmap: exactly 366 '0'/'1' characters (leap-year size).
days_regexp = re.compile("^(0|1){366}$")


def is_valid_days(days):
    """True when ``days`` is a valid 366-character 0/1 bitmap."""
    return days_regexp.match(days) is not None
version_number_regexp = re.compile("v[0-9]+\.[0-9]+\.[0-9]+[-.*]?")


def is_valid_navitia_version_number(str):
    """Assert that *str* is a valid navitia version number.

    It must contain at least v{major}.{minor}.{hotfix}; a trailing git sha1
    is accepted.  Raises AssertionError otherwise, returns True on success.

    >>> is_valid_navitia_version_number("v1.12.126")
    True
    >>> is_valid_navitia_version_number("v1.3.1-73-g4c7524b")
    True
    >>> is_valid_navitia_version_number("1.12.126")
    Traceback (most recent call last):
    AssertionError
    >>> is_valid_navitia_version_number("v12.126-73-g4c7524b")
    Traceback (most recent call last):
    AssertionError
    """
    assert version_number_regexp.match(str)
    return True
def get_valid_datetime(str, possible_errors=False):
    """Parse *str* as a %Y%m%dT%H%M%S datetime and return it.

    When possible_errors is True, the literal "not-a-date-time" is accepted
    and yields None; any other invalid input fails with AssertionError.

    >>> get_valid_datetime("20120131T215030")
    datetime.datetime(2012, 1, 31, 21, 50, 30)
    >>> get_valid_datetime("not-a-date-time", possible_errors=True)
    >>> get_valid_datetime("bob")
    Traceback (most recent call last):
    AssertionError
    """
    assert str
    try:
        parsed = datetime.strptime(str, "%Y%m%dT%H%M%S")
    except ValueError:
        if not possible_errors:
            logging.error("string '{}' is no valid datetime".format(str))
            assert False
        assert str == "not-a-date-time"
        return None
    return parsed
def get_valid_time(str):
    """Parse *str* as %H%M%S and return it (as a datetime on 1900-01-01,
    because there is no obviously better stdlib carrier for a bare time).

    >>> get_valid_time("215030")
    datetime.datetime(1900, 1, 1, 21, 50, 30)
    """
    assert str
    try:
        parsed = datetime.strptime(str, "%H%M%S")
    except ValueError:
        logging.error("string '{}' is no valid time".format(str))
        assert False
    else:
        return parsed
def is_valid_date(str):
    """Return True when *str* parses as a %Y%m%d date, False otherwise.

    >>> is_valid_date("20120131")
    True
    >>> is_valid_date("20123101")
    False
    """
    assert str
    try:
        datetime.strptime(str, "%Y%m%d")
    except ValueError:
        logging.error("string '{}' is no valid date".format(str))
        return False
    else:
        return True
def is_valid_bool(str):
    """True for real booleans and the case-insensitive strings true/false."""
    if type(str) is bool:
        return True

    assert str
    # else check as string
    return str.lower() in ("true", "false")
def get_valid_float(str):
    """Convert *str* to float, failing the test if it cannot be converted.

    Floats are returned unchanged.
    """
    if type(str) is float:
        return str
    try:
        return float(str)
    except ValueError:
        # BUGFIX: the original asserted on a non-empty message *string*,
        # which is always truthy, so the assert never fired and invalid
        # input silently returned None.
        assert False, "cannot convert {} to float".format(str)
def get_valid_int(str):
    """Convert *str* to int; empty strings and non-numbers fail the test.

    Integers are returned unchanged.
    """
    assert str != ""
    if type(str) is int:
        return str
    try:
        value = int(str)
    except ValueError:
        assert False
    return value
def is_valid_lat(str):
    """Assert that *str* is a latitude in [-90, 90] degrees."""
    latitude = get_valid_float(str)
    assert -90.0 <= latitude <= 90.0, "lat should be between -90 and 90"
def is_valid_lon(str):
    """Assert that *str* is a longitude in [-180, 180] degrees."""
    longitude = get_valid_float(str)
    assert -180.0 <= longitude <= 180.0, "lon should be between -180 and 180"
def is_valid_coord(coord):
    """Assert that *coord* carries non-null, in-range 'lat'/'lon' fields."""
    latitude = get_not_null(coord, "lat")
    longitude = get_not_null(coord, "lon")
    is_valid_lat(latitude)
    is_valid_lon(longitude)
def get_links_dict(response):
    """Return the response's "links" list as a dict keyed by each link's
    'rel' field (falling back to 'type')."""
    raw_links = get_not_null(response, "links")
    return {link.get('rel', link.get('type', None)): link
            for link in raw_links}
def check_links(object, tester):
    """
    Retrieve the links of ``object`` (keyed by 'rel'/'type') and check that:
      * 'internal' and 'templated' are valid booleans when present
      * non-internal links carry an 'href'
      * hrefs that are neither templated nor internal are reachable urls
      * internal links carry a non-empty 'rel'
      * every link carries a non-empty 'type'
    """
    links = get_links_dict(object)

    for link_name, link in links.iteritems():
        def flag(name):
            """Truthiness of an optional boolean-ish attribute (False if absent)."""
            if name not in link:
                return False
            assert is_valid_bool(link[name])
            return bool(link[name])

        internal = flag('internal')
        templated = flag('templated')

        if not internal:
            assert 'href' in link, "no href in link"

        if not (internal or templated):
            # the url must actually be reachable
            assert check_url(tester, link['href'].replace('http://localhost', ''),
                             might_have_additional_args=False), "href's link must be a valid url"

        if internal:
            assert 'rel' in link
            assert link['rel']

        assert 'type' in link
        assert link['type']

    return links
def check_internal_links(response, tester):
    """
    We want to check that all 'internal' link are correctly linked to an element in the response

    for that we first collect all internal link
    then iterate on all node and remove a link if we find a node with
    * a name equals to link.'rel'
    * an id equals to link.'id'

    At the end the internal link list must be empty
    """
    from jormungandr import utils  #import late not to load it before updating the conf for the tests

    internal_links_id = set()
    internal_link_types = set()  # set with the types we look for

    def add_link_visitor(name, val):
        # First pass: collect the id and 'rel' of every internal link node.
        if val and name == 'links':
            if 'internal' in val and bool(val['internal']):
                internal_links_id.add(val['id'])
                internal_link_types.add(val['rel'])

    utils.walk_dict(response, add_link_visitor)

    def check_node(name, val):
        # Second pass: tick off each link whose target node exists.
        if name in internal_link_types:
            if 'id' in val and val['id'] in internal_links_id:
                #found one ref, we can remove the link
                internal_links_id.remove(val['id'])

    utils.walk_dict(response, check_node)

    # Any id left over is an internal link pointing at nothing.
    assert not internal_links_id, "cannot find correct ref for internal links : {}".\
        format([lid for lid in internal_links_id])
class unique_dict(dict):
    """
    We often have to check that a set of values are uniq, this container is there to do the job

    >>> d = unique_dict('id')
    >>> d['bob'] = 1
    >>> d['bobette'] = 1
    >>> d['bob'] = 2
    Traceback (most recent call last):
        ...
    AssertionError: the id must be unique, but 'bob' is not
    """
    def __init__(self, key_name):
        # name of the logical key, only used to build the assertion message
        self.key_name = key_name

    def __setitem__(self, key, value):
        # bug fix: the message read "the {} if must be unique" (stray "if");
        # also use the idiomatic 'not in' test
        assert key not in self, \
            "the {} must be unique, but '{}' is not".format(self.key_name, key)
        dict.__setitem__(self, key, value)
def query_from_str(query_str):
    """
    for convenience, convert a url's query string to a dict
    (a key appearing several times is mapped to the list of its values)

    The doctests sort the items so they do not depend on dict ordering:

    >>> sorted(query_from_str("toto/tata?bob=toto&bobette=tata&bobinos=tutu").items())
    [('bob', 'toto'), ('bobette', 'tata'), ('bobinos', 'tutu')]
    >>> sorted(query_from_str("toto/tata?bob=toto&bob=tata&bob=titi&bob=tata&bobinos=tutu").items())
    [('bob', ['toto', 'tata', 'titi', 'tata']), ('bobinos', 'tutu')]
    """
    # note: the parameter used to be called 'str', shadowing the builtin;
    # every call in this file is positional so the rename is safe
    query = {}
    last_elt = query_str.split("?")[-1]
    for s in last_elt.split("&"):
        # split on the first '=' only, so values containing '=' survive
        k, v = s.split("=", 1)
        if k in query:
            old_val = query[k]
            if isinstance(old_val, list):
                old_val.append(v)
            else:
                query[k] = [old_val, v]
        else:
            query[k] = v
    return query
def is_valid_feed_publisher(feed_publisher):
    """Assert that a feed publisher carries every mandatory field."""
    for field in ("id", "name", "license", "url"):
        get_not_null(feed_publisher, field)
def is_valid_journey_response(response, tester, query_str):
    """
    Structural check of a whole /journeys response:
    - every journey and its sections are valid and section ids are unique
    - the fare tickets are valid and their ids are unique
    - internal and external links resolve
    - prev/next/first/last links forward the query parameters
    - the feed publisher block matches the test fixture
    """
    query_dict = query_from_str(query_str)
    journeys = get_not_null(response, "journeys")
    all_sections = unique_dict('id')
    assert len(journeys) > 0, "we must at least have one journey"
    for j in journeys:
        is_valid_journey(j, tester, query_dict)
        for s in j['sections']:
            all_sections[s['id']] = s
    # check the fare section
    # the fares must be structurally valid and all link to sections must be ok
    all_tickets = unique_dict('id')
    fares = response['tickets']
    for f in fares:
        is_valid_ticket(f, tester)
        all_tickets[f['id']] = f
    check_internal_links(response, tester)
    #check other links
    check_links(response, tester)
    # more checks on links, we want the prev/next/first/last,
    # to have forwarded all params, (and the time must be right)
    journeys_links = get_links_dict(response)
    for l in ["prev", "next", "first", "last"]:
        assert l in journeys_links
        url = journeys_links[l]['href']
        additional_args = query_from_str(url)
        for k, v in additional_args.iteritems():
            if k == 'datetime':
                #TODO check datetime
                continue
            if k == 'datetime_represents':
                query_dt_rep = query_dict.get('datetime_represents', 'departure')
                if l in ['prev', 'last']:
                    #the datetime_represents is negated
                    if query_dt_rep == 'departure':
                        assert v == 'arrival'
                    else:
                        assert v == 'departure'
                else:
                    # bug fix: this was a bare comparison ('query_dt_rep == v')
                    # whose result was discarded, so nothing was checked
                    assert query_dt_rep == v
                continue
            assert query_dict[k] == v, "we must have the same query"
    feed_publishers = get_not_null(response, "feed_publishers")
    feed_publisher = feed_publishers[0]
    is_valid_feed_publisher(feed_publisher)
    assert (feed_publisher["id"] == "builder")
    assert (feed_publisher["name"] == "canal tp")
    assert (feed_publisher["license"] == "ODBL")
    assert (feed_publisher["url"] == "www.canaltp.fr")
def is_valid_journey(journey, tester, query):
    # structural check of one journey: datetime coherence and section chaining
    arrival = get_valid_datetime(journey['arrival_date_time'])
    departure = get_valid_datetime(journey['departure_date_time'])
    request = get_valid_datetime(journey['requested_date_time'])
    assert arrival >= departure
    if 'datetime_represents' not in query or query['datetime_represents'] == "departure":
        #for 'departure after' query, the departure must be... after \o/
        assert departure >= request
    else:
        assert arrival <= request
    #we want to test that all departure match de previous section arrival
    last_arrival = departure
    for s in journey['sections']:
        is_valid_section(s, query)
        section_departure = get_valid_datetime(s['departure_date_time'])
        # NOTE(review): timedelta.seconds (not total_seconds) is used, so a
        # negative gap wraps to ~86399 and also fails the check — confirm intended
        assert (section_departure - last_arrival).seconds <= 1  # there cannot be more than one second between the 2
        last_arrival = get_valid_datetime(s['arrival_date_time'])
        # test if geojson is valid
        g = s.get('geojson')
        g is None or shape(g)
    assert get_valid_datetime(journey['sections'][-1]['arrival_date_time']) == last_arrival
def is_valid_section(section, query):
    # a section's duration must match its arrival/departure span, and for
    # street network sections the path steps must sum up to that duration
    arrival = get_valid_datetime(section['arrival_date_time'])
    departure = get_valid_datetime(section['departure_date_time'])
    # NOTE(review): .seconds ignores the timedelta's days component, so a
    # section longer than 24h would not validate correctly — confirm sections
    # are expected to stay under a day
    assert (arrival - departure).seconds == section['duration']
    assert section['type']  # type cannot be empty
    #for street network section, we must have a valid path
    if section['type'] == 'street_network':
        assert section['mode']  # mode cannot be empty for street network
        total_duration = 0
        for p in section['path']:
            assert get_valid_int(p['length']) >= 0
            assert -180 <= get_valid_int(p['direction']) <= 180  # direction is an angle
            #No constraint on name, it can be empty
            dur = get_valid_int(p['duration'])
            assert dur >= 0
            total_duration += dur
        assert total_duration == section['duration']
    #TODO check geojson
    #TODO check stop_date_times
    #TODO check from/to
def is_valid_ticket(ticket, tester):
    """Structural check of one fare ticket."""
    is_found = get_not_null(ticket, 'found')
    assert is_valid_bool(is_found)
    for field in ('id', 'name'):
        get_not_null(ticket, field)
    cost = get_not_null(ticket, 'cost')
    if is_found:
        # a ticket that was actually computed must carry a currency and a price
        get_not_null(cost, 'currency')
        get_valid_float(get_not_null(cost, 'value'))
    check_links(ticket, tester)
def is_valid_stop_area(stop_area, depth_check=1):
    """
    check the structure of a stop area
    """
    get_not_null(stop_area, "name")
    coords = get_not_null(stop_area, "coord")
    is_valid_label(get_not_null(stop_area, "label"))
    is_valid_coord(coords)
    for note in stop_area.get('comments', []):
        is_valid_comment(note)
def is_valid_stop_point(stop_point, depth_check=1):
    """
    check the structure of a stop point;
    its stop_area is dumped only when depth_check > 0
    """
    get_not_null(stop_point, "name")
    is_valid_label(get_not_null(stop_point, "label"))
    is_valid_coord(get_not_null(stop_point, "coord"))
    for note in stop_point.get('comments', []):
        is_valid_comment(note)
    if depth_check > 0:
        is_valid_stop_area(get_not_null(stop_point, "stop_area"), depth_check - 1)
    else:
        assert "stop_area" not in stop_point
def is_valid_route(route, depth_check=1):
    get_not_null(route, "name")
    is_valid_bool(get_not_null(route, "is_frequence"))
    direction = get_not_null(route, "direction")
    is_valid_place(direction, depth_check - 1)
    # NOTE(review): the original comment claimed the direction "must always be
    # a stop point", but the code requires a stop_area — the code was trusted
    # here; confirm which one is intended
    assert get_not_null(direction, "embedded_type") == "stop_area"
    is_valid_stop_area(get_not_null(direction, "stop_area"), depth_check - 1)
    if depth_check > 0:
        is_valid_line(get_not_null(route, "line"), depth_check - 1)
    else:
        assert 'line' not in route
    for c in route.get('comments', []):
        is_valid_comment(c)
    # test if geojson is valid
    g = route.get('geojson')
    g is None or shape(g)  #TODO check length
def is_valid_company(company, depth_check=1):
    """Check the mandatory fields of a company."""
    for field in ("name", "id"):
        get_not_null(company, field)
def is_valid_physical_mode(physical_mode, depth_check=1):
    """Check the mandatory fields of a physical mode."""
    for field in ("name", "id"):
        get_not_null(physical_mode, field)
def is_valid_line(line, depth_check=1):
    """Check the structure of a line, recursing on network/routes when depth allows."""
    get_not_null(line, "name")
    get_not_null(line, "id")
    for note in line.get('comments', []):
        is_valid_comment(note)
    if depth_check <= 0:
        # at depth 0 the nested objects must not be dumped at all
        assert 'network' not in line
        assert 'routes' not in line
    else:
        is_valid_network(get_not_null(line, 'network'), depth_check - 1)
        for sub_route in get_not_null(line, 'routes'):
            is_valid_route(sub_route, depth_check - 1)
    # a line may carry its shape; when present it must be a valid geojson
    geo = line.get('geojson')
    geo is None or shape(geo)  #TODO check length
def is_valid_line_group(line_group, depth_check=1):
    """Check the structure of a line group."""
    get_not_null(line_group, "name")
    get_not_null(line_group, "id")
    if depth_check <= 0:
        return
    # the main_line is always displayed with a depth of 0 to reduce duplicated informations
    is_valid_line(get_not_null(line_group, "main_line"), 0)
    for grouped_line in line_group.get('lines', []):
        is_valid_line(grouped_line, depth_check - 1)
def is_valid_codes(codes):
    """Each code must carry a non empty type and value."""
    for code in codes:
        for field in ("type", "value"):
            get_not_null(code, field)
def is_valid_places(places, depth_check=1):
    """Validate every place of the list."""
    for place in places:
        is_valid_place(place, depth_check)
def is_valid_place(place, depth_check=1):
    """
    Check a place and dispatch to the checker matching its embedded_type.
    For stop_area/stop_point places the place name must be the stop's label.
    """
    if depth_check < 0:
        return
    n = get_not_null(place, "name")
    get_not_null(place, "id")
    # renamed from 'type' (shadowed the builtin)
    embedded_type = get_not_null(place, "embedded_type")
    if embedded_type == "address":
        address = get_not_null(place, "address")
        is_valid_address(address, depth_check)
    elif embedded_type == "stop_area":
        stop_area = get_not_null(place, "stop_area")
        is_valid_stop_area(stop_area, depth_check)
        #for stops name should be the label
        is_valid_label(n)
        assert stop_area['label'] == n
    elif embedded_type == "stop_point":
        stop_point = get_not_null(place, "stop_point")
        is_valid_stop_point(stop_point, depth_check)
        is_valid_label(n)
        assert stop_point['label'] == n
    elif embedded_type == "poi":
        poi = get_not_null(place, "poi")
        # TODO
        #is_valid_poi(poi, depth_check)
    else:
        # bug fix: 'assert(False, msg)' asserted a non-empty tuple, which is
        # always true, so unknown embedded types were silently accepted
        assert False, "invalid type"
def is_valid_address(address, depth_check=1):
    """Check the structure of an address; its id must be 'lon;lat'."""
    address_id = get_not_null(address, "id")
    lon, lat = address_id.split(';')
    is_valid_lon(lon)
    is_valid_lat(lat)
    for field in ("house_number", "name", "administrative_regions"):  # TODO test admins
        get_not_null(address, field)
    is_valid_coord(get_not_null(address, "coord"))
def is_valid_validity_pattern(validity_pattern, depth_check=1):
    """A validity pattern is a beginning date plus a days bitfield."""
    assert is_valid_date(get_not_null(validity_pattern, "beginning_date"))
    assert is_valid_days(get_not_null(validity_pattern, "days"))
def is_valid_network(network, depth_check=1):
    """Check the mandatory fields of a network."""
    for field in ("id", "name"):
        get_not_null(network, field)
def is_valid_vehicle_journey(vj, depth_check=1):
    # structural check of a vehicle journey; the nested journey pattern,
    # validity pattern and stop times are dumped only when depth_check > 0
    if depth_check < 0:
        return
    get_not_null(vj, "id")
    get_not_null(vj, "name")
    for c in vj.get('comments', []):
        is_valid_comment(c)
    if depth_check > 0:
        is_valid_journey_pattern(get_not_null(vj, 'journey_pattern'), depth_check=depth_check-1)
        is_valid_validity_pattern(get_not_null(vj, 'validity_pattern'), depth_check=depth_check-1)
        stoptimes = get_not_null(vj, 'stop_times')
        for st in stoptimes:
            get_valid_time(get_not_null(st, 'arrival_time'))
            get_valid_time(get_not_null(st, 'departure_time'))
            if depth_check > 1:
                #with depth > 1 (we are already in the stoptime nested object), we don't want jpp
                is_valid_journey_pattern_point(get_not_null(st, 'journey_pattern_point'), depth_check - 2)
            else:
                assert 'journey_pattern_point' not in st
    else:
        #with depth = 0, we don't want the stop times, the jp, vp, ...
        assert 'stop_times' not in vj
        assert 'journey_pattern' not in vj
        assert 'validity_pattern' not in vj
def is_valid_journey_pattern(jp, depth_check=1):
    """Check the minimal structure of a journey pattern."""
    if depth_check < 0:
        return
    for field in ("id", "name"):
        get_not_null(jp, field)
def is_valid_journey_pattern_point(jpp, depth_check=1):
    """Check a journey pattern point; its stop_point is dumped only when depth > 0."""
    get_not_null(jpp, "id")
    if depth_check <= 0:
        assert 'stop_point' not in jpp
    else:
        is_valid_stop_point(get_not_null(jpp, 'stop_point'), depth_check=depth_check - 1)
def is_valid_comment(comment):
    """A comment must carry a non empty type and value."""
    for field in ('type', 'value'):
        get_not_null(comment, field)
def is_valid_region_status(status):
    """Structural check of a region status block."""
    get_not_null(status, 'status')
    for int_field in ('data_version', 'nb_threads'):
        get_valid_int(get_not_null(status, int_field))
    for bool_field in ('last_load_status', 'is_connected_to_rabbitmq'):
        is_valid_bool(get_not_null(status, bool_field))
    for date_field in ('end_production_date', 'start_production_date'):
        is_valid_date(get_not_null(status, date_field))
    for datetime_field in ('last_load_at', 'publication_date'):
        get_valid_datetime(get_not_null(status, datetime_field), possible_errors=True)
# The '(...)' part is required in labels even though in real data it may be
# missing (e.g. an admin with no post code, or a stop with no admin).
# This makes test data a bit harder to build, but lets us check label creation.
label_regexp = re.compile(".* \(.*\)")

def is_valid_label(label):
    """Return True when the label contains a space followed by a parenthesized part."""
    return label_regexp.match(label) is not None
def get_disruptions(obj, response):
    """Resolve the disruption links of 'obj' against the response's disruptions.

    Return None when the object carries no links at all.
    """
    disruptions_by_id = {d['id']: d for d in response['disruptions']}
    if 'links' not in obj:
        return None
    return [disruptions_by_id[link['id']]
            for link in obj['links'] if link['type'] == 'disruption']
def is_valid_disruption(disruption):
    """Structural check of one disruption and its messages."""
    get_not_null(disruption, 'id')
    get_not_null(disruption, 'disruption_id')
    severity = get_not_null(disruption, 'severity')
    for field in ('name', 'color', 'effect'):
        get_not_null(severity, field)
    messages = get_not_null(disruption, 'messages')
    assert len(messages) > 0
    for message in messages:
        get_not_null(message, "text")
        channel = get_not_null(message, 'channel')
        for field in ("content_type", "id", "name"):
            get_not_null(channel, field)
# handy fixtures shared by the integration tests:
s_coord = "0.0000898312;0.0000898312"  # coordinate of S in the dataset
r_coord = "0.00188646;0.00071865"  # coordinate of R in the dataset
#default journey query used in various test
journey_basic_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}"\
    .format(from_coord=s_coord, to_coord=r_coord, datetime="20120614T080000")
def get_all_disruptions(elem, response):
    """
    return a map with the disruption id as key and the list of disruption + impacted object as value for a item of the response
    """
    disruption_by_obj = defaultdict(list)
    all_disruptions = {d['id']: d for d in response['disruptions']}
    def disruptions_filler(_, obj):
        # visitor called on every node of 'elem'
        try:
            if 'links' not in obj:
                return
        except TypeError:
            # node is not dict-like (list item, scalar, ...): nothing to collect
            return
        real_disruptions = [all_disruptions[d['id']] for d in obj['links'] if d['type'] == 'disruption']
        for d in real_disruptions:
            disruption_by_obj[d['id']].append((d, obj))
    #we import utils here else it will import jormungandr too early in the test
    from jormungandr import utils
    utils.walk_dict(elem, disruptions_filler)
    return disruption_by_obj
| VincentCATILLON/navitia | source/jormungandr/tests/check_utils.py | Python | agpl-3.0 | 25,565 |
"""Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <issam.laradji@gmail.com>
# Andreas Mueller
# Jiyuan Qian
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
import warnings
import scipy.optimize
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..base import is_classifier
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import _safe_indexing
from ..utils import check_array, column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..utils.multiclass import _check_partial_fit_first_call, unique_labels
from ..utils.multiclass import type_of_target
from ..utils.optimize import _check_optimize_result
# solvers that learn iteratively on mini-batches (and thus support partial_fit)
_STOCHASTIC_SOLVERS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta):
    """Base class for MLP classification and regression.
    Warning: This class should not be used directly.
    Use derived classes instead.
    .. versionadded:: 0.18
    """
    @abstractmethod
    def __init__(self, hidden_layer_sizes, activation, solver,
                 alpha, batch_size, learning_rate, learning_rate_init, power_t,
                 max_iter, loss, shuffle, random_state, tol, verbose,
                 warm_start, momentum, nesterovs_momentum, early_stopping,
                 validation_fraction, beta_1, beta_2, epsilon,
                 n_iter_no_change, max_fun):
        # Only stores the hyper-parameters unchanged (scikit-learn convention:
        # no validation or work in __init__; see _validate_hyperparameters).
        self.activation = activation
        self.solver = solver
        self.alpha = alpha
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.learning_rate_init = learning_rate_init
        self.power_t = power_t
        self.max_iter = max_iter
        self.loss = loss
        self.hidden_layer_sizes = hidden_layer_sizes
        self.shuffle = shuffle
        self.random_state = random_state
        self.tol = tol
        self.verbose = verbose
        self.warm_start = warm_start
        self.momentum = momentum
        self.nesterovs_momentum = nesterovs_momentum
        self.early_stopping = early_stopping
        self.validation_fraction = validation_fraction
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.n_iter_no_change = n_iter_no_change
        self.max_fun = max_fun
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
    def _forward_pass(self, activations):
        """Perform a forward pass on the network by computing the values
        of the neurons in the hidden layers and the output layer.
        Parameters
        ----------
        activations : list, length = n_layers - 1
            The ith element of the list holds the values of the ith layer.
        """
        hidden_activation = ACTIVATIONS[self.activation]
        # Iterate over the hidden layers
        for i in range(self.n_layers_ - 1):
            activations[i + 1] = safe_sparse_dot(activations[i],
                                                 self.coefs_[i])
            activations[i + 1] += self.intercepts_[i]
            # For the hidden layers
            if (i + 1) != (self.n_layers_ - 1):
                hidden_activation(activations[i + 1])
        # For the last layer
        # NOTE: relies on the loop variable 'i' keeping its final value
        # (n_layers_ - 2), so activations[i + 1] is the output layer
        output_activation = ACTIVATIONS[self.out_activation_]
        output_activation(activations[i + 1])
        return activations
    def _forward_pass_fast(self, X):
        """Predict using the trained model
        This is the same as _forward_pass but does not record the activations
        of all layers and only returns the last layer's activation.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The decision function of the samples for each class in the model.
        """
        X = check_array(X, accept_sparse=['csr', 'csc'])
        # Initialize first layer
        activation = X
        # Forward propagate, keeping only the current layer's activation
        hidden_activation = ACTIVATIONS[self.activation]
        for i in range(self.n_layers_ - 1):
            activation = safe_sparse_dot(activation, self.coefs_[i])
            activation += self.intercepts_[i]
            if i != self.n_layers_ - 2:
                hidden_activation(activation)
        output_activation = ACTIVATIONS[self.out_activation_]
        output_activation(activation)
        return activation
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
    def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
                         coef_grads, intercept_grads):
        """Compute the MLP loss function and its corresponding derivatives
        with respect to the different parameters given in the initialization.
        Returned gradients are packed in a single vector so it can be used
        in lbfgs
        Parameters
        ----------
        packed_coef_inter : ndarray
            A vector comprising the flattened coefficients and intercepts.
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        y : ndarray of shape (n_samples,)
            The target values.
        activations : list, length = n_layers - 1
            The ith element of the list holds the values of the ith layer.
        deltas : list, length = n_layers - 1
            The ith element of the list holds the difference between the
            activations of the i + 1 layer and the backpropagated error.
            More specifically, deltas are gradients of loss with respect to z
            in each layer, where z = wx + b is the value of a particular layer
            before passing through the activation function
        coef_grads : list, length = n_layers - 1
            The ith element contains the amount of change used to update the
            coefficient parameters of the ith layer in an iteration.
        intercept_grads : list, length = n_layers - 1
            The ith element contains the amount of change used to update the
            intercept parameters of the ith layer in an iteration.
        Returns
        -------
        loss : float
        grad : array-like, shape (number of nodes of all layers,)
        """
        # load the flat parameter vector proposed by the optimizer into
        # coefs_/intercepts_ before evaluating loss and gradients
        self._unpack(packed_coef_inter)
        loss, coef_grads, intercept_grads = self._backprop(
            X, y, activations, deltas, coef_grads, intercept_grads)
        grad = _pack(coef_grads, intercept_grads)
        return loss, grad
    def _backprop(self, X, y, activations, deltas, coef_grads,
                  intercept_grads):
        """Compute the MLP loss function and its corresponding derivatives
        with respect to each parameter: weights and bias vectors.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        y : ndarray of shape (n_samples,)
            The target values.
        activations : list, length = n_layers - 1
            The ith element of the list holds the values of the ith layer.
        deltas : list, length = n_layers - 1
            The ith element of the list holds the difference between the
            activations of the i + 1 layer and the backpropagated error.
            More specifically, deltas are gradients of loss with respect to z
            in each layer, where z = wx + b is the value of a particular layer
            before passing through the activation function
        coef_grads : list, length = n_layers - 1
            The ith element contains the amount of change used to update the
            coefficient parameters of the ith layer in an iteration.
        intercept_grads : list, length = n_layers - 1
            The ith element contains the amount of change used to update the
            intercept parameters of the ith layer in an iteration.
        Returns
        -------
        loss : float
        coef_grads : list, length = n_layers - 1
        intercept_grads : list, length = n_layers - 1
        """
        n_samples = X.shape[0]
        # Forward propagate
        activations = self._forward_pass(activations)
        # Get loss
        loss_func_name = self.loss
        if loss_func_name == 'log_loss' and self.out_activation_ == 'logistic':
            loss_func_name = 'binary_log_loss'
        loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
        # Add L2 regularization term to loss
        values = 0
        for s in self.coefs_:
            s = s.ravel()
            values += np.dot(s, s)
        loss += (0.5 * self.alpha) * values / n_samples
        # Backward propagate
        last = self.n_layers_ - 2
        # The calculation of delta[last] here works with following
        # combinations of output activation and loss function:
        # sigmoid and binary cross entropy, softmax and categorical cross
        # entropy, and identity with squared loss
        deltas[last] = activations[-1] - y
        # Compute gradient for the last layer
        self._compute_loss_grad(
            last, n_samples, activations, deltas, coef_grads, intercept_grads)
        inplace_derivative = DERIVATIVES[self.activation]
        # Iterate over the hidden layers (chain rule: propagate delta backwards
        # through the weights, then apply the activation derivative in place)
        for i in range(self.n_layers_ - 2, 0, -1):
            deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
            inplace_derivative(activations[i], deltas[i - 1])
            self._compute_loss_grad(
                i - 1, n_samples, activations, deltas, coef_grads,
                intercept_grads)
        return loss, coef_grads, intercept_grads
    def _initialize(self, y, layer_units, dtype):
        # set all attributes, allocate weights etc for first call
        # Initialize parameters
        self.n_iter_ = 0
        self.t_ = 0
        self.n_outputs_ = y.shape[1]
        # Compute the number of layers
        self.n_layers_ = len(layer_units)
        # Output for regression
        if not is_classifier(self):
            self.out_activation_ = 'identity'
        # Output for multi class
        elif self._label_binarizer.y_type_ == 'multiclass':
            self.out_activation_ = 'softmax'
        # Output for binary class and multi-label
        else:
            self.out_activation_ = 'logistic'
        # Initialize coefficient and intercept layers
        self.coefs_ = []
        self.intercepts_ = []
        for i in range(self.n_layers_ - 1):
            coef_init, intercept_init = self._init_coef(layer_units[i],
                                                        layer_units[i + 1],
                                                        dtype)
            self.coefs_.append(coef_init)
            self.intercepts_.append(intercept_init)
        if self.solver in _STOCHASTIC_SOLVERS:
            # bookkeeping used by _fit_stochastic / _update_no_improvement_count
            self.loss_curve_ = []
            self._no_improvement_count = 0
            if self.early_stopping:
                self.validation_scores_ = []
                self.best_validation_score_ = -np.inf
            else:
                self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out, dtype):
# Use the initialization method recommended by
# Glorot et al.
factor = 6.
if self.activation == 'logistic':
factor = 2.
init_bound = np.sqrt(factor / (fan_in + fan_out))
# Generate weights and bias:
coef_init = self._random_state.uniform(-init_bound, init_bound,
(fan_in, fan_out))
intercept_init = self._random_state.uniform(-init_bound, init_bound,
fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return coef_init, intercept_init
    def _fit(self, X, y, incremental=False):
        """Shared implementation behind fit (incremental=False) and
        partial_fit (incremental=True): validates input, (re)initializes the
        weights when needed and dispatches to the configured solver."""
        # Make sure self.hidden_layer_sizes is a list
        hidden_layer_sizes = self.hidden_layer_sizes
        if not hasattr(hidden_layer_sizes, "__iter__"):
            hidden_layer_sizes = [hidden_layer_sizes]
        hidden_layer_sizes = list(hidden_layer_sizes)
        # Validate input parameters.
        self._validate_hyperparameters()
        if np.any(np.array(hidden_layer_sizes) <= 0):
            raise ValueError("hidden_layer_sizes must be > 0, got %s." %
                             hidden_layer_sizes)
        X, y = self._validate_input(X, y, incremental)
        n_samples, n_features = X.shape
        # Ensure y is 2D
        if y.ndim == 1:
            y = y.reshape((-1, 1))
        self.n_outputs_ = y.shape[1]
        layer_units = ([n_features] + hidden_layer_sizes +
                       [self.n_outputs_])
        # check random state
        self._random_state = check_random_state(self.random_state)
        if not hasattr(self, 'coefs_') or (not self.warm_start and not
                                           incremental):
            # First time training the model
            self._initialize(y, layer_units, X.dtype)
        # Initialize lists of per-layer activations, deltas and gradient buffers
        activations = [X] + [None] * (len(layer_units) - 1)
        deltas = [None] * (len(activations) - 1)
        coef_grads = [np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
                      for n_fan_in_,
                      n_fan_out_ in zip(layer_units[:-1],
                                        layer_units[1:])]
        intercept_grads = [np.empty(n_fan_out_, dtype=X.dtype)
                           for n_fan_out_ in
                           layer_units[1:]]
        # Run the Stochastic optimization solver
        if self.solver in _STOCHASTIC_SOLVERS:
            self._fit_stochastic(X, y, activations, deltas, coef_grads,
                                 intercept_grads, layer_units, incremental)
        # Run the LBFGS solver
        elif self.solver == 'lbfgs':
            self._fit_lbfgs(X, y, activations, deltas, coef_grads,
                            intercept_grads, layer_units)
        return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.max_fun <= 0:
raise ValueError("max_fun must be > 0, got %s." % self.max_fun)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
self.learning_rate)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" %
self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError("nesterovs_momentum must be either True or False,"
" got %s." % self.nesterovs_momentum)
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False,"
" got %s." % self.early_stopping)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError("validation_fraction must be >= 0 and < 1, "
"got %s" % self.validation_fraction)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
if self.n_iter_no_change <= 0:
raise ValueError("n_iter_no_change must be > 0, got %s."
% self.n_iter_no_change)
# raise ValueError if not registered
if self.activation not in ACTIVATIONS:
raise ValueError("The activation '%s' is not supported. Supported "
"activations are %s."
% (self.activation, list(sorted(ACTIVATIONS))))
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
supported_solvers = _STOCHASTIC_SOLVERS + ["lbfgs"]
if self.solver not in supported_solvers:
raise ValueError("The solver %s is not supported. "
" Expected one of: %s" %
(self.solver, ", ".join(supported_solvers)))
    def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
                   intercept_grads, layer_units):
        """Full-batch optimization with scipy's L-BFGS-B on the packed
        parameter vector (see _pack/_unpack)."""
        # Store meta information for the parameters
        self._coef_indptr = []
        self._intercept_indptr = []
        start = 0
        # Save sizes and indices of coefficients for faster unpacking
        for i in range(self.n_layers_ - 1):
            n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
            end = start + (n_fan_in * n_fan_out)
            self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
            start = end
        # Save sizes and indices of intercepts for faster unpacking
        for i in range(self.n_layers_ - 1):
            end = start + layer_units[i + 1]
            self._intercept_indptr.append((start, end))
            start = end
        # Run LBFGS
        packed_coef_inter = _pack(self.coefs_,
                                  self.intercepts_)
        if self.verbose is True or self.verbose >= 1:
            iprint = 1
        else:
            iprint = -1
        opt_res = scipy.optimize.minimize(
                self._loss_grad_lbfgs, packed_coef_inter,
                method="L-BFGS-B", jac=True,
                options={
                    "maxfun": self.max_fun,
                    "maxiter": self.max_iter,
                    "iprint": iprint,
                    "gtol": self.tol
                },
                args=(X, y, activations, deltas, coef_grads, intercept_grads))
        self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
        self.loss_ = opt_res.fun
        # write the optimal flat vector back into coefs_/intercepts_
        self._unpack(opt_res.x)
    def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
                        intercept_grads, layer_units, incremental):
        """Mini-batch training loop shared by the 'sgd' and 'adam' solvers.

        Handles optional early stopping on a held-out validation split,
        shuffling, batching and the no-improvement stopping criterion.
        """
        if not incremental or not hasattr(self, '_optimizer'):
            params = self.coefs_ + self.intercepts_
            if self.solver == 'sgd':
                self._optimizer = SGDOptimizer(
                    params, self.learning_rate_init, self.learning_rate,
                    self.momentum, self.nesterovs_momentum, self.power_t)
            elif self.solver == 'adam':
                self._optimizer = AdamOptimizer(
                    params, self.learning_rate_init, self.beta_1, self.beta_2,
                    self.epsilon)
        # early_stopping in partial_fit doesn't make sense
        early_stopping = self.early_stopping and not incremental
        if early_stopping:
            # don't stratify in multilabel classification
            should_stratify = is_classifier(self) and self.n_outputs_ == 1
            stratify = y if should_stratify else None
            X, X_val, y, y_val = train_test_split(
                X, y, random_state=self._random_state,
                test_size=self.validation_fraction,
                stratify=stratify)
            if is_classifier(self):
                y_val = self._label_binarizer.inverse_transform(y_val)
        else:
            X_val = None
            y_val = None
        n_samples = X.shape[0]
        sample_idx = np.arange(n_samples, dtype=int)
        if self.batch_size == 'auto':
            batch_size = min(200, n_samples)
        else:
            if self.batch_size < 1 or self.batch_size > n_samples:
                warnings.warn("Got `batch_size` less than 1 or larger than "
                              "sample size. It is going to be clipped")
            batch_size = np.clip(self.batch_size, 1, n_samples)
        try:
            for it in range(self.max_iter):
                if self.shuffle:
                    # Only shuffle the sample indices instead of X and y to
                    # reduce the memory footprint. These indices will be used
                    # to slice the X and y.
                    sample_idx = shuffle(sample_idx,
                                         random_state=self._random_state)
                accumulated_loss = 0.0
                for batch_slice in gen_batches(n_samples, batch_size):
                    if self.shuffle:
                        X_batch = _safe_indexing(X, sample_idx[batch_slice])
                        y_batch = y[sample_idx[batch_slice]]
                    else:
                        X_batch = X[batch_slice]
                        y_batch = y[batch_slice]
                    activations[0] = X_batch
                    batch_loss, coef_grads, intercept_grads = self._backprop(
                        X_batch, y_batch, activations, deltas,
                        coef_grads, intercept_grads)
                    # weight the batch loss by the batch size so the epoch
                    # loss below is a proper per-sample mean
                    accumulated_loss += batch_loss * (batch_slice.stop -
                                                      batch_slice.start)
                    # update weights
                    grads = coef_grads + intercept_grads
                    self._optimizer.update_params(grads)
                self.n_iter_ += 1
                self.loss_ = accumulated_loss / X.shape[0]
                self.t_ += n_samples
                self.loss_curve_.append(self.loss_)
                if self.verbose:
                    print("Iteration %d, loss = %.8f" % (self.n_iter_,
                                                         self.loss_))
                # update no_improvement_count based on training loss or
                # validation score according to early_stopping
                self._update_no_improvement_count(early_stopping, X_val, y_val)
                # for learning rate that needs to be updated at iteration end
                self._optimizer.iteration_ends(self.t_)
                if self._no_improvement_count > self.n_iter_no_change:
                    # not better than last `n_iter_no_change` iterations by tol
                    # stop or decrease learning rate
                    if early_stopping:
                        msg = ("Validation score did not improve more than "
                               "tol=%f for %d consecutive epochs." % (
                                   self.tol, self.n_iter_no_change))
                    else:
                        msg = ("Training loss did not improve more than tol=%f"
                               " for %d consecutive epochs." % (
                                   self.tol, self.n_iter_no_change))
                    is_stopping = self._optimizer.trigger_stopping(
                        msg, self.verbose)
                    if is_stopping:
                        break
                    else:
                        self._no_improvement_count = 0
                if incremental:
                    # partial_fit performs exactly one epoch
                    break
            if self.n_iter_ == self.max_iter:
                warnings.warn(
                    "Stochastic Optimizer: Maximum iterations (%d) "
                    "reached and the optimization hasn't converged yet."
                    % self.max_iter, ConvergenceWarning)
        except KeyboardInterrupt:
            warnings.warn("Training interrupted by user.")
        if early_stopping:
            # restore best weights
            self.coefs_ = self._best_coefs
            self.intercepts_ = self._best_intercepts
    def _update_no_improvement_count(self, early_stopping, X_val, y_val):
        # Bookkeeping for the stochastic solvers: counts how many consecutive
        # epochs failed to improve by at least ``tol``. The caller
        # (_fit_stochastic) uses the count to stop training or shrink the
        # learning rate.
        if early_stopping:
            # compute validation score, use that for stopping
            self.validation_scores_.append(self.score(X_val, y_val))

            if self.verbose:
                print("Validation score: %f" % self.validation_scores_[-1])
            # update best parameters
            # use validation_scores_, not loss_curve_
            # let's hope no-one overloads .score with mse
            last_valid_score = self.validation_scores_[-1]

            # "No improvement" = failing to beat the best score so far by at
            # least tol (for scores, higher is better).
            if last_valid_score < (self.best_validation_score_ +
                                   self.tol):
                self._no_improvement_count += 1
            else:
                self._no_improvement_count = 0

            if last_valid_score > self.best_validation_score_:
                self.best_validation_score_ = last_valid_score
                # Snapshot the weights so training can restore the best model
                # when it finishes (see the early_stopping restore in _fit).
                self._best_coefs = [c.copy() for c in self.coefs_]
                self._best_intercepts = [i.copy()
                                         for i in self.intercepts_]
        else:
            # No validation set: track the training loss instead (lower is
            # better), mirroring the score-based logic above.
            if self.loss_curve_[-1] > self.best_loss_ - self.tol:
                self._no_improvement_count += 1
            else:
                self._no_improvement_count = 0
            if self.loss_curve_[-1] < self.best_loss_:
                self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target(s) y.
Parameters
----------
X : ndarray or sparse matrix of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError("partial_fit is only available for stochastic"
" optimizers. %s is not stochastic."
% self.solver)
return self._partial_fit
    def _partial_fit(self, X, y):
        # Shared implementation behind the ``partial_fit`` property: one
        # incremental pass that keeps previously learned weights.
        return self._fit(X, y, incremental=True)
class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
    """Multi-layer Perceptron classifier.

    This model optimizes the log-loss function using LBFGS or stochastic
    gradient descent.

    .. versionadded:: 0.18

    Parameters
    ----------
    hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
        The ith element represents the number of neurons in the ith
        hidden layer.

    activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
        Activation function for the hidden layer.

        - 'identity', no-op activation, useful to implement linear bottleneck,
          returns f(x) = x

        - 'logistic', the logistic sigmoid function,
          returns f(x) = 1 / (1 + exp(-x)).

        - 'tanh', the hyperbolic tan function,
          returns f(x) = tanh(x).

        - 'relu', the rectified linear unit function,
          returns f(x) = max(0, x)

    solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
        The solver for weight optimization.

        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.

        - 'sgd' refers to stochastic gradient descent.

        - 'adam' refers to a stochastic gradient-based optimizer proposed
          by Kingma, Diederik, and Jimmy Ba

        Note: The default solver 'adam' works pretty well on relatively
        large datasets (with thousands of training samples or more) in terms of
        both training time and validation score.
        For small datasets, however, 'lbfgs' can converge faster and perform
        better.

    alpha : float, default=0.0001
        L2 penalty (regularization term) parameter.

    batch_size : int, default='auto'
        Size of minibatches for stochastic optimizers.
        If the solver is 'lbfgs', the classifier will not use minibatch.
        When set to "auto", `batch_size=min(200, n_samples)`

    learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
        Learning rate schedule for weight updates.

        - 'constant' is a constant learning rate given by
          'learning_rate_init'.

        - 'invscaling' gradually decreases the learning rate at each
          time step 't' using an inverse scaling exponent of 'power_t'.
          effective_learning_rate = learning_rate_init / pow(t, power_t)

        - 'adaptive' keeps the learning rate constant to
          'learning_rate_init' as long as training loss keeps decreasing.
          Each time two consecutive epochs fail to decrease training loss by at
          least tol, or fail to increase validation score by at least tol if
          'early_stopping' is on, the current learning rate is divided by 5.

        Only used when ``solver='sgd'``.

    learning_rate_init : double, default=0.001
        The initial learning rate used. It controls the step-size
        in updating the weights. Only used when solver='sgd' or 'adam'.

    power_t : double, default=0.5
        The exponent for inverse scaling learning rate.
        It is used in updating effective learning rate when the learning_rate
        is set to 'invscaling'. Only used when solver='sgd'.

    max_iter : int, default=200
        Maximum number of iterations. The solver iterates until convergence
        (determined by 'tol') or this number of iterations. For stochastic
        solvers ('sgd', 'adam'), note that this determines the number of epochs
        (how many times each data point will be used), not the number of
        gradient steps.

    shuffle : bool, default=True
        Whether to shuffle samples in each iteration. Only used when
        solver='sgd' or 'adam'.

    random_state : int, RandomState instance, default=None
        Determines random number generation for weights and bias
        initialization, train-test split if early stopping is used, and batch
        sampling when solver='sgd' or 'adam'.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    tol : float, default=1e-4
        Tolerance for the optimization. When the loss or score is not improving
        by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
        unless ``learning_rate`` is set to 'adaptive', convergence is
        considered to be reached and training stops.

    verbose : bool, default=False
        Whether to print progress messages to stdout.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous
        call to fit as initialization, otherwise, just erase the
        previous solution. See :term:`the Glossary <warm_start>`.

    momentum : float, default=0.9
        Momentum for gradient descent update. Should be between 0 and 1. Only
        used when solver='sgd'.

    nesterovs_momentum : bool, default=True
        Whether to use Nesterov's momentum. Only used when solver='sgd' and
        momentum > 0.

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to true, it will automatically set
        aside 10% of training data as validation and terminate training when
        validation score is not improving by at least tol for
        ``n_iter_no_change`` consecutive epochs. The split is stratified,
        except in a multilabel setting.
        Only effective when solver='sgd' or 'adam'

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True

    beta_1 : float, default=0.9
        Exponential decay rate for estimates of first moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    beta_2 : float, default=0.999
        Exponential decay rate for estimates of second moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    epsilon : float, default=1e-8
        Value for numerical stability in adam. Only used when solver='adam'

    n_iter_no_change : int, default=10
        Maximum number of epochs to not meet ``tol`` improvement.
        Only effective when solver='sgd' or 'adam'

        .. versionadded:: 0.20

    max_fun : int, default=15000
        Only used when solver='lbfgs'. Maximum number of loss function calls.
        The solver iterates until convergence (determined by 'tol'), number
        of iterations reaches max_iter, or this number of loss function calls.
        Note that number of loss function calls will be greater than or equal
        to the number of iterations for the `MLPClassifier`.

        .. versionadded:: 0.22

    Attributes
    ----------
    classes_ : ndarray or list of ndarray of shape (n_classes,)
        Class labels for each output.

    loss_ : float
        The current loss computed with the loss function.

    best_loss_ : float
        The minimum loss reached by the solver throughout fitting.

    loss_curve_ : list of shape (`n_iter_`,)
        The ith element in the list represents the loss at the ith iteration.

    t_ : int
        The number of training samples seen by the solver during fitting.
        Mathematically equals `n_iter_ * X.shape[0]`; it is used by the
        optimizer's learning rate scheduler.

    coefs_ : list of shape (n_layers - 1,)
        The ith element in the list represents the weight matrix corresponding
        to layer i.

    intercepts_ : list of shape (n_layers - 1,)
        The ith element in the list represents the bias vector corresponding to
        layer i + 1.

    n_iter_ : int
        The number of iterations the solver has ran.

    n_layers_ : int
        Number of layers.

    n_outputs_ : int
        Number of outputs.

    out_activation_ : str
        Name of the output activation function.

    Examples
    --------
    >>> from sklearn.neural_network import MLPClassifier
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = make_classification(n_samples=100, random_state=1)
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
    ...                                                     random_state=1)
    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
    >>> clf.predict_proba(X_test[:1])
    array([[0.038..., 0.961...]])
    >>> clf.predict(X_test[:5, :])
    array([1, 0, 1, 0, 1])
    >>> clf.score(X_test, y_test)
    0.8...

    Notes
    -----
    MLPClassifier trains iteratively since at each time step
    the partial derivatives of the loss function with respect to the model
    parameters are computed to update the parameters.

    It can also have a regularization term added to the loss function
    that shrinks model parameters to prevent overfitting.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values.

    References
    ----------
    Hinton, Geoffrey E.
        "Connectionist learning procedures." Artificial intelligence 40.1
        (1989): 185-234.

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
        training deep feedforward neural networks." International Conference
        on Artificial Intelligence and Statistics. 2010.

    He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
        performance on imagenet classification." arXiv preprint
        arXiv:1502.01852 (2015).

    Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
        optimization." arXiv preprint arXiv:1412.6980 (2014).
    """
    @_deprecate_positional_args
    def __init__(self, hidden_layer_sizes=(100,), activation="relu", *,
                 solver='adam', alpha=0.0001,
                 batch_size='auto', learning_rate="constant",
                 learning_rate_init=0.001, power_t=0.5, max_iter=200,
                 shuffle=True, random_state=None, tol=1e-4,
                 verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, n_iter_no_change=10, max_fun=15000):
        # All hyper-parameters are forwarded unchanged; the classifier only
        # pins the loss to 'log_loss'.
        super().__init__(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation, solver=solver, alpha=alpha,
            batch_size=batch_size, learning_rate=learning_rate,
            learning_rate_init=learning_rate_init, power_t=power_t,
            max_iter=max_iter, loss='log_loss', shuffle=shuffle,
            random_state=random_state, tol=tol, verbose=verbose,
            warm_start=warm_start, momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
            n_iter_no_change=n_iter_no_change, max_fun=max_fun)

    def _validate_input(self, X, y, incremental):
        # Joint X/y validation; float32 inputs stay float32 to save memory.
        X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],
                                   multi_output=True,
                                   dtype=(np.float64, np.float32))
        if y.ndim == 2 and y.shape[1] == 1:
            y = column_or_1d(y, warn=True)

        if not incremental:
            # Fresh fit: (re)learn the label set from this y.
            self._label_binarizer = LabelBinarizer()
            self._label_binarizer.fit(y)
            self.classes_ = self._label_binarizer.classes_
        elif self.warm_start:
            # Warm-started fit: the label set must match the previous fit
            # exactly.
            classes = unique_labels(y)
            if set(classes) != set(self.classes_):
                raise ValueError("warm_start can only be used where `y` has "
                                 "the same classes as in the previous "
                                 "call to fit. Previously got %s, `y` has %s" %
                                 (self.classes_, classes))
        else:
            # partial_fit: this batch may contain a subset of the known
            # classes, but never an unseen one.
            classes = unique_labels(y)
            if len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
                raise ValueError("`y` has classes not in `self.classes_`."
                                 " `self.classes_` has %s. 'y' has %s." %
                                 (self.classes_, classes))

        # This downcast to bool is to prevent upcasting when working with
        # float32 data
        y = self._label_binarizer.transform(y).astype(bool)
        return X, y

    def predict(self, X):
        """Predict using the multi-layer perceptron classifier

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y : ndarray, shape (n_samples,) or (n_samples, n_classes)
            The predicted classes.
        """
        check_is_fitted(self)
        y_pred = self._forward_pass_fast(X)

        if self.n_outputs_ == 1:
            # Binary case: flatten the (n_samples, 1) activations so the
            # binarizer can invert them.
            y_pred = y_pred.ravel()

        return self._label_binarizer.inverse_transform(y_pred)

    def fit(self, X, y):
        """Fit the model to data matrix X and target(s) y.

        Parameters
        ----------
        X : ndarray or sparse matrix of shape (n_samples, n_features)
            The input data.

        y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).

        Returns
        -------
        self : returns a trained MLP model.
        """
        # A warm-started, already-fitted estimator is refit incrementally so
        # _validate_input checks the labels against the previous classes_.
        return self._fit(X, y, incremental=(self.warm_start and
                                            hasattr(self, "classes_")))

    @property
    def partial_fit(self):
        """Update the model with a single iteration over the given data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        y : array-like, shape (n_samples,)
            The target values.

        classes : array, shape (n_classes), default None
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        Returns
        -------
        self : returns a trained MLP model.
        """
        if self.solver not in _STOCHASTIC_SOLVERS:
            raise AttributeError("partial_fit is only available for stochastic"
                                 " optimizer. %s is not stochastic"
                                 % self.solver)
        return self._partial_fit

    def _partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # First call: set up the label binarizer. For multilabel targets
            # fit it on y itself; otherwise fit on the full `classes` list so
            # later batches may contain only a subset of the labels.
            self._label_binarizer = LabelBinarizer()
            if type_of_target(y).startswith('multilabel'):
                self._label_binarizer.fit(y)
            else:
                self._label_binarizer.fit(classes)

        super()._partial_fit(X, y)

        return self

    def predict_log_proba(self, X):
        """Return the log of probability estimates.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        log_y_prob : ndarray of shape (n_samples, n_classes)
            The predicted log-probability of the sample for each class
            in the model, where classes are ordered as they are in
            `self.classes_`. Equivalent to log(predict_proba(X))
        """
        y_prob = self.predict_proba(X)
        # In-place log on the freshly computed local array; avoids a copy.
        return np.log(y_prob, out=y_prob)

    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y_prob : ndarray of shape (n_samples, n_classes)
            The predicted probability of the sample for each class in the
            model, where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self)
        y_pred = self._forward_pass_fast(X)

        if self.n_outputs_ == 1:
            y_pred = y_pred.ravel()

        if y_pred.ndim == 1:
            # Binary case: the network outputs P(class 1); derive the
            # complementary column so both classes are reported.
            return np.vstack([1 - y_pred, y_pred]).T
        else:
            return y_pred
class MLPRegressor(RegressorMixin, BaseMultilayerPerceptron):
    """Multi-layer Perceptron regressor.

    This model optimizes the squared-loss using LBFGS or stochastic gradient
    descent.

    .. versionadded:: 0.18

    Parameters
    ----------
    hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
        The ith element represents the number of neurons in the ith
        hidden layer.

    activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
        Activation function for the hidden layer.

        - 'identity', no-op activation, useful to implement linear bottleneck,
          returns f(x) = x

        - 'logistic', the logistic sigmoid function,
          returns f(x) = 1 / (1 + exp(-x)).

        - 'tanh', the hyperbolic tan function,
          returns f(x) = tanh(x).

        - 'relu', the rectified linear unit function,
          returns f(x) = max(0, x)

    solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
        The solver for weight optimization.

        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.

        - 'sgd' refers to stochastic gradient descent.

        - 'adam' refers to a stochastic gradient-based optimizer proposed by
          Kingma, Diederik, and Jimmy Ba

        Note: The default solver 'adam' works pretty well on relatively
        large datasets (with thousands of training samples or more) in terms of
        both training time and validation score.
        For small datasets, however, 'lbfgs' can converge faster and perform
        better.

    alpha : float, default=0.0001
        L2 penalty (regularization term) parameter.

    batch_size : int, default='auto'
        Size of minibatches for stochastic optimizers.
        If the solver is 'lbfgs', the classifier will not use minibatch.
        When set to "auto", `batch_size=min(200, n_samples)`

    learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
        Learning rate schedule for weight updates.

        - 'constant' is a constant learning rate given by
          'learning_rate_init'.

        - 'invscaling' gradually decreases the learning rate ``learning_rate_``
          at each time step 't' using an inverse scaling exponent of 'power_t'.
          effective_learning_rate = learning_rate_init / pow(t, power_t)

        - 'adaptive' keeps the learning rate constant to
          'learning_rate_init' as long as training loss keeps decreasing.
          Each time two consecutive epochs fail to decrease training loss by at
          least tol, or fail to increase validation score by at least tol if
          'early_stopping' is on, the current learning rate is divided by 5.

        Only used when solver='sgd'.

    learning_rate_init : double, default=0.001
        The initial learning rate used. It controls the step-size
        in updating the weights. Only used when solver='sgd' or 'adam'.

    power_t : double, default=0.5
        The exponent for inverse scaling learning rate.
        It is used in updating effective learning rate when the learning_rate
        is set to 'invscaling'. Only used when solver='sgd'.

    max_iter : int, default=200
        Maximum number of iterations. The solver iterates until convergence
        (determined by 'tol') or this number of iterations. For stochastic
        solvers ('sgd', 'adam'), note that this determines the number of epochs
        (how many times each data point will be used), not the number of
        gradient steps.

    shuffle : bool, default=True
        Whether to shuffle samples in each iteration. Only used when
        solver='sgd' or 'adam'.

    random_state : int, RandomState instance, default=None
        Determines random number generation for weights and bias
        initialization, train-test split if early stopping is used, and batch
        sampling when solver='sgd' or 'adam'.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    tol : float, default=1e-4
        Tolerance for the optimization. When the loss or score is not improving
        by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
        unless ``learning_rate`` is set to 'adaptive', convergence is
        considered to be reached and training stops.

    verbose : bool, default=False
        Whether to print progress messages to stdout.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous
        call to fit as initialization, otherwise, just erase the
        previous solution. See :term:`the Glossary <warm_start>`.

    momentum : float, default=0.9
        Momentum for gradient descent update. Should be between 0 and 1. Only
        used when solver='sgd'.

    nesterovs_momentum : bool, default=True
        Whether to use Nesterov's momentum. Only used when solver='sgd' and
        momentum > 0.

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to true, it will automatically set
        aside 10% of training data as validation and terminate training when
        validation score is not improving by at least ``tol`` for
        ``n_iter_no_change`` consecutive epochs.
        Only effective when solver='sgd' or 'adam'

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True

    beta_1 : float, default=0.9
        Exponential decay rate for estimates of first moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    beta_2 : float, default=0.999
        Exponential decay rate for estimates of second moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    epsilon : float, default=1e-8
        Value for numerical stability in adam. Only used when solver='adam'

    n_iter_no_change : int, default=10
        Maximum number of epochs to not meet ``tol`` improvement.
        Only effective when solver='sgd' or 'adam'

        .. versionadded:: 0.20

    max_fun : int, default=15000
        Only used when solver='lbfgs'. Maximum number of function calls.
        The solver iterates until convergence (determined by 'tol'), number
        of iterations reaches max_iter, or this number of function calls.
        Note that number of function calls will be greater than or equal to
        the number of iterations for the MLPRegressor.

        .. versionadded:: 0.22

    Attributes
    ----------
    loss_ : float
        The current loss computed with the loss function.

    best_loss_ : float
        The minimum loss reached by the solver throughout fitting.

    loss_curve_ : list of shape (`n_iter_`,)
        The ith element in the list represents the loss at the ith iteration.

    t_ : int
        The number of training samples seen by the solver during fitting.
        Mathematically equals `n_iter_ * X.shape[0]`; it is used by the
        optimizer's learning rate scheduler.

    coefs_ : list of shape (n_layers - 1,)
        The ith element in the list represents the weight matrix corresponding
        to layer i.

    intercepts_ : list of shape (n_layers - 1,)
        The ith element in the list represents the bias vector corresponding to
        layer i + 1.

    n_iter_ : int
        The number of iterations the solver has ran.

    n_layers_ : int
        Number of layers.

    n_outputs_ : int
        Number of outputs.

    out_activation_ : str
        Name of the output activation function.

    Examples
    --------
    >>> from sklearn.neural_network import MLPRegressor
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = make_regression(n_samples=200, random_state=1)
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
    ...                                                     random_state=1)
    >>> regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
    >>> regr.predict(X_test[:2])
    array([-0.9..., -7.1...])
    >>> regr.score(X_test, y_test)
    0.4...

    Notes
    -----
    MLPRegressor trains iteratively since at each time step
    the partial derivatives of the loss function with respect to the model
    parameters are computed to update the parameters.

    It can also have a regularization term added to the loss function
    that shrinks model parameters to prevent overfitting.

    This implementation works with data represented as dense and sparse numpy
    arrays of floating point values.

    References
    ----------
    Hinton, Geoffrey E.
        "Connectionist learning procedures." Artificial intelligence 40.1
        (1989): 185-234.

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
        training deep feedforward neural networks." International Conference
        on Artificial Intelligence and Statistics. 2010.

    He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
        performance on imagenet classification." arXiv preprint
        arXiv:1502.01852 (2015).

    Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
        optimization." arXiv preprint arXiv:1412.6980 (2014).
    """
    @_deprecate_positional_args
    def __init__(self, hidden_layer_sizes=(100,), activation="relu", *,
                 solver='adam', alpha=0.0001,
                 batch_size='auto', learning_rate="constant",
                 learning_rate_init=0.001,
                 power_t=0.5, max_iter=200, shuffle=True,
                 random_state=None, tol=1e-4,
                 verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, n_iter_no_change=10, max_fun=15000):
        # All hyper-parameters are forwarded unchanged; the regressor only
        # pins the loss to 'squared_loss'.
        super().__init__(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation, solver=solver, alpha=alpha,
            batch_size=batch_size, learning_rate=learning_rate,
            learning_rate_init=learning_rate_init, power_t=power_t,
            max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
            random_state=random_state, tol=tol, verbose=verbose,
            warm_start=warm_start, momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
            n_iter_no_change=n_iter_no_change, max_fun=max_fun)

    def predict(self, X):
        """Predict using the multi-layer perceptron model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y : ndarray of shape (n_samples, n_outputs)
            The predicted values.
        """
        check_is_fitted(self)
        y_pred = self._forward_pass_fast(X)
        if y_pred.shape[1] == 1:
            # Single-output regression: return a flat 1d array.
            return y_pred.ravel()
        return y_pred

    def _validate_input(self, X, y, incremental):
        # Joint X/y validation; float32 inputs stay float32 to save memory,
        # and y must be numeric for regression.
        X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],
                                   multi_output=True, y_numeric=True,
                                   dtype=(np.float64, np.float32))
        if y.ndim == 2 and y.shape[1] == 1:
            y = column_or_1d(y, warn=True)
        return X, y
| bnaul/scikit-learn | sklearn/neural_network/_multilayer_perceptron.py | Python | bsd-3-clause | 56,734 |
#!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this test needs rework and is not functional currently.
# Bail out before the Python 2-only code below is ever reached.
print("VOL_TEST kv_store skipped")  # fixed typo: was "skippped"
exit(0)
'''
Standalone tests for metadata volume operations
Create vm and try to attach/detach disks with metadata
This test is NOT a part of 'make testremote' or Drone CI pass
'''
import os
import atexit
import sys, getopt
import subprocess
import vmci_srv as vmci
import volume_kv as kv
# Default volumes dir
vmName = "testVM"  # name of the throwaway dummy VM created for the test
# Volume names created/attached/detached/removed by the test run below.
vols = ['vol1', 'vol2', 'vol3', 'vol4', 'vol5', 'vol6', 'vol7', 'vol8', 'vol9',
        'vol10']
volopts = "size:1gb"  # NOTE(review): appears unused in this file -- confirm
def doCreate(volDir):
    """Create every test volume in volDir and verify its KV metadata.

    Each freshly created volume is expected to report DETACHED status.
    Failures are printed but do not abort the remaining checks.
    """
    # print() with a single (possibly %-formatted) argument behaves
    # identically under Python 2 and 3; the bare print statements used
    # previously were a SyntaxError under Python 3.
    print("Creating volumes")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        vmci.createVMDK(volPath, vol, None)

    print("Verifying volume metadata")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        volDict = kv.getAll(volPath)
        if not volDict:
            print("Failed to fetch volume meta-data for %s" % volPath)
            continue
        print("Vol metadata 'status' - %s, 'volOpts' - %s" %
              (volDict[kv.STATUS], volDict[kv.VOL_OPTS]))
        if volDict[kv.STATUS] != kv.DETACHED:
            print('Found volume %s with status %s, expected %s' %
                  (vol, volDict[kv.STATUS], kv.DETACHED))
    return
def doAttach(volDir, vmName):
    """Attach every test volume to vmName and verify KV status is ATTACHED.

    Mismatches are printed but do not abort the remaining checks.
    """
    # Converted Python 2 print statements to the print() form, which is
    # valid and equivalent on both Python 2 and 3.
    print("Attaching volumes")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        vmci.attachVMDK(volPath, vmName)

    print("Verifying volume metadata")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        volDict = kv.getAll(volPath)
        if volDict[kv.STATUS] != kv.ATTACHED:
            print('Found volume %s with status %s, expected %s' %
                  (vol, volDict[kv.STATUS], kv.ATTACHED))
    return
def doDetach(volDir, vmName):
    """Detach every test volume from vmName and verify KV status is DETACHED.

    Mismatches are printed but do not abort the remaining checks.
    """
    # Converted Python 2 print statements to the print() form, which is
    # valid and equivalent on both Python 2 and 3.
    print("Detaching volumes")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        vmci.detachVMDK(volPath, vmName)

    print("Verifying volume metadata")
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        volDict = kv.getAll(volPath)
        if volDict[kv.STATUS] != kv.DETACHED:
            print('Found volume %s with status %s, expected %s' %
                  (vol, volDict[kv.STATUS], kv.DETACHED))
    return
def doVolDelete(volDir):
    """Remove every test volume from volDir."""
    # print() form is valid on both Python 2 and 3 (the old print
    # statement was a Python 3 SyntaxError).
    print('Removing volumes')
    for vol in vols:
        volPath = os.path.join(volDir, "%s.vmdk" % vol)
        vmci.removeVMDK(volPath)
    return
def cleanup(vmId):
    """Power off and destroy the dummy VM (registered via atexit in main)."""
    subprocess.call('vim-cmd vmsvc/power.off %s' % vmId, shell=True)
    subprocess.call('vim-cmd vmsvc/destroy %s' % vmId, shell=True)
def main(argv):
    """Entry point: create a dummy VM, then exercise volume create/delete.

    argv : list of command-line arguments (without the program name).
        Requires ``-d <test dir>`` naming the datastore directory to use;
        ``-h`` prints usage and exits.
    """
    if argv == []:
        print('vol_tests.py -d <test dir>')
        sys.exit(2)
    try:
        opts, args = getopt.getopt(argv, "hd:")
    except getopt.GetoptError:
        print('vol_tests.py -d <test dir>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('vol_tests.py -v <vm config path> -d <volumes dir>')
            sys.exit()
        elif opt == '-d':
            # Fixed: the old test was `opt in ("-d")`, and ("-d") is just the
            # string "-d" (no tuple), i.e. an accidental substring check.
            volDir = arg

    # Init logging
    logfile = "%s/test.log" % volDir
    vmci.LogSetup(logfile)

    # Init KV
    kv.init()

    # Create a throwaway VM; stdout carries its vim-cmd VM id.
    cmd = 'vim-cmd vmsvc/createdummyvm %s %s' % (vmName, volDir)
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True)
    s = proc.communicate()[0]
    vmId = s.rstrip()
    ret = proc.returncode
    # Make sure the VM is torn down however the test exits.
    atexit.register(cleanup, vmId)
    if ret != 0:
        print("Failed to power on VM, exiting %s" % vmName)
        sys.exit(0)

    # Start VM
    print("Starting VM %s with id %s ..." % (vmName, vmId))
    cmd = 'vim-cmd vmsvc/power.on %s' % vmId
    subprocess.call(cmd, shell=True)

    # Create volumes
    doCreate(volDir)

    # Attach/Re-attach volumes
    #doAttach(volDir, vmName)

    # Detach volumes
    #doDetach(volDir, vmName)

    # Delete volumes
    doVolDelete(volDir)
# Script entry point: run the volume tests (the old "start the server"
# comment was inaccurate for this file).
if __name__ == "__main__":
    main(sys.argv[1:])
| BaluDontu/docker-volume-vsphere | esx_service/vol_test.py | Python | apache-2.0 | 4,800 |
#!/usr/bin/env python
import subprocess
short_name = 'Opt 3'  # label shown in the parent menu
disp_name = 'Option 3 Submenu'  # full title displayed for this entry
otype = 'Routine'  # entry type consumed by the menu framework
need = ['need 1: ', 'need 2: ', 'need 3: ']  # prompts shown to the user
answers = []  # validated responses collected by run()
def run():
    """Prompt for each entry in ``need`` and display the collected answers.

    Re-prompts until each response passes validate(), then prints a summary
    and waits for a keypress before returning to the parent menu.
    """
    global answers
    # Reset the module-level list: previously answers accumulated across
    # repeated visits to this menu entry.
    answers = []
    subprocess.call('clear')
    i = 0
    while i < len(need):
        ans = input(need[i])
        if validate(ans):
            answers.append(ans)
            i += 1
    # join() avoids the stray leading ", " the old string builder produced.
    print('Doing something with {}'.format(', '.join(answers)))
    input()  # pause so the user can read the output
    return
def validate(char):
    """Return True for a non-empty (truthy) response, False otherwise."""
    return bool(char)
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.views.generic import FormView, TemplateView
from .models import Player
from .forms import PlayerForm
# Create your views here.
class HomePage(TemplateView):
    # Static landing page; renders the template below with no extra context.
    template_name = 'players/index.html'
class PlayerList(ListView):
    # Lists all Player objects; available in the template as ``players``.
    model = Player
    # NOTE(review): unlike the other views this template name has no
    # 'players/' app prefix -- confirm 'player_list.html' resolves as intended.
    template_name = 'player_list.html'
    context_object_name = 'players'
class PlayerDetail(DetailView):
    # Shows a single Player; with template_name commented out, DetailView
    # falls back to its default '<app>/<model>_detail.html' template.
    model = Player
    #template_name = 'player_detail.html'
    context_object_name = 'player'
class SignUpForm(FormView):
    # Renders PlayerForm and redirects to the site root on valid submission.
    # NOTE(review): form_valid() is not overridden, so the submitted form is
    # never saved here -- confirm whether persisting is handled elsewhere.
    template_name = 'players/signup.html'
    form_class = PlayerForm
    success_url = '/'
| biddellns/litsl | players/views.py | Python | gpl-3.0 | 675 |
# -*- coding: utf-8 -*-
# Dojima, a markets client.
# Copyright (C) 2012 Emery Hemingway
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public Licnense for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import otapi
from PyQt4 import QtCore, QtGui
#!! TODO check the size of files before importing, else bad things might happen !!#
class _ContractImportDialog(QtGui.QDialog):
    """Base dialog for importing Open Transactions contracts.

    Presents a text box plus File/Paste/Import buttons. Subclasses must
    provide ``contract_import_method(text)``, which hands the contract to
    OTAPI and returns 1 on success (the OTAPI convention).
    """

    file_filter = QtCore.QCoreApplication.translate('ContractImportDialog',
                                                    "Open Transactions contracts (*.otc);;"
                                                    "All files (*)")

    def __init__(self, parent=None):
        super(_ContractImportDialog, self).__init__(parent)
        # BUGFIX: this flag was previously written as both 'import_occurred'
        # (here) and 'import_occured' (in parse_contract/close), so close()
        # could raise AttributeError and the flag set here was never read.
        # One consistent spelling is used throughout now.
        self.import_occurred = False
        # Start file browsing from the user's home directory; updated to the
        # last-used directory after each successful file selection.
        self.recent_dir = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.HomeLocation)

        self.import_box = QtGui.QPlainTextEdit()
        #TODO perhaps resize the box base on pasted text?
        self.import_box.setMinimumWidth(512)

        # BUGFIX: translation context was misspelled 'SContractImportDialog',
        # breaking consistency with every other string in this class.
        file_button = QtGui.QPushButton(QtCore.QCoreApplication.translate(
            'ContractImportDialog', "File"))
        paste_button = QtGui.QPushButton(QtCore.QCoreApplication.translate(
            'ContractImportDialog', "Paste"))
        import_button = QtGui.QPushButton(QtCore.QCoreApplication.translate(
            'ContractImportDialog', "Import"))

        button_box = QtGui.QDialogButtonBox()
        button_box.addButton(file_button, QtGui.QDialogButtonBox.ActionRole)
        button_box.addButton(paste_button, QtGui.QDialogButtonBox.ActionRole)
        button_box.addButton(import_button, QtGui.QDialogButtonBox.ActionRole)
        button_box.addButton(QtGui.QDialogButtonBox.Close)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.import_box)
        layout.addWidget(button_box)
        self.setLayout(layout)

        file_button.clicked.connect(self.import_file)
        paste_button.clicked.connect(self.paste_text)
        import_button.clicked.connect(self.parse_text)
        button_box.accepted.connect(self.accept)
        # NOTE(review): rejected is also wired to accept() in the original;
        # preserved here — presumably so a Close click still reports any
        # imports that occurred. Confirm against callers before changing.
        button_box.rejected.connect(self.accept)

    def paste_text(self):
        """Replace the import box contents with the system clipboard text."""
        clipboard = QtGui.QApplication.clipboard()
        self.import_box.setPlainText(clipboard.text())

    def import_file(self):
        """Prompt for one or more contract files and parse each in turn."""
        filenames = QtGui.QFileDialog.getOpenFileNames(self,
                                                       QtCore.QCoreApplication.translate(
                                                           'ContractImportDialog', "select contract file"),
                                                       self.recent_dir, self.file_filter)
        if not len(filenames):
            return
        # Remember the directory of the last selection for next time.
        self.recent_dir = os.path.dirname(filenames[-1])
        for filename in filenames:
            contract_file = QtCore.QFile(filename)
            if not contract_file.open(QtCore.QIODevice.ReadOnly |
                                      QtCore.QIODevice.Text):
                # Unreadable file: skip silently and try the rest.
                continue
            stream = QtCore.QTextStream(contract_file)
            # TODO this could exaust memory if one loaded a malicious file
            contract = stream.readAll()
            self.parse_contract(contract)

    def parse_text(self):
        """Parse the contract currently in the import box, if any."""
        text = self.import_box.toPlainText()
        if not len(text):
            # Nothing to import; treat as a plain close.
            self.accept()
            return
        self.parse_contract(text)

    def parse_contract(self, text):
        """Feed *text* to the subclass importer and report the result.

        On success (OTAPI returns 1) the box is cleared and the
        ``import_occurred`` flag is set; otherwise the OTAPI memlog
        message is shown in a warning box.
        """
        # TODO extract the contract name and put that in the result dialog
        # since if multiple contract files are imported the result can only
        # be distinguished by the order they pop up
        parse_result = self.contract_import_method(text)
        subdialog_title = QtCore.QCoreApplication.translate(
            'ContractImportDialog', "contract import result")
        if parse_result == 1:
            self.import_box.clear()
            QtGui.QMessageBox.information(self, subdialog_title,
                                          QtCore.QCoreApplication.translate(
                                              'ContractImportDialog',
                                              "contract imported (if not already present)" ))
            self.import_occurred = True
            # TODO get the proper indexes and emit
            #self.parent.ot_asset_model.dataChanged.emit(
        else:
            QtGui.QMessageBox.warning(self, subdialog_title,
                                      otapi.OTAPI_Basic_PeekMemlogFront())

    def close(self):
        """Finish the dialog: Accepted if anything was imported, else Rejected.

        BUGFIX: the original called accept() and then fell through to an
        unconditional reject(), so the dialog always ended Rejected.
        """
        if self.import_occurred is True:
            self.accept()
        else:
            self.reject()
class AssetContractImportDialog(_ContractImportDialog):
    """Import dialog specialized for OT asset contracts."""

    def contract_import_method(self, text):
        """Register *text* as an asset contract; return the OTAPI status code."""
        result = otapi.OTAPI_Basic_AddAssetContract(text)
        return result
class NymImportDialog(_ContractImportDialog):
    """Import dialog specialized for Nym public/private key files."""

    # Narrow the file picker to Nym key files instead of *.otc contracts.
    file_filter = QtCore.QCoreApplication.translate('NymImportDialog',
                                                    "Nym public/private key (*)")

    def contract_import_method(self, text):
        """Import *text* as a Nym via OTAPI; return the status code."""
        result = otapi.OTAPI_Basic_Wallet_ImportNym(text)
        return result
class ServerContractImportDialog(_ContractImportDialog):
    """Import dialog specialized for OT server contracts."""

    def contract_import_method(self, text):
        """Register *text* as a server contract; return the OTAPI status code."""
        result = otapi.OTAPI_Basic_AddServerContract(text)
        return result
| choperlizer/Dojima | dojima/ui/ot/contract.py | Python | gpl-3.0 | 5,531 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.