code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Grace Yu (grace.yu@huawei.com)"
| baigk/compass-core | compass/deployment/utils/__init__.py | Python | apache-2.0 | 638 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from minifi import * # noqa F403
from argparse import ArgumentParser
from ctypes import cdll # noqa F401
import ctypes # noqa F401
import sys
from _cffi_backend import callback # noqa F401
class GetFilePrinterProcessor(PyProcessor):  # noqa F405
    """Python-side MiNiFi processor demonstrating attribute manipulation.

    On trigger it tags the incoming flow file with a test attribute,
    prints its original filename, and routes it to 'success'.
    """

    def __init__(self, minifi, flow):
        PyProcessor.__init__(self, minifi, flow)  # noqa F405
        self._callback = None

    def _onTriggerCallback(self):
        def onTrigger(session, context):
            incoming = self.get(session, context)
            if not incoming:
                return
            # Adding the same attribute key twice is expected to fail.
            if incoming.add_attribute("python_test", "value"):
                print("Add attribute succeeded")
            if not incoming.add_attribute("python_test", "value2"):
                print("Cannot add the same attribute twice!")
            print("original file name: " + incoming.get_attribute("filename"))
            rel = "success"
            if not self.transfer(session, incoming, rel):
                print("transfer to relationship " + rel + " failed")

        # Wrap the Python closure so the C layer can invoke it.
        return CALLBACK(onTrigger)  # noqa F405
# ---- Script body: build a MiNiFi flow that reads files and sends them to NiFi ----
parser = ArgumentParser()
parser.add_argument("-s", "--dll", dest="dll_file",
                    help="DLL filename", metavar="FILE")
parser.add_argument("-n", "--nifi", dest="nifi_instance",
                    help="NiFi Instance")
parser.add_argument("-i", "--input", dest="input_port",
                    help="NiFi Input Port")
parser.add_argument("-d", "--dir", dest="dir",
                    help="GetFile Dir to monitor", metavar="FILE")
args = parser.parse_args()
""" dll_file is the path to the shared object """
# url/port are encoded to bytes before being handed to the native layer.
minifi = MiNiFi(dll_file=args.dll_file, url=args.nifi_instance.encode('utf-8'), port=args.input_port.encode('utf-8'))  # noqa F405
minifi.set_property("nifi.remote.input.http.enabled", "true")
# Source processor: poll the monitored directory, leaving source files in place.
processor = minifi.add_processor(GetFile())  # noqa F405
processor.set_property("Input Directory", args.dir)
processor.set_property("Keep Source File", "true")
# Register this module's GetFilePrinterProcessor class as a Python processor.
current_module = sys.modules[__name__]
processor = minifi.create_python_processor(current_module, "GetFilePrinterProcessor")
# Pull one flow file through the flow and transmit it to the NiFi input port.
ff = minifi.get_next_flowfile()
if ff:
    minifi.transmit_flowfile(ff)
| dtrodrigues/nifi-minifi-cpp | python/getFile.py | Python | apache-2.0 | 3,036 |
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
from neutron.extensions import providernet as provider
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Plumlib():
    """Fake (by-pass) implementation of the PLUMgrid library.

    Mirrors the real library's interface while doing no work, so the
    PLUMgrid plugin can be exercised by Neutron unit tests without a
    running director.
    """

    def __init__(self):
        LOG.info(_('Python PLUMgrid Fake Library Started '))

    def director_conn(self, director_plumgrid, director_port, timeout,
                      director_admin, director_password):
        LOG.info(_('Fake Director: %s'),
                 director_plumgrid + ':' + director_port)

    def create_network(self, tenant_id, net_db, network):
        # Copy only the provider-network extension attributes.
        wanted = (provider.NETWORK_TYPE,
                  provider.PHYSICAL_NETWORK,
                  provider.SEGMENTATION_ID)
        net_db["network"] = dict(
            (attr, network["network"][attr]) for attr in wanted)
        return net_db

    def update_network(self, tenant_id, net_id):
        pass

    def delete_network(self, net_db, net_id):
        pass

    def create_subnet(self, sub_db, net_db, ipnet):
        pass

    def update_subnet(self, orig_sub_db, new_sub_db, ipnet):
        pass

    def delete_subnet(self, tenant_id, net_db, net_id):
        pass

    def create_port(self, port_db, router_db):
        pass

    def update_port(self, port_db, router_db):
        pass

    def delete_port(self, port_db, router_db):
        pass

    def create_router(self, tenant_id, router_db):
        pass

    def update_router(self, router_db, router_id):
        pass

    def delete_router(self, tenant_id, router_id):
        pass

    def add_router_interface(self, tenant_id, router_id, port_db, ipnet):
        pass

    def remove_router_interface(self, tenant_id, net_id, router_id):
        pass

    def create_floatingip(self, floating_ip):
        pass

    def update_floatingip(self, floating_ip_orig, floating_ip, id):
        pass

    def delete_floatingip(self, floating_ip_orig, id):
        pass

    def disassociate_floatingips(self, fip, port_id):
        # Return just the floating-IP fields callers care about.
        wanted = ("id", "floating_network_id", "floating_ip_address")
        return dict((field, fip[field]) for field in wanted)
| onecloud/neutron | neutron/plugins/plumgrid/drivers/fake_plumlib.py | Python | apache-2.0 | 2,992 |
# Copyright 2014 CERN.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from openstackclient.identity.v3 import federation_protocol
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestProtocol(identity_fakes.TestFederatedIdentity):
    """Common fixture: expose a freshly-reset protocols manager mock."""

    def setUp(self):
        super(TestProtocol, self).setUp()
        # Shortcut to the federation protocols manager; clear any calls
        # recorded while the fixture was being assembled.
        federation = self.app.client_manager.identity.federation
        self.protocols_mock = federation.protocols
        self.protocols_mock.reset_mock()
class TestProtocolCreate(TestProtocol):
    """Unit tests for the `federation protocol create` command."""

    def setUp(self):
        super(TestProtocolCreate, self).setUp()
        # create() returns a fake resource built from the canned fixture.
        proto = copy.deepcopy(identity_fakes.PROTOCOL_OUTPUT)
        resource = fakes.FakeResource(None, proto, loaded=True)
        self.protocols_mock.create.return_value = resource
        self.cmd = federation_protocol.CreateProtocol(self.app, None)

    def test_create_protocol(self):
        # CLI: positional protocol id plus required idp/mapping options.
        argslist = [
            identity_fakes.protocol_id,
            '--identity-provider', identity_fakes.idp_id,
            '--mapping', identity_fakes.mapping_id
        ]
        verifylist = [
            ('federation_protocol', identity_fakes.protocol_id),
            ('identity_provider', identity_fakes.idp_id),
            ('mapping', identity_fakes.mapping_id)
        ]
        parsed_args = self.check_parser(self.cmd, argslist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # The manager must be invoked with exactly these keyword arguments.
        self.protocols_mock.create.assert_called_with(
            protocol_id=identity_fakes.protocol_id,
            identity_provider=identity_fakes.idp_id,
            mapping=identity_fakes.mapping_id)
        collist = ('id', 'identity_provider', 'mapping')
        self.assertEqual(collist, columns)
        datalist = (identity_fakes.protocol_id,
                    identity_fakes.idp_id,
                    identity_fakes.mapping_id)
        self.assertEqual(datalist, data)
class TestProtocolDelete(TestProtocol):
    """Unit tests for the `federation protocol delete` command."""

    def setUp(self):
        super(TestProtocolDelete, self).setUp()
        # This is the return value for utils.find_resource()
        self.protocols_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROTOCOL_OUTPUT),
            loaded=True,
        )
        self.protocols_mock.delete.return_value = None
        self.cmd = federation_protocol.DeleteProtocol(self.app, None)

    def test_delete_identity_provider(self):
        # 'federation_protocol' is a multi-value positional, hence the list
        # in verifylist below.
        arglist = [
            '--identity-provider', identity_fakes.idp_id,
            identity_fakes.protocol_id
        ]
        verifylist = [
            ('federation_protocol', [identity_fakes.protocol_id]),
            ('identity_provider', identity_fakes.idp_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # delete() is called positionally: (identity provider, protocol).
        self.protocols_mock.delete.assert_called_with(
            identity_fakes.idp_id, identity_fakes.protocol_id)
        self.assertIsNone(result)
class TestProtocolList(TestProtocol):
    """Unit tests for the `federation protocol list` command."""

    def setUp(self):
        super(TestProtocolList, self).setUp()
        self.protocols_mock.get.return_value = fakes.FakeResource(
            None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True)
        self.protocols_mock.list.return_value = [fakes.FakeResource(
            None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True)]
        self.cmd = federation_protocol.ListProtocols(self.app, None)

    def test_list_protocols(self):
        arglist = ['--identity-provider', identity_fakes.idp_id]
        verifylist = [('identity_provider', identity_fakes.idp_id)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Only the manager call is verified; columns/data are computed but
        # never asserted. NOTE(review): consider asserting expected output.
        self.protocols_mock.list.assert_called_with(identity_fakes.idp_id)
class TestProtocolSet(TestProtocol):
    """Unit tests for the `federation protocol set` command."""

    def setUp(self):
        super(TestProtocolSet, self).setUp()
        # get() returns the pre-update fixture; update() the updated one.
        self.protocols_mock.get.return_value = fakes.FakeResource(
            None, identity_fakes.PROTOCOL_OUTPUT, loaded=True)
        self.protocols_mock.update.return_value = fakes.FakeResource(
            None, identity_fakes.PROTOCOL_OUTPUT_UPDATED, loaded=True)
        self.cmd = federation_protocol.SetProtocol(self.app, None)

    def test_set_new_mapping(self):
        arglist = [
            identity_fakes.protocol_id,
            '--identity-provider', identity_fakes.idp_id,
            '--mapping', identity_fakes.mapping_id
        ]
        verifylist = [('identity_provider', identity_fakes.idp_id),
                      ('federation_protocol', identity_fakes.protocol_id),
                      ('mapping', identity_fakes.mapping_id)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # update() is called positionally: (idp, protocol, mapping).
        self.protocols_mock.update.assert_called_with(
            identity_fakes.idp_id, identity_fakes.protocol_id,
            identity_fakes.mapping_id)
        collist = ('id', 'identity_provider', 'mapping')
        self.assertEqual(collist, columns)
        # Output reflects the *updated* mapping id.
        datalist = (identity_fakes.protocol_id, identity_fakes.idp_id,
                    identity_fakes.mapping_id_updated)
        self.assertEqual(datalist, data)
class TestProtocolShow(TestProtocol):
    """Unit tests for the `federation protocol show` command."""

    def setUp(self):
        super(TestProtocolShow, self).setUp()
        # loaded=False so the command triggers a lazy fetch via get().
        self.protocols_mock.get.return_value = fakes.FakeResource(
            None, identity_fakes.PROTOCOL_OUTPUT, loaded=False)
        self.cmd = federation_protocol.ShowProtocol(self.app, None)

    def test_show_protocol(self):
        arglist = [identity_fakes.protocol_id, '--identity-provider',
                   identity_fakes.idp_id]
        verifylist = [('federation_protocol', identity_fakes.protocol_id),
                      ('identity_provider', identity_fakes.idp_id)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # get() is called positionally: (identity provider, protocol).
        self.protocols_mock.get.assert_called_with(identity_fakes.idp_id,
                                                   identity_fakes.protocol_id)
        collist = ('id', 'identity_provider', 'mapping')
        self.assertEqual(collist, columns)
        datalist = (identity_fakes.protocol_id,
                    identity_fakes.idp_id,
                    identity_fakes.mapping_id)
        self.assertEqual(datalist, data)
| openstack/python-openstackclient | openstackclient/tests/unit/identity/v3/test_protocol.py | Python | apache-2.0 | 6,982 |
############################################################################
# Joshua Boverhof<JRBoverhof@lbl.gov>, LBNL
# Monte Goode <MMGoode@lbl.gov>, LBNL
# See Copyright for copyright notice!
############################################################################
import exceptions, sys, optparse, os, warnings, traceback
from os.path import isfile, join, split
#from operator import xor
import ZSI
from ConfigParser import ConfigParser
from ZSI.generate.wsdl2python import WriteServiceModule, ServiceDescription as wsdl2pyServiceDescription
from ZSI.wstools import WSDLTools, XMLSchema
from ZSI.wstools.logging import setBasicLoggerDEBUG
from ZSI.generate import containers, utility
from ZSI.generate.utility import NCName_to_ClassName as NC_to_CN, TextProtect
from ZSI.generate.wsdl2dispatch import ServiceModuleWriter as ServiceDescription
from ZSI.generate.wsdl2dispatch import WSAServiceModuleWriter as ServiceDescriptionWSA
warnings.filterwarnings('ignore', '', exceptions.UserWarning)
def SetDebugCallback(option, opt, value, parser, *args, **kwargs):
    """optparse callback for -d/--debug: switch logging to DEBUG and
    re-enable warning output (module-level code silences UserWarning)."""
    setBasicLoggerDEBUG()
    warnings.resetwarnings()
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
    """optparse callback: install the pyclass metaclass for complexTypes.

    kwargs supplies 'module' and 'metaclass'; the metaclass is recorded on
    TypecodeContainerBase and a matching import line is emitted into both
    the types header and the service header.
    """
    from ZSI.generate.containers import ServiceHeaderContainer,\
        TypecodeContainerBase, TypesHeaderContainer
    TypecodeContainerBase.metaclass = kwargs['metaclass']
    import_stmt = 'from %(module)s import %(metaclass)s' % kwargs
    TypesHeaderContainer.imports.append(import_stmt)
    ServiceHeaderContainer.imports.append(import_stmt)
def SetUpLazyEvaluation(option, opt, value, parser, *args, **kwargs):
    """optparse callback for -l/--lazy: defer typecode evaluation until
    serialization/parsing time (experimental recursion workaround)."""
    from ZSI.generate.containers import TypecodeContainerBase
    TypecodeContainerBase.lazy = True
def wsdl2py(args=None):
    """Utility for automatically generating client/service interface code from
    a wsdl definition, and a set of classes representing element declarations
    and type definitions. By default invoking this script produces three files,
    each named after the wsdl definition name, in the current working directory.
    Generated Modules Suffix:
    _client.py -- client locator, rpc proxy port, messages
    _types.py -- typecodes representing
    _server.py -- server-side bindings
    Parameters:
    args -- optional can provide arguments, rather than parsing
    command-line.
    return:
    Default behavior is to return None, if args are provided then
    return names of the generated files.
    """
    op = optparse.OptionParser(usage="USAGE: %wsdl2py [options] WSDL",
                               description=wsdl2py.__doc__)
    # Basic options
    op.add_option("-x", "--schema",
                  action="store_true", dest="schema", default=False,
                  help="process just the schema from an xsd file [no services]")
    op.add_option("-d", "--debug",
                  action="callback", callback=SetDebugCallback,
                  help="debug output")
    # WS Options
    op.add_option("-a", "--address",
                  action="store_true", dest="address", default=False,
                  help="ws-addressing support, must include WS-Addressing schema.")
    # pyclass Metaclass
    op.add_option("-b", "--complexType",
                  action="callback", callback=SetPyclassMetaclass,
                  callback_kwargs={'module':'ZSI.generate.pyclass',
                                   'metaclass':'pyclass_type'},
                  help="add convenience functions for complexTypes, including Getters, Setters, factory methods, and properties (via metaclass). *** DONT USE WITH --simple-naming ***")
    # Lazy Evaluation of Typecodes (done at serialization/parsing when needed).
    op.add_option("-l", "--lazy",
                  action="callback", callback=SetUpLazyEvaluation,
                  callback_kwargs={},
                  help="EXPERIMENTAL: recursion error solution, lazy evalution of typecodes")
    # Use Twisted
    op.add_option("-w", "--twisted",
                  action="store_true", dest='twisted', default=False,
                  help="generate a twisted.web client/server, dependencies python>=2.4, Twisted>=2.0.0, TwistedWeb>=0.5.0")
    op.add_option("-o", "--output-dir",
                  action="store", dest="output_dir", default=".", type="string",
                  help="save files in directory")
    op.add_option("-s", "--simple-naming",
                  action="store_true", dest="simple_naming", default=False,
                  help="map element names directly to python attributes")
    op.add_option("-p", "--pydoc",
                  action="store_true", dest="pydoc", default=False,
                  help="top-level directory for pydoc documentation.")
    # When called programmatically (args given), parse those instead of argv
    # and return the generated file names at the end.
    is_cmdline = args is None
    if is_cmdline:
        (options, args) = op.parse_args()
    else:
        (options, args) = op.parse_args(args)
    if len(args) != 1:
        print>>sys.stderr, 'Expecting a file/url as argument (WSDL).'
        sys.exit(os.EX_USAGE)
    location = args[0]
    # --schema: treat the input as a bare XSD instead of a full WSDL.
    if options.schema is True:
        reader = XMLSchema.SchemaReader(base_url=location)
    else:
        reader = WSDLTools.WSDLReader()
    # Local path -> loadFromFile; anything else -> loadFromURL.
    load = reader.loadFromFile
    if not isfile(location):
        load = reader.loadFromURL
    try:
        wsdl = load(location)
    except Exception, e:
        print >> sys.stderr, "Error loading %s: \n\t%s" % (location, e)
        traceback.print_exc(sys.stderr)
        # exit code UNIX specific, Windows?
        if hasattr(os, 'EX_NOINPUT'): sys.exit(os.EX_NOINPUT)
        sys.exit("error loading %s" %location)
    # Bare schemas only get types/client generation; full WSDLs also get
    # the server-side dispatch module.
    if isinstance(wsdl, XMLSchema.XMLSchema):
        wsdl.location = location
        files = _wsdl2py(options, wsdl)
    else:
        files = _wsdl2py(options, wsdl)
        files.append(_wsdl2dispatch(options, wsdl))
    if getattr(options, 'pydoc', False):
        _writepydoc(os.path.join('docs', 'API'), *files)
    if is_cmdline:
        return
    return files
#def wsdl2dispatch(args=None):
# """Deprecated: wsdl2py now generates everything
# A utility for automatically generating service skeleton code from a wsdl
# definition.
# """
# op = optparse.OptionParser()
# op.add_option("-a", "--address",
# action="store_true", dest="address", default=False,
# help="ws-addressing support, must include WS-Addressing schema.")
# op.add_option("-d", "--debug",
# action="callback", callback=SetDebugCallback,
# help="debug output")
# op.add_option("-t", "--types",
# action="store", dest="types", default=None, type="string",
# help="Write generated files to OUTPUT_DIR")
# op.add_option("-o", "--output-dir",
# action="store", dest="output_dir", default=".", type="string",
# help="file to load types from")
# op.add_option("-s", "--simple-naming",
# action="store_true", dest="simple_naming", default=False,
# help="Simplify generated naming.")
#
# if args is None:
# (options, args) = op.parse_args()
# else:
# (options, args) = op.parse_args(args)
#
# if len(args) != 1:
# print>>sys.stderr, 'Expecting a file/url as argument (WSDL).'
# sys.exit(os.EX_USAGE)
#
# reader = WSDLTools.WSDLReader()
# if isfile(args[0]):
# _wsdl2dispatch(options, reader.loadFromFile(args[0]))
# return
#
# _wsdl2dispatch(options, reader.loadFromURL(args[0]))
def _wsdl2py(options, wsdl):
    """Generate the <name>_client.py and <name>_types.py modules for *wsdl*.

    Mutates module-level ZSI generator registries (header imports, naming
    hooks) according to *options* before writing. Returns the list of file
    paths written.
    """
    if options.twisted:
        # Swap the default ZSI client import for the twisted flavour.
        from ZSI.generate.containers import ServiceHeaderContainer
        try:
            ServiceHeaderContainer.imports.remove('from ZSI import client')
        except ValueError:
            pass
        ServiceHeaderContainer.imports.append('from ZSI.twisted import client')
    if options.simple_naming:
        # Use a different client suffix
        # WriteServiceModule.client_module_suffix = "_client"
        # Write messages definitions to a separate file.
        #wsdl2pyServiceDescription.separate_messages = True
        # Use more simple type and element class names
        containers.SetTypeNameFunc( lambda n: '%s_' %(NC_to_CN(n)) )
        containers.SetElementNameFunc( lambda n: '%s' %(NC_to_CN(n)) )
        # Don't add "_" to the attribute name (remove when --aname works well)
        containers.ContainerBase.func_aname = lambda instnc,n: TextProtect(str(n))
        # write out the modules with their names rather than their number.
        utility.namespace_name = lambda cls, ns: utility.Namespace2ModuleName(ns)
    files = []
    append = files.append
    # A bare XMLSchema is wrapped so it quacks like a WSDL for the writer.
    if isinstance(wsdl, XMLSchema.XMLSchema):
        wsm = WriteServiceModule(_XMLSchemaAdapter(wsdl.location, wsdl),
                                 addressing=options.address)
    else:
        wsm = WriteServiceModule(wsdl, addressing=options.address)
    # <name>_client.py -- locator, bindings, message classes.
    client_mod = wsm.getClientModuleName()
    client_file = join(options.output_dir, '%s.py' %client_mod)
    append(client_file)
    fd = open(client_file, 'w+')
    wsm.writeClient(fd)
    fd.close()
    # <name>_types.py -- typecode definitions.
    types_mod = wsm.getTypesModuleName()
    types_file = join(options.output_dir, '%s.py' %types_mod)
    append(types_file)
    fd = open(types_file, 'w+' )
    wsm.writeTypes(fd)
    fd.close()
    return files
def _wsdl2dispatch(options, wsdl):
    """TOOD: Remove ServiceContainer stuff, and replace with WSGI.

    Writes the server-side bindings module for *wsdl* into
    options.output_dir and returns the bare module file name.
    NOTE(review): callers (wsdl2py) append this return value to a list of
    full paths from _wsdl2py -- with a non-default output_dir the entries
    are inconsistent; confirm intended behaviour.
    """
    kw = dict()
    # Pick the service-module writer flavour: twisted, WS-Addressing, or plain.
    if options.twisted:
        from ZSI.twisted.WSresource import WSResource
        kw['base'] = WSResource
        ss = ServiceDescription(**kw)
        if options.address is True:
            raise RuntimeError, 'WS-Address w/twisted currently unsupported, edit the "factory" attribute by hand'
    else:
        # TODO: make all this handler arch
        if options.address is True:
            ss = ServiceDescriptionWSA()
        else:
            ss = ServiceDescription(**kw)
    ss.fromWSDL(wsdl)
    file_name = ss.getServiceModuleName()+'.py'
    fd = open( join(options.output_dir, file_name), 'w+')
    ss.write(fd)
    fd.close()
    return file_name
class _XMLSchemaAdapter:
"""Adapts an obj XMLSchema.XMLSchema to look like a WSDLTools.WSDL,
just setting a couple attributes code expects to see.
"""
def __init__(self, location, schema):
"""Parameters:
location -- base location, file path
schema -- XMLSchema instance
"""
self.name = '_'.join(split(location)[-1].split('.'))
self.types = {schema.targetNamespace:schema}
import os, pydoc, sys, warnings, inspect
import os.path
from distutils import log
from distutils.command.build_py import build_py
from distutils.util import convert_path
#from setuptools import find_packages
#from setuptools import Command
from ZSI.schema import ElementDeclaration, TypeDefinition
#from pyGridWare.utility.generate.Modules import NR
#from pyGridWare.utility.generate.Modules import CLIENT, TYPES
#def find_packages_modules(where='.'):
# #pack,mod,mod_file
# """Return a list all Python packages found within directory 'where'
# """
# out = []
# stack=[(convert_path(where), '')]
# while stack:
# where,prefix = stack.pop(0)
# for name in os.listdir(where):
# fn = os.path.join(where,name)
# #if (os.path.isdir(fn) and
# # os.path.isfile(os.path.join(fn,'__init__.py'))
# #):
# # out.append(prefix+name); stack.append((fn,prefix+name+'.'))
# if (os.path.isdir(fn) and
# os.path.isfile(os.path.join(fn,'__init__.py'))):
# stack.append((fn,prefix+name+'.'))
# continue
#
# if name == '__init__.py' or not name.endswith('.py'):
# continue
#
# out.append((prefix, name.split('.py')[0]))
#
# return out
def _writedoc(doc, thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    doc -- destination directory for the page
    thing -- module/object (or dotted name) handed to pydoc.resolve
    forceload -- forwarded to pydoc.resolve

    Returns '<name>.html' on success; returns None after printing the
    traceback when the module cannot be imported.
    """
    try:
        object, name = pydoc.resolve(thing, forceload)
        page = pydoc.html.page(pydoc.describe(object), pydoc.html.document(object, name))
        fname = os.path.join(doc, name + '.html')
        file = open(fname, 'w')
        file.write(page)
        file.close()
    except (ImportError, pydoc.ErrorDuringImport), value:
        traceback.print_exc(sys.stderr)
    else:
        # Only reached when no exception occurred.
        return name + '.html'
def _writeclientdoc(doc, thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    Temporarily monkey-patches pydoc.HTMLDoc.docmodule with a variant that
    appends an "Aliases" section linking pyclass aliases, renders the page
    for *thing*, then restores the original docmodule.
    """
    docmodule = pydoc.HTMLDoc.docmodule
    def strongarm(self, object, name=None, mod=None, *ignored):
        result = docmodule(self, object, name, mod, *ignored)
        # Grab all the aliases to pyclasses and create links.
        nonmembers = []
        push = nonmembers.append
        for k,v in inspect.getmembers(object, inspect.isclass):
            if inspect.getmodule(v) is not object and getattr(v,'typecode',None) is not None:
                push('<a href="%s.html">%s</a>: pyclass alias<br/>' %(v.__name__,k))
        result += self.bigsection('Aliases', '#ffffff', '#eeaa77', ''.join(nonmembers))
        return result
    pydoc.HTMLDoc.docmodule = strongarm
    try:
        object, name = pydoc.resolve(thing, forceload)
        page = pydoc.html.page(pydoc.describe(object), pydoc.html.document(object, name))
        name = os.path.join(doc, name + '.html')
        file = open(name, 'w')
        file.write(page)
        file.close()
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
    # Restore the stock docmodule (not in a finally: an unexpected
    # exception type would leave the patch in place).
    pydoc.HTMLDoc.docmodule = docmodule
def _writetypesdoc(doc, thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    Specialised page writer for generated *_types modules: pre-renders a
    page for every inner typecode class, then monkey-patches
    pydoc.HTMLDoc.docclass so each class section links element/type
    typecodes and their pyclass pages.
    """
    try:
        object, name = pydoc.resolve(thing, forceload)
        name = os.path.join(doc, name + '.html')
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
        return
    # inner classes
    # cdict: class name -> its inner classes; fdict: (outer, inner) -> page.
    # elements_dict/types_dict: inner name -> pyclass page (or None/broken).
    cdict = {}
    fdict = {}
    elements_dict = {}
    types_dict = {}
    for kname,klass in inspect.getmembers(thing, inspect.isclass):
        # Skip classes merely imported into the module.
        if thing is not inspect.getmodule(klass):
            continue
        cdict[kname] = inspect.getmembers(klass, inspect.isclass)
        for iname,iklass in cdict[kname]:
            key = (kname,iname)
            fdict[key] = _writedoc(doc, iklass)
            if issubclass(iklass, ElementDeclaration):
                try:
                    typecode = iklass()
                except (AttributeError,RuntimeError), ex:
                    # Typecode not instantiable -- document the failure.
                    elements_dict[iname] = _writebrokedoc(doc, ex, iname)
                    continue
                elements_dict[iname] = None
                if typecode.pyclass is not None:
                    elements_dict[iname] = _writedoc(doc, typecode.pyclass)
                continue
            if issubclass(iklass, TypeDefinition):
                try:
                    typecode = iklass(None)
                except (AttributeError,RuntimeError), ex:
                    types_dict[iname] = _writebrokedoc(doc, ex, iname)
                    continue
                types_dict[iname] = None
                if typecode.pyclass is not None:
                    types_dict[iname] = _writedoc(doc, typecode.pyclass)
                continue
    def strongarm(self, object, name=None, mod=None, funcs={}, classes={}, *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        object, name = pydoc.resolve(object, forceload)
        contents = []
        push = contents.append
        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        mdict = {}
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % pydoc.join(parents, ', ')
        doc = self.markup(pydoc.getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br> </tt>' % doc
        # Link each inner typecode class using the tables built above.
        for iname,iclass in cdict[name]:
            fname = fdict[(name,iname)]
            if elements_dict.has_key(iname):
                push('class <a href="%s">%s</a>: element declaration typecode<br/>'\
                    %(fname,iname))
                pyclass = elements_dict[iname]
                if pyclass is not None:
                    push('<ul>instance attributes:')
                    push('<li><a href="%s">pyclass</a>: instances serializable to XML<br/></li>'\
                        %elements_dict[iname])
                    push('</ul>')
            elif types_dict.has_key(iname):
                push('class <a href="%s">%s</a>: type definition typecode<br/>' %(fname,iname))
                pyclass = types_dict[iname]
                if pyclass is not None:
                    push('<ul>instance attributes:')
                    push('<li><a href="%s">pyclass</a>: instances serializable to XML<br/></li>'\
                        %types_dict[iname])
                    push('</ul>')
            else:
                push('class <a href="%s">%s</a>: TODO not sure what this is<br/>' %(fname,iname))
        contents = ''.join(contents)
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
    doclass = pydoc.HTMLDoc.docclass
    pydoc.HTMLDoc.docclass = strongarm
    try:
        page = pydoc.html.page(pydoc.describe(object), pydoc.html.document(object, name))
        file = open(name, 'w')
        file.write(page)
        file.close()
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
    # Restore the stock docclass; NOTE(review): not in a finally, so an
    # unexpected exception type would leave the patch installed.
    pydoc.HTMLDoc.docclass = doclass
def _writebrokedoc(doc, ex, name, forceload=0):
    """Write an HTML page describing exception *ex* for module *name*.

    Used when a typecode cannot be instantiated. Always returns
    '<name>.html', even if writing the page itself failed.
    """
    try:
        fname = os.path.join(doc, name + '.html')
        page = pydoc.html.page(pydoc.describe(ex), pydoc.html.document(str(ex), fname))
        file = open(fname, 'w')
        file.write(page)
        file.close()
    except (ImportError, pydoc.ErrorDuringImport), value:
        log.debug(str(value))
    return name + '.html'
def _writepydoc(doc, *args):
"""create pydoc html pages
doc -- destination directory for documents
*args -- modules run thru pydoc
"""
ok = True
if not os.path.isdir(doc):
os.makedirs(doc)
if os.path.curdir not in sys.path:
sys.path.append(os.path.curdir)
for f in args:
if f.startswith('./'): f = f[2:]
name = os.path.sep.join(f.strip('.py').split(os.path.sep))
try:
e = __import__(name)
except Exception,ex:
raise
# _writebrokedoc(doc, ex, name)
# continue
if name.endswith('_client'):
_writeclientdoc(doc, e)
continue
if name.endswith('_types'):
_writetypesdoc(doc, e)
continue
try:
_writedoc(doc, e)
except IndexError,ex:
_writebrokedoc(doc, ex, name)
continue
| sassoftware/catalog-service | catalogService/libs/viclient_vendor/ZSI/generate/commands.py | Python | apache-2.0 | 19,840 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.api.v2 import patches # flake8: noqa
import pecan.deploy
from oslo.config import cfg
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def factory(global_config, **local_conf):
    """Paste application factory for the Designate v2 API.

    When the v2 API is disabled in configuration a minimal WSGI app that
    always answers 404 is returned; otherwise the Pecan app is built.
    """
    if not cfg.CONF['service:api'].enable_api_v2:
        def disabled_app(environ, start_response):
            start_response('404 Not Found', [])
            return []

        return disabled_app

    pecan_config = {
        'app': {
            'root': 'designate.api.v2.controllers.root.RootController',
            'modules': ['designate.api.v2']
        }
    }
    return pecan.deploy.deploy(pecan_config)
| richm/designate | designate/api/v2/__init__.py | Python | apache-2.0 | 1,327 |
"""
Module Resty
Date: November 25, 2013
Company: SwissTech Consulting.
Author: Patrick Glass <patrickglass@gmail.com>
Copyright: Copyright 2013 SwissTech Consulting.
This class implements a simple rest api framework for interfacing with the
Server via its REST API.
"""
# Package metadata, exposed for setup tooling and runtime introspection.
__title__ = 'Resty'
__version__ = '0.1'
__author__ = 'Patrick Glass'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Patrick Glass'
from resty.api import RestyAPI
from resty.exceptions import (
RestApiException,
RestApiUrlException,
RestApiAuthError,
RestApiBadRequest,
RestApiServersDown
)
from resty.auth import RestAuthToken
from resty.request import request
| patrickglass/Resty | resty/__init__.py | Python | apache-2.0 | 667 |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import requests
from cloudify import compute
from cloudify import exceptions
from cloudify import ctx
def handle_userdata(server):
    """Merge user-supplied userdata with the Cloudify agent install script.

    Reads server['userdata'] (a string, or a dict describing how to fetch
    it -- see userdata_handlers) and the agent init script from ctx.
    Writes the combined result back into server['userdata'].

    Raises NonRecoverableError for an unknown dict 'type'.
    """
    provided = server.get('userdata')
    agent_script = ctx.agent.init_script()

    # Nothing to do when neither source supplies any userdata.
    if not provided and not agent_script:
        return

    # Dict form describes how to *fetch* the userdata (e.g. over HTTP).
    if isinstance(provided, dict):
        handler_type = provided['type']
        if handler_type not in userdata_handlers:
            # BUGFIX: original message had a stray trailing ')'.
            raise exceptions.NonRecoverableError(
                "Invalid type '{0}' for server userdata".format(handler_type))
        provided = userdata_handlers[handler_type](provided)

    if provided and agent_script:
        final_userdata = compute.create_multi_mimetype_userdata(
            [provided, agent_script])
    elif provided:
        final_userdata = provided
    else:
        final_userdata = agent_script

    server['userdata'] = final_userdata
# Maps a userdata descriptor's 'type' value to a fetcher that resolves the
# descriptor dict into the actual userdata string (consumed by
# handle_userdata above).
userdata_handlers = {
    'http': lambda params: requests.get(params['url']).text
}
| GigaSpaces-ProfessionalServices/cloudify-openstack-plugin | nova_plugin/userdata.py | Python | apache-2.0 | 1,696 |
from twython import Twython
from config import APP_KEY, APP_SECRET
def obtain_auth_url():
    """Run the interactive Twitter OAuth dance and persist the tokens.

    Prompts the user to authorize the app in a browser, exchanges the PIN
    for access tokens, and appends them to config.py.
    NOT CALLED ANYWHERE (other than the module-level call below).
    """
    twitter = Twython(APP_KEY, APP_SECRET)
    auth = twitter.get_authentication_tokens()
    oauth_token = auth['oauth_token']
    oauth_token_secret = auth['oauth_token_secret']
    # Parenthesized print is valid and behaves identically in Python 2.
    print("\n\n\nGo to the following URL to authorize app:")
    print(auth['auth_url'])
    oauth_verifier = raw_input("\nEnter the pin: ")
    twitter = Twython(APP_KEY, APP_SECRET, oauth_token, oauth_token_secret)
    authorized = twitter.get_authorized_tokens(oauth_verifier)
    # Write confirmed tokens to disk as real Python assignments. The original
    # code appended "'OAUTH_TOKEN': '...'" which is invalid module-level
    # syntax and would break every later `from config import ...`.
    with open("config.py", "a") as config_file:
        config_file.write(
            "\nOAUTH_TOKEN = '" + authorized['oauth_token'] + "'"
            "\nOAUTH_TOKEN_SECRET = '" + authorized['oauth_token_secret'] + "'\n")
# NOTE(review): this runs the full OAuth flow (network + stdin prompt) at
# import time; consider guarding with `if __name__ == "__main__":` — confirm
# that import-time execution is intended.
obtain_auth_url()
| BWeatherMaine/WXGIF | twitter.py | Python | apache-2.0 | 885 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
import tvm.testing
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
    # Plain NCHW conv2d compute, registered as AutoTVM task "test/conv2d_1".
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
    # Generic NCHW conv2d schedule paired with _compute_conv2d_1.
    return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
    # Same compute as conv2d_1, but registered under a distinct AutoTVM task
    # name so records/plevels can distinguish the two implementations.
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
    # Generic NCHW conv2d schedule paired with _compute_conv2d_2.
    return topi.generic.schedule_conv2d_nchw(outs)
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
    # Non-AutoTVM compute (no cfg parameter) used for the non-tunable
    # implementation added under a specialized condition in _tmp_strategy.
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
def _schedule_conv2d_3(outs):
    # Schedule paired with the non-AutoTVM _compute_conv2d_3.
    return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
    """Test-only conv2d op strategy exposing three implementations.

    conv2d_1 (plevel 10) and conv2d_2 (plevel 15) are always registered;
    conv2d_3 (plevel 20) is registered under a SpecializedCondition that
    only holds when the input channel count is >= 16.
    """
    strategy = relay.op.OpStrategy()
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
        name="conv2d_1",
        plevel=10,
    )
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
        name="conv2d_2",
        plevel=15,
    )
    # Input channel count of the data tensor (NCHW layout assumed here).
    ic = inputs[0].shape[1]
    with tvm.te.SpecializedCondition(ic >= 16):
        strategy.add_implementation(
            relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
            relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
            name="conv2d_3",
            plevel=20,
        )
    return strategy
def _create_record(task_name, dshape, wshape, target, cost):
    """Build a fake AutoTVM log entry (MeasureInput, MeasureResult) with the
    given measured cost for a conv2d task, so apply_history_best can rank
    implementations in the tests below."""
    args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
    task = autotvm.task.create(task_name, args, target)
    cfg = autotvm.ConfigEntity(0, None, {}, [])
    cfg.cost = cost
    inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
    result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
    return (inp, result)
def test_get_valid_implementations():
    """get_valid_implementations should honor the specialized condition:
    2 impls when input channels < 16, 3 when >= 16."""
    target = tvm.target.Target("llvm")
    def _get_impls(dshape, wshape):
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.compile_engine.get_valid_implementations(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
        )
    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
        assert len(impls) == 2
        impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
        assert len(impls) == 3
def test_select_implementation():
    """select_implementation should pick the highest-plevel impl when no
    tuning log applies, and the fastest AutoTVM record when one does."""
    target = tvm.target.Target("llvm")
    def _select_impl(dshape, wshape, use_autotvm=False):
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.compile_engine.select_implementation(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
            use_autotvm,
        )
    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        # Without records, the highest applicable plevel wins.
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
        assert impl.name == "conv2d_3"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
        assert impl.name == "conv2d_3"
        # add autotvm record
        records = []
        records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
        with target:
            with autotvm.apply_history_best(records):
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_1"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"
        # A cheaper conv2d_2 record beats conv2d_1 only for the matching shape.
        records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
        with target:
            with autotvm.apply_history_best(records):
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_2"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"
def test_compile_engine():
    """Lowering the same function twice should hit the compile-engine cache
    (same object), while different shapes/targets produce distinct entries;
    also smoke-tests the JIT path on LLVM."""
    engine = relay.backend.compile_engine.get()
    def get_func(shape):
        x = relay.var("x", shape=shape)
        y = relay.add(x, x)
        z = relay.add(y, x)
        f = relay.Function([x], z)
        mod = tvm.IRModule.from_expr(f)
        mod = relay.transform.InferType()(mod)
        return mod["main"]
    z1 = engine.lower(get_func((10,)), "llvm")
    z2 = engine.lower(get_func((10,)), "llvm")
    z3 = engine.lower(get_func(()), "llvm")
    assert z1.same_as(z2)
    assert not z3.same_as(z1)
    if tvm.testing.device_enabled("cuda"):
        z4 = engine.lower(get_func(()), "cuda")
        assert not z3.same_as(z4)
    # Test JIT target
    for target in ["llvm"]:
        ctx = tvm.context(target)
        if tvm.testing.device_enabled(target):
            f = engine.jit(get_func((10,)), target)
            x = tvm.nd.array(np.ones(10).astype("float32"), ctx=ctx)
            y = tvm.nd.empty((10,), ctx=ctx)
            f(x, y)
            # f computes x + x + x, hence * 3.
            tvm.testing.assert_allclose(y.asnumpy(), x.asnumpy() * 3)
    engine.dump()
def test_compile_placeholder_bypass():
    """A tuple output that forwards an input var directly (alongside a real
    op) should still compile at opt_level=0."""
    engine = relay.backend.compile_engine.get()
    x = relay.var("x", shape=(2, 3))
    y = relay.var("y", shape=(2, 3))
    z = relay.var("z", shape=(2, 3))
    result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
    func = relay.Function(relay.analysis.free_vars(result), result)
    with tvm.transform.PassContext(opt_level=0):
        graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
    """An injective op (transpose) feeding a tuple output compiles to LLVM."""
    lhs = relay.var("x", shape=(2, 3))
    rhs = relay.var("y", shape=(2, 3))
    bundle = relay.Tuple([relay.transpose(lhs), rhs])
    func = relay.Function([lhs, rhs], bundle)
    relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_tuple_dup():
    """A tuple referencing the same sub-expression twice compiles to LLVM."""
    inp = relay.var("data", shape=(16, 16))
    logged = relay.log(inp)
    duplicated = relay.Tuple([logged, logged])
    func = relay.Function([inp], duplicated)
    relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_full():
    """full() must compile when the target shape mixes int32 and int64 dims."""
    # Shape calculations can happen in int64. The test checks that full operator
    # can handle when shapes are not int32
    shape = (
        tvm.tir.IntImm("int32", 1),
        tvm.tir.IntImm("int64", 16),
        tvm.tir.IntImm("int64", 16),
        tvm.tir.IntImm("int32", 64),
    )
    output = relay.full(relay.const(0, "int32"), shape=shape, dtype="int32")
    f = relay.Function([], output)
    mod = tvm.IRModule.from_expr(f)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    relay.build(mod, "llvm")
def test_compile_nhwc_pack():
    """An int8 NHWC conv2d followed by elementwise ops compiles to LLVM
    (regression test for the NHWC packed path)."""
    data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
    weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
    p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
    conv = relay.nn.conv2d(
        data,
        weight,
        kernel_size=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
    )
    multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
    tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
    subtract = relay.subtract(conv, tile)
    func = subtract
    mod = relay.Function(relay.analysis.free_vars(func), func)
    relay.build(mod, target="llvm")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_get_valid_implementations()
    test_select_implementation()
    test_compile_engine()
    test_compile_placeholder_bypass()
    test_compile_injective_with_tuple()
    test_compile_tuple_dup()
    test_compile_full()
    test_compile_nhwc_pack()
| tqchen/tvm | tests/python/relay/test_backend_compile_engine.py | Python | apache-2.0 | 9,932 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Request body validating middleware for OpenStack Identity resources."""
from keystone.common.validation import validators
def lazy_validate(request_body_schema, resource_to_validate):
    """A non-decorator way to validate a request, to be used inline.

    :param request_body_schema: a schema to validate the resource reference
    :param resource_to_validate: dictionary to validate
    :raises keystone.exception.ValidationError: if `resource_to_validate`
        does not conform to `request_body_schema`
    """
    validators.SchemaValidator(request_body_schema).validate(
        resource_to_validate)
def nullable(property_schema):
    """Clone a property schema into one that is nullable.

    The returned schema is a copy; neither ``property_schema`` nor any list
    it contains is modified.

    :param dict property_schema: schema to clone into a nullable schema
    :returns: a new dict schema
    """
    # TODO(dstanek): deal with the case where type is already a list; we don't
    # do that yet so I'm not wasting time on it
    new_schema = property_schema.copy()
    new_schema['type'] = [property_schema['type'], 'null']
    # NOTE(kmalloc): If enum is specified (such as our boolean case) ensure we
    # add null to the enum as well so that null can be passed/validated as
    # expected. Without adding to the enum, null will not validate as enum is
    # explicitly listing valid values. According to the JSON Schema
    # specification, the values must be unique in the enum array.
    if 'enum' in new_schema and None not in new_schema['enum']:
        # BUG FIX: dict.copy() is shallow, so appending to new_schema['enum']
        # in place would mutate the caller's enum list too. Build a fresh
        # list instead. (In the enum the 'null' is NoneType.)
        new_schema['enum'] = new_schema['enum'] + [None]
    return new_schema
| mahak/keystone | keystone/common/validation/__init__.py | Python | apache-2.0 | 2,344 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Atan(Base):
    @staticmethod
    def export():  # type: () -> None
        """Emit Atan test cases, checking against numpy's arctan."""
        node = onnx.helper.make_node(
            'Atan',
            inputs=['x'],
            outputs=['y'],
        )

        # Small hand-picked example.
        sample = np.array([-1, 0, 1]).astype(np.float32)
        expect(node, inputs=[sample], outputs=[np.arctan(sample)],
               name='test_atan_example')

        # Random 3-D tensor.
        randomized = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[randomized], outputs=[np.arctan(randomized)],
               name='test_atan')
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/backend/test/case/node/atan.py | Python | apache-2.0 | 767 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.with_c_api
class ConvTest(test.TestCase):
  """Tests for tf.layers Conv1D/Conv2D/Conv3D layers and their functional
  wrappers: argument validation, output/kernel/bias shapes, data formats,
  variable reuse, regularizers, and constraints."""
  def testInvalidDataFormat(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.conv2d(images, 32, 3, data_format='invalid')
  def testInvalidStrides(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=None)
  def testInvalidKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, (1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, None)
  def testCreateConv2D(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testConv2DFloat16(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
    output = conv_layers.conv2d(images, 32, [3, 3], activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
  def testCreateConv2DIntegerKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateConv2DChannelsFirst(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height - 2, width - 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testUnknownInputChannels(self):
    # The channel dimension must be statically known for both data formats.
    images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)
    images = array_ops.placeholder(dtypes.float32, (5, None, 7, 9))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)
  def testConv2DPaddingSame(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
  def testCreateConvWithStrides(self):
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])
    # Test strides integer
    layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])
    # Test unequal strides
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width, 32])
  def testCreateConv1D(self):
    width = 7
    data = random_ops.random_uniform((5, width, 4))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    output = layer.apply(data)
    self.assertEqual(output.op.name, 'conv1d/Relu')
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testConv1DFloat16(self):
    width = 7
    data = random_ops.random_uniform((5, width, 4), dtype='float16')
    output = conv_layers.conv1d(data, 32, 3, activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
  def testCreateConv1DChannelsFirst(self):
    width = 7
    data = random_ops.random_uniform((5, 4, width))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    output = layer.apply(data)
    self.assertListEqual(output.get_shape().as_list(), [5, 32, width - 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testUnknownInputChannelsConv1D(self):
    data = array_ops.placeholder(dtypes.float32, (5, 4, None))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)
    data = array_ops.placeholder(dtypes.float32, (5, None, 4))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)
  def testCreateConv3D(self):
    depth, height, width = 6, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 4))
    layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
    output = layer.apply(volumes)
    self.assertEqual(output.op.name, 'conv3d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth - 2, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testUnknownInputChannelsConv3D(self):
    volumes = array_ops.placeholder(dtypes.float32, (5, 6, 7, 9, None))
    layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(volumes)
  def testConv2DKernelRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv2DBiasRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv2DNoBias(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(
        32, [3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertEqual(layer.bias, None)
  def testDilatedConv2D(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
    # Test tuple dilation rate
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])
  def testFunctionalConv2DReuse(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d(images, 32, [3, 3], name='conv1')
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv2DReuseFromScope(self):
    with variable_scope.variable_scope('scope'):
      height, width = 7, 9
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d(images, 32, [3, 3], name='conv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.conv2d(images, 32, [3, 3], name='conv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv2DInitializerFromScope(self):
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        height, width = 7, 9
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d(images, 32, [3, 3], name='conv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('kernel' in weights[0].name)
        self.assertTrue('bias' in weights[1].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[1], np.zeros((32)))
  def testFunctionalConv2DNoReuse(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 4)
  def testConstraints(self):
    # Conv1D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv1d = conv_layers.Conv1D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 5), seed=1)
    conv1d(inputs)
    self.assertEqual(conv1d.kernel_constraint, k_constraint)
    self.assertEqual(conv1d.bias_constraint, b_constraint)
    # Conv2D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv2d = conv_layers.Conv2D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    conv2d(inputs)
    self.assertEqual(conv2d.kernel_constraint, k_constraint)
    self.assertEqual(conv2d.bias_constraint, b_constraint)
    # Conv3D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv3d = conv_layers.Conv3D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
    conv3d(inputs)
    self.assertEqual(conv3d.kernel_constraint, k_constraint)
    self.assertEqual(conv3d.bias_constraint, b_constraint)
  def testConv3DChannelsFirst(self):
    # Test case for GitHub issue 15655
    images = array_ops.placeholder(
        dtype=dtypes.float32, shape=[None, 1, 32, 32, 32])
    conv_layers.conv3d(images, 32, 9, data_format='channels_first')
@test_util.with_c_api
class SeparableConv1DTest(test.TestCase):
  """Tests for the SeparableConv1D layer and its functional wrapper:
  argument validation, depthwise/pointwise kernel shapes, data formats,
  strides, variable reuse, regularizers, and constraints."""
  def testInvalidDataFormat(self):
    length = 9
    data = random_ops.random_uniform((5, length, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.separable_conv1d(data, 32, 3, data_format='invalid')
  def testInvalidStrides(self):
    length = 9
    data = random_ops.random_uniform((5, length, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv1d(data, 32, 3, strides=None)
  def testInvalidKernelSize(self):
    length = 9
    data = random_ops.random_uniform((5, length, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv1d(data, 32, (1, 2))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv1d(data, 32, None)
  def testCreateSeparableConv1D(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    layer = conv_layers.SeparableConv1D(32, 3, activation=nn_ops.relu)
    output = layer.apply(data)
    self.assertEqual(output.op.name, 'separable_conv1d/Relu')
    self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
    self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
    self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
    self.assertEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateSeparableConv1DDepthMultiplier(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    layer = conv_layers.SeparableConv1D(32, 3, depth_multiplier=2)
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
    self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 2])
    self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 8, 32])
    self.assertEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateSeparableConv1DChannelsFirst(self):
    length = 9
    data = random_ops.random_uniform((5, 4, length))
    layer = conv_layers.SeparableConv1D(32, 3, data_format='channels_first')
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, 32, length - 2])
    self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
    self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
    self.assertEqual(layer.bias.get_shape().as_list(), [32])
  def testSeparableConv1DPaddingSame(self):
    length = 9
    data = random_ops.random_uniform((5, length, 32), seed=1)
    layer = conv_layers.SeparableConv1D(
        64, length, padding='same')
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length, 64])
  def testCreateSeparableConv1DWithStrides(self):
    length = 10
    data = random_ops.random_uniform((5, length, 3), seed=1)
    layer = conv_layers.SeparableConv1D(32, 3, strides=2, padding='same')
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, length // 2, 32])
  def testCreateSeparableConv1DWithStridesChannelsFirst(self):
    data_format = 'channels_first'
    length = 10
    data = random_ops.random_uniform((5, 3, length), seed=1)
    layer = conv_layers.SeparableConv1D(
        32, 3, strides=2, padding='same', data_format=data_format)
    output = layer.apply(data)
    self.assertEqual(output.get_shape().as_list(), [5, 32, length // 2])
  def testFunctionalConv1DReuse(self):
    length = 10
    data = random_ops.random_uniform((5, length, 3), seed=1)
    conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv1d(data, 32, 3, name='sepconv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 3)
  def testFunctionalConv1DReuseFromScope(self):
    with variable_scope.variable_scope('scope'):
      length = 10
      data = random_ops.random_uniform((5, length, 3), seed=1)
      conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)
  def testFunctionalConv1DNoReuse(self):
    length = 10
    data = random_ops.random_uniform((5, length, 3), seed=1)
    conv_layers.separable_conv1d(data, 32, 3)
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv1d(data, 32, 3)
    self.assertEqual(len(variables.trainable_variables()), 6)
  def testSeparableConv1DDepthwiseRegularizer(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv1D(32, 3, depthwise_regularizer=reg)
    layer.apply(data)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertEqual(layer.losses, loss_keys)
  def testSeparableConv1DPointwiseRegularizer(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv1D(32, 3, pointwise_regularizer=reg)
    layer.apply(data)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertEqual(layer.losses, loss_keys)
  def testSeparableConv1DBiasRegularizer(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv1D(32, 3, bias_regularizer=reg)
    layer.apply(data)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertEqual(layer.losses, loss_keys)
  def testSeparableConv1DNoBias(self):
    length = 9
    data = random_ops.random_uniform((5, length, 4))
    layer = conv_layers.SeparableConv1D(
        32, 3, activation=nn_ops.relu, use_bias=False)
    output = layer.apply(data)
    self.assertEqual(output.op.name, 'separable_conv1d/Relu')
    self.assertEqual(layer.bias, None)
  def testConstraints(self):
    d_constraint = lambda x: x / math_ops.reduce_sum(x)
    p_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = conv_layers.SeparableConv1D(2, 3,
                                        depthwise_constraint=d_constraint,
                                        pointwise_constraint=p_constraint,
                                        bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.depthwise_constraint, d_constraint)
    self.assertEqual(layer.pointwise_constraint, p_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
@test_util.with_c_api
class SeparableConv2DTest(test.TestCase):
  """Tests for conv_layers.SeparableConv2D and the separable_conv2d helper."""
  def testInvalidDataFormat(self):
    """An unrecognized data_format string raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')
  def testInvalidStrides(self):
    """strides of wrong rank or None raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=None)
  def testInvalidKernelSize(self):
    """kernel_size of wrong rank or None raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, (1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, None)
  def testCreateSeparableConv2D(self):
    """Checks output op name, output shape, and both kernel/bias shapes."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'separable_conv2d/Relu')
    # 'valid' padding with a 3x3 kernel trims one pixel from each border.
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateSeparableConv2DDepthMultiplier(self):
    """depth_multiplier=2 doubles depthwise output channels (4 -> 8)."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 2])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 8, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateSeparableConv2DIntegerKernelSize(self):
    """A scalar kernel_size is broadcast to both spatial dims."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateSeparableConv2DChannelsFirst(self):
    """channels_first puts the 32 output channels in axis 1."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height - 2, width - 2])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testSeparableConv2DPaddingSame(self):
    """'same' padding preserves the spatial dimensions."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.SeparableConv2D(
        64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
  def testCreateSeparableConvWithStrides(self):
    """Tuple, scalar, and unequal strides all halve the strided dims."""
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])
    # Test strides integer
    layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])
    # Test unequal strides
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width, 32])
  def testCreateSeparableConvWithStridesChannelsFirst(self):
    """Same stride cases as above but with channels_first layout."""
    data_format = 'channels_first'
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, 3, height, width), seed=1)
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 2), padding='same', data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width / 2])
    # Test strides integer
    layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same',
                                        data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width / 2])
    # Test unequal strides
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 1), padding='same', data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width])
  def testFunctionalConv2DReuse(self):
    """reuse=True shares the 3 variables (depthwise, pointwise, bias)."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv2d(
        images, 32, [3, 3], name='sepconv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 3)
  def testFunctionalConv2DReuseFromScope(self):
    """Variable reuse also works via an enclosing reusing variable_scope."""
    with variable_scope.variable_scope('scope'):
      height, width = 7, 9
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)
  def testFunctionalConv2DInitializerFromScope(self):
    """Kernels pick up the scope initializer; bias stays zero-initialized."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        height, width = 7, 9
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('depthwise_kernel' in weights[0].name)
        self.assertTrue('pointwise_kernel' in weights[1].name)
        self.assertTrue('bias' in weights[2].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
        self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[2], np.zeros((32)))
  def testFunctionalConv2DNoReuse(self):
    """Without reuse, a second call creates 3 more variables (6 total)."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.separable_conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 6)
  def testSeparableConv2DDepthwiseRegularizer(self):
    """Depthwise regularizer loss is collected in REGULARIZATION_LOSSES."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], depthwise_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testSeparableConv2DPointwiseRegularizer(self):
    """Pointwise regularizer loss is collected in REGULARIZATION_LOSSES."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], pointwise_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testSeparableConv2DBiasRegularizer(self):
    """Bias regularizer loss is collected in REGULARIZATION_LOSSES."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testSeparableConv2DNoBias(self):
    """use_bias=False leaves layer.bias unset; shapes otherwise unchanged."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'separable_conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertEqual(layer.bias, None)
  def testConstraints(self):
    """Constraint callables are retained on the layer after it is built."""
    d_constraint = lambda x: x / math_ops.reduce_sum(x)
    p_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = conv_layers.SeparableConv2D(2, 3,
                                        depthwise_constraint=d_constraint,
                                        pointwise_constraint=p_constraint,
                                        bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.depthwise_constraint, d_constraint)
    self.assertEqual(layer.pointwise_constraint, p_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
@test_util.with_c_api
class Conv2DTransposeTest(test.TestCase):
  """Tests for conv_layers.Conv2DTranspose and the conv2d_transpose helper."""
  def testInvalidDataFormat(self):
    """An unrecognized data_format string raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')
  def testInvalidStrides(self):
    """strides of wrong rank or None raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d_transpose(images, 32, 3, strides=None)
  def testInvalidKernelSize(self):
    """kernel_size of wrong rank or None raises ValueError."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d_transpose(images, 32, None)
  def testCreateConv2DTranspose(self):
    """Transpose conv with 3x3 'valid' kernel grows each spatial dim by 2."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
    # Transposed kernel stores [h, w, out_channels, in_channels].
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testConv2DTransposeFloat16(self):
    """float16 inputs are supported and produce the same output shape."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
    output = conv_layers.conv2d_transpose(images, 32, [3, 3],
                                          activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
  def testCreateConv2DTransposeIntegerKernelSize(self):
    """A scalar kernel_size is broadcast to both spatial dims."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2DTranspose(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testCreateConv2DTransposeChannelsFirst(self):
    """channels_first puts the 32 output channels in axis 1."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height + 2, width + 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
  def testConv2DTransposePaddingSame(self):
    """'same' padding preserves the spatial dimensions."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2DTranspose(
        64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
  def testCreateConv2DTransposeWithStrides(self):
    """Strides upsample: tuple, scalar, and unequal stride cases."""
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width * 2, 32])
    # Test strides integer
    layer = conv_layers.Conv2DTranspose(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width * 2, 32])
    # Test unequal strides
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height * 2, width, 32])
  def testConv2DTransposeKernelRegularizer(self):
    """Kernel regularizer loss is collected in REGULARIZATION_LOSSES."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2DTranspose(32, [3, 3], kernel_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv2DTransposeBiasRegularizer(self):
    """Bias regularizer loss is collected in REGULARIZATION_LOSSES."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2DTranspose(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv2DTransposeNoBias(self):
    """use_bias=False leaves layer.bias unset; shapes otherwise unchanged."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2DTranspose(
        32, [3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height + 2, width + 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
    self.assertEqual(layer.bias, None)
  def testFunctionalConv2DTransposeReuse(self):
    """reuse=True shares the 2 variables (kernel, bias)."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv2DTransposeReuseFromScope(self):
    """Variable reuse also works via an enclosing reusing variable_scope."""
    with variable_scope.variable_scope('scope'):
      height, width = 7, 9
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv2DTransposeInitializerFromScope(self):
    """Kernel picks up the scope initializer; bias stays zero-initialized."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        height, width = 7, 9
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('kernel' in weights[0].name)
        self.assertTrue('bias' in weights[1].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 32, 3)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[1], np.zeros((32)))
  def testFunctionalConv2DTransposeNoReuse(self):
    """Without reuse, a second call creates 2 more variables (4 total)."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d_transpose(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d_transpose(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 4)
  def testConstraints(self):
    """Constraint callables are retained on the layer after it is built."""
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = conv_layers.Conv2DTranspose(2, 3,
                                        kernel_constraint=k_constraint,
                                        bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.kernel_constraint, k_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
@test_util.with_c_api
class Conv3DTransposeTest(test.TestCase):
  """Tests for conv_layers.Conv3DTranspose and the conv3d_transpose helper."""
  def testInvalidDataFormat(self):
    """An unrecognized data_format string raises ValueError."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid')
  def testInvalidStrides(self):
    """strides of wrong rank or None raises ValueError."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
  def testInvalidKernelSize(self):
    """kernel_size of wrong rank or None raises ValueError."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv3d_transpose(volumes, 4, (1, 2))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv3d_transpose(volumes, 4, None)
  def testCreateConv3DTranspose(self):
    """Transpose conv with 3x3x3 'valid' kernel grows each spatial dim by 2."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32))
    layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=nn_ops.relu)
    output = layer.apply(volumes)
    self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth + 2, height + 2, width + 2, 4])
    # Transposed kernel stores [d, h, w, out_channels, in_channels].
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [4])
  def testCreateConv3DTransposeIntegerKernelSize(self):
    """A scalar kernel_size is broadcast to all three spatial dims."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32))
    layer = conv_layers.Conv3DTranspose(4, 3)
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth + 2, height + 2, width + 2, 4])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [4])
  def testCreateConv3DTransposeChannelsFirst(self):
    """channels_first puts the 4 output channels in axis 1."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, 32, depth, height, width))
    layer = conv_layers.Conv3DTranspose(
        4, [3, 3, 3], data_format='channels_first')
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 4, depth + 2, height + 2, width + 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [4])
  def testConv3DTransposePaddingSame(self):
    """'same' padding preserves the spatial dimensions."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 64), seed=1)
    layer = conv_layers.Conv3DTranspose(
        32, volumes.get_shape()[1:4], padding='same')
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth, height, width, 32])
  def testCreateConv3DTransposeWithStrides(self):
    """Strides upsample: tuple, scalar, and unequal stride cases."""
    depth, height, width = 4, 6, 8
    # Test strides tuple.
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    layer = conv_layers.Conv3DTranspose(
        4, [3, 3, 3], strides=(2, 2, 2), padding='same')
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth * 2, height * 2, width * 2, 4])
    # Test strides integer.
    layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], strides=2, padding='same')
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth * 2, height * 2, width * 2, 4])
    # Test unequal strides.
    layer = conv_layers.Conv3DTranspose(
        4, [3, 3, 3], strides=(2, 1, 1), padding='same')
    output = layer.apply(volumes)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth * 2, height, width, 4])
  def testConv3DTransposeKernelRegularizer(self):
    """Kernel regularizer loss is collected in REGULARIZATION_LOSSES."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], kernel_regularizer=reg)
    layer.apply(volumes)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv3DTransposeBiasRegularizer(self):
    """Bias regularizer loss is collected in REGULARIZATION_LOSSES."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], bias_regularizer=reg)
    layer.apply(volumes)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
  def testConv3DTransposeNoBias(self):
    """use_bias=False leaves layer.bias unset; shapes otherwise unchanged."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32))
    layer = conv_layers.Conv3DTranspose(
        4, [3, 3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(volumes)
    self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth + 2, height + 2, width + 2, 4])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertEqual(layer.bias, None)
  def testFunctionalConv3DTransposeReuse(self):
    """reuse=True shares the 2 variables (kernel, bias)."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv3d_transpose(
        volumes, 4, [3, 3, 3], name='deconv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv3DTransposeReuseFromScope(self):
    """Variable reuse also works via an enclosing reusing variable_scope."""
    with variable_scope.variable_scope('scope'):
      depth, height, width = 5, 7, 9
      volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
      conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
  def testFunctionalConv3DTransposeInitializerFromScope(self):
    """Kernel picks up the scope initializer; bias stays zero-initialized."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        depth, height, width = 5, 7, 9
        volumes = random_ops.random_uniform(
            (5, depth, height, width, 32), seed=1)
        conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('kernel' in weights[0].name)
        self.assertTrue('bias' in weights[1].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[1], np.zeros((4)))
  def testFunctionalConv3DTransposeNoReuse(self):
    """Without reuse, a second call creates 2 more variables (4 total)."""
    depth, height, width = 5, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
    conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
    self.assertEqual(len(variables.trainable_variables()), 4)
  def testConstraints(self):
    """Constraint callables are retained on the layer after it is built."""
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = conv_layers.Conv3DTranspose(2, 3,
                                        kernel_constraint=k_constraint,
                                        bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.kernel_constraint, k_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
# Run the full test suite when the module is executed directly.
if __name__ == '__main__':
  test.main()
| allenlavoie/tensorflow | tensorflow/python/layers/convolutional_test.py | Python | apache-2.0 | 50,372 |
"""Helpers for config validation using voluptuous."""
import inspect
import logging
import os
import re
from datetime import (timedelta, datetime as datetime_sys,
time as time_sys, date as date_sys)
from socket import _GLOBAL_DEFAULT_TIMEOUT
from typing import Any, Union, TypeVar, Callable, Sequence, Dict, Optional
from urllib.parse import urlparse
import voluptuous as vol
from pkg_resources import parse_version
import homeassistant.util.dt as dt_util
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, CONF_TIMEOUT, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC,
ENTITY_MATCH_ALL, CONF_ENTITY_NAMESPACE, __version__)
from homeassistant.core import valid_entity_id, split_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template as template_helper
from homeassistant.helpers.logging import KeywordStyleAdapter
from homeassistant.util import slugify as util_slugify
# pylint: disable=invalid-name
# Error template for time_period_str below.
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
# Legacy validation patterns accepted during the deprecation window.
OLD_SLUG_VALIDATION = r'^[a-z0-9_]+$'
OLD_ENTITY_ID_VALIDATION = r"^(\w+)\.(\w+)$"
# Keep track of invalid slugs and entity ids found so we can create a
# persistent notification. Rare temporary exception to use a global.
INVALID_SLUGS_FOUND = {}
INVALID_ENTITY_IDS_FOUND = {}
INVALID_EXTRA_KEYS_FOUND = []
# Home Assistant types
# Reusable voluptuous validators shared by component config schemas.
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
                   msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
                    msg='invalid longitude')
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
T = TypeVar('T')
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
    """Validate that at least one key exists."""
    def validate(obj: Dict) -> Dict:
        """Test keys exist in dict."""
        if not isinstance(obj, dict):
            raise vol.Invalid('expected dictionary')
        # Accept as soon as any required key is present.
        if any(key in obj for key in keys):
            return obj
        raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
    return validate
def has_at_most_one_key(*keys: str) -> Callable:
    """Validate that zero keys exist or one key exists."""
    def validate(obj: Dict) -> Dict:
        """Test zero keys exist or one key exists in dict."""
        if not isinstance(obj, dict):
            raise vol.Invalid('expected dictionary')
        present = [key for key in keys if key in obj]
        if len(present) > 1:
            raise vol.Invalid(
                'must contain at most one of {}.'.format(', '.join(keys))
            )
        return obj
    return validate
def boolean(value: Any) -> bool:
    """Validate and coerce a boolean value."""
    # Non-strings fall through to plain truthiness coercion.
    if not isinstance(value, str):
        return bool(value)
    normalized = value.lower()
    if normalized in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if normalized in ('0', 'false', 'no', 'off', 'disable'):
        return False
    raise vol.Invalid('invalid boolean value {}'.format(value))
def isdevice(value):
    """Validate that value is a real device."""
    try:
        # stat() raises OSError if the path does not exist.
        os.stat(value)
    except OSError:
        raise vol.Invalid('No device at {} found'.format(value))
    return str(value)
def matches_regex(regex):
    """Validate that the value is a string that matches a regex."""
    compiled = re.compile(regex)
    def validator(value: Any) -> str:
        """Validate that value matches the given regex."""
        if not isinstance(value, str):
            raise vol.Invalid('not a string value: {}'.format(value))
        if compiled.match(value) is None:
            raise vol.Invalid('value {} does not match regular expression {}'
                              .format(value, compiled.pattern))
        return value
    return validator
def is_regex(value):
    """Validate that a string is a valid regular expression."""
    try:
        return re.compile(value)
    except TypeError:
        raise vol.Invalid("value {} is of the wrong type for a regular "
                          "expression".format(value))
    except re.error:
        raise vol.Invalid("value {} is not a valid regular expression".format(
            value))
def isfile(value: Any) -> str:
    """Validate that the value is an existing file."""
    if value is None:
        raise vol.Invalid('None is not file')
    candidate = os.path.expanduser(str(value))
    if not os.path.isfile(candidate):
        raise vol.Invalid('not a file')
    if not os.access(candidate, os.R_OK):
        raise vol.Invalid('file not readable')
    return candidate
def isdir(value: Any) -> str:
    """Validate that the value is an existing dir."""
    if value is None:
        raise vol.Invalid('not a directory')
    candidate = os.path.expanduser(str(value))
    if not os.path.isdir(candidate):
        raise vol.Invalid('not a directory')
    if not os.access(candidate, os.R_OK):
        raise vol.Invalid('directory not readable')
    return candidate
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
    """Wrap value in list if it is not one."""
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
def entity_id(value: Any) -> str:
    """Validate and lower-case an entity ID (e.g. 'light.kitchen').

    Old-style ids matching OLD_ENTITY_ID_VALIDATION are still accepted but
    recorded in INVALID_ENTITY_IDS_FOUND and a deprecation warning is logged.
    """
    value = string(value).lower()
    if valid_entity_id(value):
        return value
    if re.match(OLD_ENTITY_ID_VALIDATION, value):
        # To ease the breaking change, we allow old slugs for now
        # Remove after 0.94 or 1.0
        # Slugify only the domain and object_id parts; keep the dot.
        fixed = '.'.join(util_slugify(part) for part in value.split('.', 1))
        INVALID_ENTITY_IDS_FOUND[value] = fixed
        logging.getLogger(__name__).warning(
            "Found invalid entity_id %s, please update with %s. This "
            "will become a breaking change.",
            value, fixed
        )
        return value
    raise vol.Invalid('Entity ID {} is an invalid entity id'.format(value))
def entity_ids(value: Union[str, Sequence]) -> Sequence[str]:
    """Validate Entity IDs."""
    if value is None:
        raise vol.Invalid('Entity IDs can not be None')
    # A comma-separated string is split into individual ids first.
    if isinstance(value, str):
        value = [part.strip() for part in value.split(',')]
    return [entity_id(part) for part in value]
# Validator accepting either the ENTITY_MATCH_ALL keyword or entity id(s).
comp_entity_ids = vol.Any(
    vol.All(vol.Lower, ENTITY_MATCH_ALL),
    entity_ids
)
def entity_domain(domain: str):
    """Validate that entity belong to domain."""
    def validate(value: Any) -> str:
        """Test if entity domain is domain."""
        # Delegate to the multi-entity validator and unwrap the single result.
        return entities_domain(domain)(value)[0]
    return validate
def entities_domain(domain: str):
    """Validate that entities belong to domain."""
    def validate(values: Union[str, Sequence]) -> Sequence[str]:
        """Test if entity domain is domain."""
        checked = entity_ids(values)
        for ent_id in checked:
            if split_entity_id(ent_id)[0] != domain:
                raise vol.Invalid(
                    "Entity ID '{}' does not belong to domain '{}'"
                    .format(ent_id, domain))
        return checked
    return validate
def enum(enumClass):
    """Create validator for specified enum.

    The returned validator accepts a member name and yields the
    corresponding enum member via ``enumClass.__getitem__``.
    """
    return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value):
    """Validate icon.

    An icon must be of the form 'prefix:name' (e.g. 'mdi:home'); any value
    containing a colon after string coercion is accepted as-is.
    """
    str_value = str(value)
    if ':' in str_value:
        return str_value
    # Fixed typo in user-facing message: "specifed" -> "specified".
    raise vol.Invalid('Icons should be specified on the form "prefix:name"')
# Validator turning a {'days': .., 'hours': .., ...} mapping into a timedelta.
# At least one of the listed keys must be present.
time_period_dict = vol.All(
    dict, vol.Schema({
        'days': vol.Coerce(int),
        'hours': vol.Coerce(int),
        'minutes': vol.Coerce(int),
        'seconds': vol.Coerce(int),
        'milliseconds': vol.Coerce(int),
    }),
    has_at_least_one_key('days', 'hours', 'minutes',
                         'seconds', 'milliseconds'),
    lambda value: timedelta(**value))
def time(value) -> time_sys:
    """Validate and transform a time."""
    if isinstance(value, time_sys):
        return value

    try:
        parsed = dt_util.parse_time(value)
    except TypeError:
        raise vol.Invalid('Not a parseable type')

    if parsed is None:
        raise vol.Invalid('Invalid time specified: {}'.format(value))
    return parsed
def date(value) -> date_sys:
    """Validate and transform a date."""
    if isinstance(value, date_sys):
        return value

    try:
        parsed = dt_util.parse_date(value)
    except TypeError:
        raise vol.Invalid('Not a parseable type')

    if parsed is None:
        raise vol.Invalid("Could not parse date")
    return parsed
def time_period_str(value: str) -> timedelta:
    """Validate and transform a "[+/-]HH:MM[:SS]" time offset string."""
    if isinstance(value, int):
        raise vol.Invalid('Make sure you wrap time values in quotes')
    if not isinstance(value, str):
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    # Consume an optional leading sign; '-' flips the final offset.
    sign = 1
    raw = value
    if raw.startswith('-'):
        sign = -1
        raw = raw[1:]
    elif raw.startswith('+'):
        raw = raw[1:]

    try:
        fields = [int(part) for part in raw.split(':')]
    except ValueError:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    if len(fields) == 2:
        hours, minutes = fields
        seconds = 0
    elif len(fields) == 3:
        hours, minutes, seconds = fields
    else:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    return sign * timedelta(hours=hours, minutes=minutes, seconds=seconds)
def time_period_seconds(value: Union[int, str]) -> timedelta:
    """Validate and transform seconds to a time offset."""
    try:
        seconds = int(value)
    except (ValueError, TypeError):
        raise vol.Invalid('Expected seconds, got {}'.format(value))
    return timedelta(seconds=seconds)
# Accept a "HH:MM[:SS]" string, a number of seconds, a timedelta or a dict.
time_period = vol.Any(time_period_str, time_period_seconds, timedelta,
                      time_period_dict)
def match_all(value):
    """Validate that matches all values.

    Identity validator: accepts any value unchanged. Used e.g. as a
    schema *key* (see EVENT_SCHEMA) to mean "any key is allowed".
    """
    return value
def positive_timedelta(value: timedelta) -> timedelta:
    """Validate timedelta is positive (not negative)."""
    if value >= timedelta(0):
        return value
    raise vol.Invalid('Time period should be positive')
def service(value):
    """Validate service."""
    # Services use same format as entities so we can use same helper.
    if not valid_entity_id(value):
        raise vol.Invalid('Service {} does not match format <domain>.<name>'
                          .format(value))
    return value
def schema_with_slug_keys(value_schema: Union[T, Callable]) -> Callable:
    """Ensure dicts have slugs as keys.

    Replacement of vol.Schema({cv.slug: value_schema}) to prevent misleading
    "Extra keys" errors from voluptuous.
    """
    schema = vol.Schema({str: value_schema})

    def verify(value: Dict) -> Dict:
        """Validate all keys are slugs and then the value_schema."""
        if not isinstance(value, dict):
            raise vol.Invalid('expected dictionary')

        for key in value.keys():
            try:
                slug(key)
            except vol.Invalid:
                # To ease the breaking change, we allow old slugs for now
                # Remove after 0.94 or 1.0
                if re.match(OLD_SLUG_VALIDATION, key):
                    fixed = util_slugify(key)
                    INVALID_SLUGS_FOUND[key] = fixed
                    # Fixed grammar in the log message ("will be come").
                    logging.getLogger(__name__).warning(
                        "Found invalid slug %s, please update with %s. This "
                        "will become a breaking change.",
                        key, fixed
                    )
                else:
                    raise
        return schema(value)
    return verify
def slug(value: Any) -> str:
    """Validate value is a valid slug (already in slug form)."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    text = str(value)
    slugified = util_slugify(text)
    if text != slugified:
        raise vol.Invalid('invalid slug {} (try {})'.format(text, slugified))
    return text
def slugify(value: Any) -> str:
    """Coerce a value to a slug."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    slugified = util_slugify(str(value))
    if not slugified:
        raise vol.Invalid('Unable to slugify {}'.format(value))
    return slugified
def string(value: Any) -> str:
    """Coerce value to string, except for None.

    Lists and dicts are rejected explicitly so that container configs do
    not silently turn into their repr().
    """
    if value is None:
        raise vol.Invalid('string value is None')
    if isinstance(value, (list, dict)):
        raise vol.Invalid('value should be a string')

    return str(value)
def temperature_unit(value) -> str:
    """Validate and transform temperature unit."""
    unit = str(value).upper()
    try:
        return {'C': TEMP_CELSIUS, 'F': TEMP_FAHRENHEIT}[unit]
    except KeyError:
        raise vol.Invalid('invalid temperature unit (expected C or F)')
# Case-insensitive choice between the metric and imperial unit systems.
unit_system = vol.All(vol.Lower, vol.Any(CONF_UNIT_SYSTEM_METRIC,
                                         CONF_UNIT_SYSTEM_IMPERIAL))
def template(value):
    """Validate a jinja2 template."""
    if value is None:
        raise vol.Invalid('template value is None')
    if isinstance(value, (list, dict, template_helper.Template)):
        raise vol.Invalid('template value should be a string')

    tpl = template_helper.Template(str(value))
    try:
        tpl.ensure_valid()
    except TemplateError as ex:
        raise vol.Invalid('invalid template ({})'.format(ex))
    return tpl
def template_complex(value):
    """Validate a complex jinja2 template (nested lists/dicts of templates)."""
    if isinstance(value, list):
        # copy() preserves the concrete container type (e.g. OrderedDict
        # below), so only the elements are replaced in place.
        rendered = value.copy()
        for idx, item in enumerate(rendered):
            rendered[idx] = template_complex(item)
        return rendered
    if isinstance(value, dict):
        rendered = value.copy()
        for key in rendered:
            rendered[key] = template_complex(rendered[key])
        return rendered

    return template(value)
def datetime(value):
    """Validate datetime."""
    if isinstance(value, datetime_sys):
        return value

    try:
        parsed = dt_util.parse_datetime(value)
    except TypeError:
        parsed = None

    if parsed is None:
        raise vol.Invalid('Invalid datetime specified: {}'.format(value))
    return parsed
def time_zone(value):
    """Validate timezone."""
    if dt_util.get_time_zone(value) is None:
        raise vol.Invalid(
            'Invalid time zone passed in. Valid options can be found here: '
            'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
    return value
# Validate a list of weekday names (a single value is wrapped into a list).
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value):
"""Validate timeout float > 0.0.
None coerced to socket._GLOBAL_DEFAULT_TIMEOUT bare object.
"""
if value is None:
return _GLOBAL_DEFAULT_TIMEOUT
try:
float_value = float(value)
if float_value > 0.0:
return float_value
raise vol.Invalid('Invalid socket timeout value.'
' float > 0.0 required.')
except Exception as _:
raise vol.Invalid('Invalid socket timeout: {err}'.format(err=_))
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
    """Validate an URL (http or https only)."""
    candidate = str(value)
    if urlparse(candidate).scheme not in ['http', 'https']:
        raise vol.Invalid('invalid url')
    return vol.Schema(vol.Url())(candidate)
def x10_address(value):
    """Validate an x10 address (house letter A-P plus unit number 1-16)."""
    # re.match anchors at the start; the module-level pattern cache makes a
    # separate re.compile unnecessary.
    if not re.match(r'([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$', value):
        raise vol.Invalid('Invalid X10 Address')
    return str(value).lower()
def ensure_list_csv(value: Any) -> Sequence:
    """Ensure that input is a list or make one from comma-separated string."""
    if not isinstance(value, str):
        return ensure_list(value)
    return [member.strip() for member in value.split(',')]
def deprecated(key: str,
               replacement_key: Optional[str] = None,
               invalidation_version: Optional[str] = None,
               default: Optional[Any] = None):
    """
    Log key as deprecated and provide a replacement (if exists).

    Expected behavior:
        - Outputs the appropriate deprecation warning if key is detected
        - Processes schema moving the value from key to replacement_key
        - Processes schema changing nothing if only replacement_key provided
        - No warning if only replacement_key provided
        - No warning if neither key nor replacement_key are provided
          - Adds replacement_key with default value in this case
        - Once the invalidation_version is crossed, raises vol.Invalid if key
          is detected
    """
    # Resolve the *caller's* module so warnings are logged under its logger.
    module_name = inspect.getmodule(inspect.stack()[1][0]).__name__
    # Pick the warning template up front; it is formatted lazily later.
    if replacement_key and invalidation_version:
        warning = ("The '{key}' option (with value '{value}') is"
                   " deprecated, please replace it with '{replacement_key}'."
                   " This option will become invalid in version"
                   " {invalidation_version}")
    elif replacement_key:
        warning = ("The '{key}' option (with value '{value}') is"
                   " deprecated, please replace it with '{replacement_key}'")
    elif invalidation_version:
        warning = ("The '{key}' option (with value '{value}') is"
                   " deprecated, please remove it from your configuration."
                   " This option will become invalid in version"
                   " {invalidation_version}")
    else:
        warning = ("The '{key}' option (with value '{value}') is"
                   " deprecated, please remove it from your configuration")
    def check_for_invalid_version(value: Optional[Any]):
        """Raise error if current version has reached invalidation."""
        if not invalidation_version:
            return
        if parse_version(__version__) >= parse_version(invalidation_version):
            raise vol.Invalid(
                warning.format(
                    key=key,
                    value=value,
                    replacement_key=replacement_key,
                    invalidation_version=invalidation_version
                )
            )
    def validator(config: Dict):
        """Check if key is in config and log warning."""
        if key in config:
            value = config[key]
            check_for_invalid_version(value)
            KeywordStyleAdapter(logging.getLogger(module_name)).warning(
                warning,
                key=key,
                value=value,
                replacement_key=replacement_key,
                invalidation_version=invalidation_version
            )
            if replacement_key:
                config.pop(key)
        else:
            value = default
        # Move/copy the deprecated value to replacement_key unless the
        # caller already supplied a non-default value for it.
        if (replacement_key
                and (replacement_key not in config
                     or default == config.get(replacement_key))
                and value is not None):
            config[replacement_key] = value
        return has_at_most_one_key(key, replacement_key)(config)
    return validator
# Validator helpers
def key_dependency(key, dependency):
    """Validate that *dependency* is present whenever *key* is present."""
    def validator(value):
        """Test dependencies."""
        if not isinstance(value, dict):
            raise vol.Invalid('key dependencies require a dict')
        has_key = key in value
        has_dependency = dependency in value
        if has_key and not has_dependency:
            raise vol.Invalid('dependency violation - key "{}" requires '
                              'key "{}" to exist'.format(key, dependency))
        return value
    return validator
# Schemas
class HASchema(vol.Schema):
    """Schema class that allows us to mark PREVENT_EXTRA errors as warnings.

    On validation failure with extra keys, the data is re-validated with
    ALLOW_EXTRA and only a warning is logged, so legacy configs keep
    working during the deprecation period.
    """
    def __call__(self, data):
        """Override __call__ to mark PREVENT_EXTRA as warning."""
        try:
            return super().__call__(data)
        except vol.Invalid as orig_err:
            if self.extra != vol.PREVENT_EXTRA:
                raise
            # orig_error is of type vol.MultipleInvalid (see super __call__)
            assert isinstance(orig_err, vol.MultipleInvalid)
            # pylint: disable=no-member
            # If it fails with PREVENT_EXTRA, try with ALLOW_EXTRA
            # NOTE(review): self.extra is mutated temporarily here; this is
            # not safe under concurrent validation of the same schema
            # instance - confirm single-threaded use.
            self.extra = vol.ALLOW_EXTRA
            # In case it still fails the following will raise
            try:
                validated = super().__call__(data)
            finally:
                self.extra = vol.PREVENT_EXTRA
            # This is a legacy config, print warning
            extra_key_errs = [err for err in orig_err.errors
                              if err.error_message == 'extra keys not allowed']
            if extra_key_errs:
                msg = "Your configuration contains extra keys " \
                      "that the platform does not support.\n" \
                      "Please remove "
                submsg = ', '.join('[{}]'.format(err.path[-1]) for err in
                                   extra_key_errs)
                submsg += '. '
                if hasattr(data, '__config_file__'):
                    submsg += " (See {}, line {}). ".format(
                        data.__config_file__, data.__line__)
                msg += submsg
                logging.getLogger(__name__).warning(msg)
                INVALID_EXTRA_KEYS_FOUND.append(submsg)
            else:
                # This should not happen (all errors should be extra key
                # errors). Let's raise the original error anyway.
                raise orig_err
            # Return legacy validated config
            return validated
    def extend(self, schema, required=None, extra=None):
        """Extend this schema and convert it to HASchema if necessary."""
        ret = super().extend(schema, required=required, extra=extra)
        if extra is not None:
            return ret
        # No explicit extra behavior requested: keep ours and stay HASchema.
        return HASchema(ret.schema, required=required, extra=self.extra)
# Base schema shared by all platform configurations.
PLATFORM_SCHEMA = HASchema({
    vol.Required(CONF_PLATFORM): string,
    vol.Optional(CONF_ENTITY_NAMESPACE): string,
    vol.Optional(CONF_SCAN_INTERVAL): time_period
})
# Same as PLATFORM_SCHEMA but tolerates platform-specific extra keys.
PLATFORM_SCHEMA_BASE = PLATFORM_SCHEMA.extend({
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required('event'): string,
    vol.Optional('event_data'): dict,
    vol.Optional('event_data_template'): {match_all: template_complex}
})
# Exactly one of 'service' / 'service_template' must be given.
SERVICE_SCHEMA = vol.All(vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Exclusive('service', 'service name'): service,
    vol.Exclusive('service_template', 'service name'): template,
    vol.Optional('data'): dict,
    vol.Optional('data_template'): {match_all: template_complex},
    vol.Optional(CONF_ENTITY_ID): comp_entity_ids,
}), has_at_least_one_key('service', 'service_template'))
# Condition schemas; they are combined into CONDITION_SCHEMA below.
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'numeric_state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    CONF_BELOW: vol.Coerce(float),
    CONF_ABOVE: vol.Coerce(float),
    vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    vol.Required('state'): str,
    vol.Optional('for'): vol.All(time_period, positive_timedelta),
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'sun',
    vol.Optional('before'): sun_event,
    vol.Optional('before_offset'): time_period,
    vol.Optional('after'): vol.All(vol.Lower, vol.Any(
        SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)),
    vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'time',
    'before': time,
    'after': time,
    'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'zone',
    vol.Required(CONF_ENTITY_ID): entity_id,
    'zone': entity_id,
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('event'): vol.Any('enter', 'leave'),
})
# The lambdas below defer to CONDITION_SCHEMA, which is defined afterwards,
# allowing 'and'/'or' conditions to nest arbitrary conditions.
AND_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'and',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})
OR_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'or',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})
CONDITION_SCHEMA = vol.Any(
    NUMERIC_STATE_CONDITION_SCHEMA,
    STATE_CONDITION_SCHEMA,
    SUN_CONDITION_SCHEMA,
    TEMPLATE_CONDITION_SCHEMA,
    TIME_CONDITION_SCHEMA,
    ZONE_CONDITION_SCHEMA,
    AND_CONDITION_SCHEMA,
    OR_CONDITION_SCHEMA,
)
# Script step schemas.
_SCRIPT_DELAY_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("delay"): vol.Any(
        vol.All(time_period, positive_timedelta),
        template, template_complex)
})
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("wait_template"): template,
    vol.Optional(CONF_TIMEOUT): vol.All(time_period, positive_timedelta),
    vol.Optional("continue_on_timeout"): boolean,
})
# A script is a list whose steps may be any of the step schemas above.
SCRIPT_SCHEMA = vol.All(
    ensure_list,
    [vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA,
             _SCRIPT_WAIT_TEMPLATE_SCHEMA, EVENT_SCHEMA, CONDITION_SCHEMA)],
)
| HydrelioxGitHub/home-assistant | homeassistant/helpers/config_validation.py | Python | apache-2.0 | 26,479 |
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# Symbolic constants shared across the Cisco Neutron plugin code.
# Attachment attributes
INSTANCE_ID = 'instance_id'
TENANT_ID = 'tenant_id'
TENANT_NAME = 'tenant_name'
HOST_NAME = 'host_name'
# Network attributes
NET_ID = 'id'
NET_NAME = 'name'
NET_VLAN_ID = 'vlan_id'
NET_VLAN_NAME = 'vlan_name'
NET_PORTS = 'ports'
# Credential attributes
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
CREDENTIAL_TYPE = 'type'
# Placeholder presumably substituted for real passwords in output -
# confirm at call sites.
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
NEXUS_PLUGIN = 'nexus_plugin'
VSWITCH_PLUGIN = 'vswitch_plugin'
DEVICE_IP = 'device_ip'
NETWORK_ADMIN = 'network_admin'
NETWORK = 'network'
PORT = 'port'
BASE_PLUGIN_REF = 'base_plugin_ref'
CONTEXT = 'context'
SUBNET = 'subnet'
#### N1Kv CONSTANTS
# Special vlan_id value in n1kv_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Maximum VXLAN range configurable for one network profile.
MAX_VXLAN_RANGE = 1000000
# Values for network_type
NETWORK_TYPE_FLAT = 'flat'
NETWORK_TYPE_VLAN = 'vlan'
NETWORK_TYPE_VXLAN = 'vxlan'
NETWORK_TYPE_LOCAL = 'local'
NETWORK_TYPE_NONE = 'none'
NETWORK_TYPE_TRUNK = 'trunk'
NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
# Values for network sub_type
NETWORK_TYPE_OVERLAY = 'overlay'
NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan'
NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN
NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY
# Prefix for VM Network name
VM_NETWORK_NAME_PREFIX = 'vmn_'
# Default HTTP timeout (presumably seconds - confirm against users).
DEFAULT_HTTP_TIMEOUT = 15
# Generic dictionary/attribute key constants
SET = 'set'
INSTANCE = 'instance'
PROPERTIES = 'properties'
NAME = 'name'
ID = 'id'
POLICY = 'policy'
TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
ENCAPSULATIONS = 'encapsulations'
STATE = 'state'
ONLINE = 'online'
MAPPINGS = 'mappings'
MAPPING = 'mapping'
SEGMENTS = 'segments'
SEGMENT = 'segment'
# Naming suffixes for derived object names
BRIDGE_DOMAIN_SUFFIX = '_bd'
LOGICAL_NETWORK_SUFFIX = '_log_net'
ENCAPSULATION_PROFILE_SUFFIX = '_profile'
UUID_LENGTH = 36
# Nexus vlan and vxlan segment range
NEXUS_VLAN_RESERVED_MIN = 3968
NEXUS_VLAN_RESERVED_MAX = 4047
NEXUS_VXLAN_MIN = 4096
NEXUS_VXLAN_MAX = 16000000
| onecloud/neutron | neutron/plugins/cisco/common/cisco_constants.py | Python | apache-2.0 | 2,838 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration adding ``ExecutionCommandServer.celery_task_id``."""
    def forwards(self, orm):
        """Apply the migration: add the new column."""
        # Adding field 'ExecutionCommandServer.celery_task_id'
        db.add_column(u'task_executioncommandserver', 'celery_task_id',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=36, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'ExecutionCommandServer.celery_task_id'
        db.delete_column(u'task_executioncommandserver', 'celery_task_id')
    # Frozen ORM state (generated by South's --auto freeze); do not edit by hand.
    models = {
        u'account.customuser': {
            'Meta': {'object_name': 'CustomUser'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.application': {
            'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'Application'},
            'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': u"orm['core.Department']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'core.department': {
            'Meta': {'object_name': 'Department'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'core.environment': {
            'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Environment'},
            'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['core.Application']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_production': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'core.server': {
            'Meta': {'unique_together': "(('environment', 'name'),)", 'object_name': 'Server'},
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['core.Environment']"}),
            'host': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'servers'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'core.serverrole': {
            'Meta': {'object_name': 'ServerRole'},
            'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'serverroles'", 'to': u"orm['core.Department']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
        },
        u'task.execution': {
            'Meta': {'object_name': 'Execution'},
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['core.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['task.Task']"}),
            'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'time_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['account.CustomUser']"})
        },
        u'task.executioncommand': {
            'Meta': {'object_name': 'ExecutionCommand'},
            'command': ('django.db.models.fields.TextField', [], {}),
            'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Execution']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.ServerRole']", 'symmetrical': 'False'})
        },
        u'task.executioncommandserver': {
            'Meta': {'object_name': 'ExecutionCommandServer'},
            'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'execution_command': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['task.ExecutionCommand']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'output': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'return_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'server': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Server']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'task.executionlivelog': {
            'Meta': {'object_name': 'ExecutionLiveLog'},
            'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'event': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'live_logs'", 'to': u"orm['task.Execution']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'task.executionparameter': {
            'Meta': {'object_name': 'ExecutionParameter'},
            'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Execution']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'task.task': {
            'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Task'},
            'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['core.Application']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'task.taskcommand': {
            'Meta': {'object_name': 'TaskCommand'},
            'command': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'commands'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Task']"})
        },
        u'task.taskparameter': {
            'Meta': {'object_name': 'TaskParameter'},
            'default_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Task']"})
        }
    }
    complete_apps = ['task']
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import gettext
from enum import Enum, unique
# Shorthand for gettext translation lookup.
_ = gettext.gettext
# Human-readable labels indexed by Strategy member value (see Strategy.describe).
strategy_descriptions = [_("New resourcelist strategy"),
                         _("New changelist strategy"),
                         _("Incremental changelist strategy")]
@unique
class Strategy(Enum):
    """
    :samp:`Strategy for ResourceSync Publishing`
    """
    resourcelist = 0
    """
    ``0`` :samp:`New resourcelist {strategy}`

    Create new resourcelist(s) every run.
    """
    new_changelist = 1
    """
    ``1`` :samp:`New changelist {strategy}`

    Create a new changelist every run.
    If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
    """
    inc_changelist = 2
    """
    ``2`` :samp:`Incremental changelist {strategy}`

    Add changes to an existing changelist. If no changelist exists, create a new one.
    If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
    """
    # resourcedump = 3 # not implemented
    # changedump = 4 # not implemented

    @staticmethod
    def names():
        """
        :samp:`Get Strategy names`

        :return: List<str> of names
        """
        # dir() is version-dependent (Python 3.11+ also lists methods);
        # __members__ reliably yields exactly the enum members. Sorted to
        # keep the historical alphabetical dir() ordering.
        return sorted(Strategy.__members__)

    @staticmethod
    def sanitize(name):
        """
        :samp:`Verify a {Strategy} name`

        :param str name: string to test
        :return: name if it is the name of a strategy
        :raises: :exc:`ValueError` if the given name is not the name of a strategy
        """
        try:
            return Strategy[name].name
        except KeyError as err:
            raise ValueError(err)

    @staticmethod
    def strategy_for(value):
        """
        :samp:`Get a Strategy for the given value`

        :param value: may be :class:`Strategy`, str or int
        :return: :class:`Strategy`
        :raises: :exc:`ValueError` if the given value could not be converted to a :class:`Strategy`
        """
        try:
            if isinstance(value, Strategy):
                return value
            elif isinstance(value, int):
                return Strategy(value)
            else:
                return Strategy[value]
        except KeyError as err:
            raise ValueError(err)

    def describe(self):
        """Return the (translated) human-readable description of this strategy."""
        return strategy_descriptions[self.value]
class Capability(Enum):
    """
    :samp:`Capabilities as defined in the ResourceSync Framework`

    Member names correspond to the capability names used in ResourceSync
    documents; the integer values are internal ordinals.
    """
    resourcelist = 0
    """
    ``0`` :samp:`resourcelist`
    """
    changelist = 1
    """
    ``1`` :samp:`changelist`
    """
    resourcedump = 2
    """
    ``2`` :samp:`resourcedump`
    """
    changedump = 3
    """
    ``3`` :samp:`changedump`
    """
    resourcedump_manifest = 4
    """
    ``4`` :samp:`resourcedump_manifest`
    """
    changedump_manifest = 5
    """
    ``5`` :samp:`changedump_manifest`
    """
    capabilitylist = 6
    """
    ``6`` :samp:`capabilitylist`
    """
    description = 7
    """
    ``7`` :samp:`description`
    """
class SelectMode(Enum):
    """
    :samp:`Mode of selection`
    """
    simple = 0
    selector = 1

    @staticmethod
    def names():
        """
        :samp:`Get SelectMode names`

        :return: List<str> of names
        """
        # dir() is version-dependent (Python 3.11+ also lists methods);
        # __members__ reliably yields exactly the enum members. Sorted to
        # keep the historical alphabetical dir() ordering.
        return sorted(SelectMode.__members__)

    @staticmethod
    def select_mode_for(mode):
        """Convert a SelectMode, int or str into a :class:`SelectMode`.

        :raises: :exc:`ValueError` if the value cannot be converted.
        """
        try:
            if isinstance(mode, SelectMode):
                return mode
            elif isinstance(mode, int):
                return SelectMode(mode)
            else:
                return SelectMode[mode]
        except KeyError as err:
            raise ValueError(err)
| EHRI/rspub-core | rspub/core/rs_enum.py | Python | apache-2.0 | 3,776 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
# Install the _() translation function into builtins for the whole package.
# NOTE(review): the 'unicode' keyword only exists on Python 2's
# gettext.install(); on Python 3 this call would raise TypeError - confirm
# the supported interpreter version before porting.
gettext.install('glance', unicode=1)
| tylertian/Openstack | openstack F/glance/glance/__init__.py | Python | apache-2.0 | 733 |
import os
import pickle
import random
import re
from datetime import datetime
from data_processing import (InteractiveAnswer, _in_list, colorit, space_fill,
split_wrd)
BOARDER_LENGTH = 40
class Quest():
    def __init__(self, q, sel=None, ta=None, args=None):
        '''
        Class representing a Question.

        Parameters
        ----------
        basic arguments:
            q : question. necessary. list.
            sel : selections. list.
            ta : true answer. list.
        extensable arguments:
            args : dict with sets of {'name': 'value'}.
        '''
        self.q = q
        self.sel = sel
        self.ta = ta
        # Build a fresh dict per instance; a mutable default argument
        # (`args={}`) would be shared by every Quest created without it.
        self.args = {} if args is None else args

    def __str__(self):
        '''Visualize the `Quest`.'''
        return '{\n\tq: %s,\n\tsel: %s,\n\tta: %s,\n\targs: %s\n}' % \
                (self.q, self.sel, self.ta, self.args)

    def __eq__(self, value):
        '''Evaluate whether two `Quest`s are equal, field by field.'''
        if type(value) != type(self): return False
        for i in ['q', 'sel', 'ta', 'args']:
            if self.__getattribute__(i) != value.__getattribute__(i):
                return False
        return True

    def __hash__(self):
        '''Hash from all fields, tolerating None sel/ta and dict args.'''
        def _fold(part):
            # None/empty -> '' so Quests created without selections or a
            # true answer (their defaults) are still hashable.
            if not part:
                return ''
            if isinstance(part, dict):
                # Sort the keys so that equal dicts with different
                # insertion order produce the same hash (__eq__ contract).
                return '\n'.join(sorted(part))
            return '\n'.join(part)
        return (hash(_fold(self.q)) + hash(_fold(self.sel)) +
                hash(_fold(self.ta)) + hash(_fold(self.args))) % int(1e+16)
class QuestForm(list):
    """A ``list`` of ``Quest`` objects with extra indexing support.

    Besides plain integer and slice indexing, a QuestForm accepts an
    iterable of integer positions and returns a new QuestForm with the
    selected items.  ``append`` returns ``self`` so calls can be chained.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getitem__(self, ind):
        if type(ind) == int:
            # Plain element access.
            return super().__getitem__(ind)
        if type(ind) == slice:
            # Keep the QuestForm type instead of decaying to list.
            return QuestForm(super().__getitem__(ind))
        # Fancy indexing: any iterable of integer positions.
        picked = QuestForm()
        for pos in ind:
            picked = picked.append(self[pos])
        return picked

    def append(self, *args, **kwargs):
        """Append like ``list.append`` but return ``self`` for chaining."""
        super().append(*args, **kwargs)
        return self
class QuestFormTextLoader():
    '''QuestForm Loader for text files.'''

    def __init__(self,
                 questpattern,
                 qpattern,
                 selpattern=None,
                 tapattern=None,
                 argpattern={}):
        '''
        Parameters
        ----------
        questpattern : regex pattern for a question. necessary.
        qpattern : regex pattern for question text in a question. necessary.
        selpattern : regex pattern for selections.
            a question can have several matching selections.
        tapattern : regex pattern for true answer.
        argpattern : dict with {'arg_name' : 'arg_regex'} sets.
        '''
        self.questpattern = questpattern
        self.qpattern = qpattern
        self.selpattern = selpattern
        self.tapattern = tapattern
        # Copy so the shared default dict (or the caller's dict) is never
        # aliased between loader instances.
        self.argpattern = dict(argpattern)
        self.is_cached = False

    def get_cached_qf(self, togo='Curdata.data'):
        '''Load cached QuestForm, interactively falling back to other
        `*.data` files in the current directory (or a fresh run).'''
        if togo in os.listdir():
            if InteractiveAnswer(
                    'Cached data found.Continue?', yes_or_no=True).get():
                with open(togo, 'rb') as f:
                    return pickle.load(f)
        else:
            datas = ["Create a new data"] + [
                i for i in os.listdir() if re.findall(r'.*\.data$', i)
            ]
            if not datas: return
            print("Cached data not found, listing other datas")
            for i in range(len(datas)):
                print('\t%3s: \t%s' % (i, datas[i]))
            no = InteractiveAnswer(
                'Which one to choose?',
                verify=range(len(datas)),
                serializer=
                lambda x: [int(i) for i in re.findall(r'[0-9]+', x)]).get()[0]
            if no == 0:
                return
            else:
                with open(datas[no], 'rb') as f:
                    return pickle.load(f)

    def _load(self, queststr):
        '''Parse `queststr` with the configured patterns into a QuestForm.'''
        questform = QuestForm()
        for quest in re.findall(self.questpattern, queststr):
            qitem = re.findall(self.qpattern, quest)
            selitem = re.findall(self.selpattern,
                                 quest) if self.selpattern else None
            taitem = re.findall(self.tapattern,
                                quest) if self.tapattern else None
            # Subscript the pattern dict (calling it raised TypeError) and
            # build a dict, which is what Quest.args consumers expect.
            argitem = {patnam: re.findall(self.argpattern[patnam], quest)
                       for patnam in self.argpattern} \
                if self.argpattern else {}
            questform = questform.append(
                Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
        return questform

    def load(self, queststr):
        '''Search queststr, match arguments and returns a QuestForm.'''
        qf = self.get_cached_qf()
        if qf is not None:
            self.is_cached = True
            return qf
        if 'MainData.data' in os.listdir():
            with open('MainData.data', 'rb') as f:
                qf = pickle.load(f)
        else:
            qf = self._load(queststr)
            with open('MainData.data', 'wb') as f:
                pickle.dump(qf, f)
        return qf
class QuestFormExcelLoader(QuestFormTextLoader):
    '''QuestForm Loader for excel files. Requires `pandas` module.'''

    def __init__(self, qcol, selcol=None, tacol=None, argcol={}):
        '''
        Parameters
        ----------
        qcol : column name of the question text. necessary.
        selcol : column name(s) of the selections.
        tacol : column name(s) of the true answer.
        argcol : dict with {'arg_name' : 'column_name'} sets.
        '''
        super(QuestFormExcelLoader, self).__init__(None, qcol, selcol, tacol,
                                                   argcol)

    def _load(self, questdf):
        '''Build a QuestForm from a DataFrame (or an excel file path).'''
        import pandas as pd
        if type(questdf) == str: questdf = pd.read_excel(questdf)
        questform = QuestForm()
        for q in range(len(questdf)):
            # `.ix` was removed in pandas 1.0; `.iloc` performs the
            # positional row lookup this loop needs.
            quest = questdf.iloc[q]
            qitem = quest[self.qpattern]
            selitem = quest[self.selpattern] if self.selpattern else None
            taitem = quest[self.tapattern] if self.tapattern else None
            argitem = {
                pat: quest[self.argpattern[pat]]
                for pat in self.argpattern
            } if self.argpattern else {}
            # Normalize scalars to one-element lists, keep None as None.
            qitem = None if qitem is None else ([qitem] if isinstance(
                qitem, str) else list(qitem))
            selitem = None if selitem is None else ([selitem] if isinstance(
                selitem, str) else list(selitem))
            taitem = None if taitem is None else ([taitem] if isinstance(
                taitem, str) else list(taitem))
            questform = questform.append(
                Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
        return questform
class BeginQuestForm():
    '''Class for rendering the exam.'''

    def __init__(self,
                 qf,
                 arrange='qast',
                 no_score=False,
                 input_manner=None,
                 no_filter=False,
                 storage='l|w',
                 filenames=['Curdata.data', 'Wrongdata.data']):
        '''
        Parameters
        ----------
        qf : QuestForm. The QuestForm that test on.
        storage : str with several units separated by `|`.
            each unit contains one or more of `cwol`.
            `c` indicates Quests that marked as correct.
            `w` indicates Quests that marked as false.
            `o` indicates Quests that marked as others.
            `l` indicates Quests that isn't marked.
        filenames : list with each element indicates the filename of
            the output of `storage` option.
        arrange : iterable. each element should be one argument in a `Quest` object.
            `question` indicates the question text.
            `args` indicates all args.
            `selections` indicates the question text.
            `trueanswer` indicates the trueanswer text.
            `label` may indicate the `lable` keyword in `args` child in `Quest`.
            If not ambiguous, you can use `q` or `que` to indicate `question`,
            or `a` to indicate `answer`.
        no_score : determines whether to skip recording the
            True/False/others score.
        input_manner : a class with a .get() method returns input text.
            designed for `InteractiveAnswer` class.
        no_filter : determines whether to filter the qf by `self.selchap`.
        '''
        self.qf = qf
        self.starttime = datetime.now()
        self.correct = []
        self.wrong = []
        self.other = []
        self.arrange = arrange
        self.storage = storage
        self.store_filenames = filenames
        self.no_score = no_score
        self.input_manner = input_manner
        self.status = []
        self.no_filter = no_filter

    def selchap(self, qf):
        '''
        Dummy function to select chapters (or filtering the QuestForm).
        Override this function to make it work.
        '''
        return qf

    def oninit(self):
        '''Things done on initialize'''
        if InteractiveAnswer('Randomize?', yes_or_no=True).get():
            random.shuffle(self.arranged_index)
        print('\n', '=' * BOARDER_LENGTH, '\n')
        print(
            space_fill(
                self.starttime.strftime('%Y-%m-%d %H:%M:%S'), BOARDER_LENGTH))
        print(space_fill('Find %d questions.' % (self.length), BOARDER_LENGTH))
        print(space_fill('start test.', BOARDER_LENGTH))
        print('\n', '=' * BOARDER_LENGTH, '\n')

    def _report(self):
        ''' Report prints.'''
        print('\n\n', '=' * BOARDER_LENGTH, '\n')
        usedtime = (datetime.now() - self.starttime).seconds
        (usedtime, s) = divmod(usedtime, 60)
        (h, m) = divmod(usedtime, 60)
        print(space_fill('Total Time: %d hours, %d minutes, %d seconds'\
                %(h, m, s) ,BOARDER_LENGTH))
        if self.no_score: pass
        elif len(self.correct) + len(self.wrong) != 0:
            c = len(self.correct)
            w = len(self.wrong)
            print('Correct: ', c)
            print('Wrong: ', w)
            print('Score: %.2f' % (c / (c + w) * 100))
        print('\n', '-' * BOARDER_LENGTH, '\n')
        self.show_status(h)
        print('\n', '=' * BOARDER_LENGTH, '\n')

    def onkill(self):
        ''' Things done on kill/interrupt.'''
        print('\n\n', '=' * BOARDER_LENGTH, '\n')
        print(space_fill('Interrupted', BOARDER_LENGTH))
        self._report()
        self.store_data(level=self.storage, filenames=self.store_filenames)
        return

    def onfinish(self):
        ''' Things done on finishing exam.'''
        print('\n\n', '=' * BOARDER_LENGTH, '\n')
        print(space_fill('Finished', BOARDER_LENGTH))
        self._report()
        self.store_data(level=self.storage, filenames=self.store_filenames)
        return

    def store_data(self,
                   filenames=['Curdata.data', 'Wrongdata.data'],
                   level='l|w'):
        ''' Stores data.'''
        # get left quests
        l = [
            i for i in range(len(self.qf))
            if not (_in_list(i, self.correct) | _in_list(i, self.wrong)
                    | _in_list(i, self.other))
        ]
        _level = level.split('|')
        for fn, lv in zip(filenames, range(len(_level))):
            index = []
            # add required quests to index
            for i, j in zip('cwol', [self.correct, self.wrong, self.other, l]):
                if i in _level[lv]: index += j
            index.sort()
            qf = self.qf[index]
            # TODO: duplicated. add append/write method as an option
            if fn == 'Curdata.data':
                if len(qf) != 0:
                    with open(fn, 'wb') as f:
                        pickle.dump(qf, f)
                else:
                    try:
                        os.remove(fn)
                    except OSError:
                        # The file may simply not exist yet; only swallow
                        # filesystem errors, not e.g. KeyboardInterrupt
                        # (a bare `except:` used to hide those too).
                        pass
            else:
                if fn not in os.listdir():
                    with open(fn, 'wb') as f:
                        pickle.dump(qf, f)
                else:
                    with open(fn, 'rb') as f:
                        data = pickle.load(f)
                    data = QuestForm(data + qf)
                    with open(fn, 'wb') as f:
                        pickle.dump(data, f)

    def raise_quest(self, quest, **kwargs):
        '''Loop to raise a `Quest` according to `self.arrange`.'''
        ans = None
        for a in self.arrange:
            # Each arrange character is matched as a prefix of the part name.
            if re.findall('^' + a, 'quest'):
                self.raise_q(quest, **kwargs)
            elif re.findall('^' + a, 'args'):
                if not quest.args: continue
                for k in quest.args:
                    print(k + ':', quest.args[k])
            elif re.findall('^' + a, 'selection'):
                self.raise_sel(quest, **kwargs)
            elif re.findall('^' + a, 'true_answer'):
                ans = self.get_input(self.input_manner)
                ans = self.check_ans(ans, quest, **kwargs)
                if ans is not True or self.no_score:
                    self.raise_ta(quest, **kwargs)
            else:
                for k in quest.args:
                    if re.findall('^' + a, k):
                        print(k + ':', quest.args[k])
        print('\n', '-' * BOARDER_LENGTH, '\n')
        return ans

    def get_input(self, input_manner=None):
        '''Get user input if input_manner is not given.'''
        if input_manner is None:
            return input('Your Answer: ')
        else:
            try:
                return input_manner.get()
            except AttributeError:
                raise TypeError('`input_manner` should have a `get()` method')

    def start(self):
        '''Starting point.'''
        try:
            if not self.no_filter: self.qf = self.selchap(self.qf)
            self.length = len(self.qf)
            self.arranged_index = list(range(self.length))
            self.oninit()
            for quest in self.arranged_index:
                tof = self.raise_quest(self.qf[quest], qid=quest)
                if tof is True:
                    self.correct.append(quest)
                    self.status.append(
                        ((datetime.now() - self.starttime).seconds, 1))
                elif tof is False:
                    self.wrong.append(quest)
                    self.status.append(
                        ((datetime.now() - self.starttime).seconds, 0))
                else:
                    self.other.append(quest)
                    self.status.append(
                        ((datetime.now() - self.starttime).seconds, 2))
            self.onfinish()
        except (KeyboardInterrupt, EOFError):
            self.onkill()

    def raise_q(self, quest, **kwargs):
        '''Raises question in a `Quest`. You may want to overwrite it'''
        print(
            'Question %d/%d: ' %
            (len(self.other) + len(self.correct) + len(self.wrong) + 1,
             self.length),
            end='')
        print('\n'.join(quest.q))
        return

    def raise_sel(self, quest, **kwargs):
        '''Raises selections in a `Quest`. You may want to overwrite it'''
        if quest.sel: print('\n'.join(quest.sel))

    def raise_ta(self, quest, **kwargs):
        '''Raises true answer in a `Quest`. You may want to overwrite it'''
        if quest.ta: print('True Answer:', ' '.join(quest.ta))

    def check_ans(self, ans, quest, **kwargs):
        '''
        Check answer. returns True or False or other to your convenience.
        You may want to overwrite it.
        '''
        if self.no_score: return True
        if ans == ''.join(quest.ta):
            print(colorit('Correct!', 'green'))
            return True
        else:
            print(colorit('WRONG!', 'red'))
            return False

    def show_status(self, hduration):
        ''' Show statistics before exit. '''
        result = []
        tempres = [0, 0, 0]
        status = self.status
        # Bucket width scales with total duration (in hours).
        if hduration == 0:
            inteval = 3 * 60
        if hduration > 0:
            inteval = 5 * hduration * 60
        cursec = inteval
        for i in status:
            while cursec - i[0] <= 0:
                result.append(tempres)
                tempres = [0, 0, 0]
                cursec += inteval
            tempres[i[1]] += 1
        result.append(tempres)
        total = inteval
        for i in result:
            print('%3dm:' % (total / 60),
                  colorit('+' * i[1], 'green') + colorit('-' * i[0], 'red'))
            total += inteval
        return result
| heyrict/exam | exam.py | Python | apache-2.0 | 16,883 |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import contextlib
import functools
import inspect
import itertools
import math
import traceback
import netifaces
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from nova.accelerator import cyborg
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova import notifications
from nova.notifications.objects import aggregate as aggregate_notification
from nova.notifications.objects import base as notification_base
from nova.notifications.objects import compute_task as task_notification
from nova.notifications.objects import exception as notification_exception
from nova.notifications.objects import flavor as flavor_notification
from nova.notifications.objects import instance as instance_notification
from nova.notifications.objects import keypair as keypair_notification
from nova.notifications.objects import libvirt as libvirt_notification
from nova.notifications.objects import metrics as metrics_notification
from nova.notifications.objects import request_spec as reqspec_notification
from nova.notifications.objects import scheduler as scheduler_notification
from nova.notifications.objects import server_group as sg_notification
from nova.notifications.objects import volume as volume_notification
from nova import objects
from nova.objects import fields
from nova import rpc
from nova import safe_utils
from nova import utils
# Global nova configuration and module-level logger.
CONF = nova.conf.CONF
LOG = log.getLogger(__name__)
# These properties are specific to a particular image by design. It
# does not make sense for them to be inherited by server snapshots.
# This list is distinct from the configuration option of the same
# (lowercase) name.
NON_INHERITABLE_IMAGE_PROPERTIES = frozenset([
    'cinder_encryption_key_id',
    'cinder_encryption_key_deletion_policy',
    'img_signature',
    'img_signature_hash_method',
    'img_signature_key_type',
    'img_signature_certificate_uuid'])
# Properties starting with these namespaces are reserved for internal
# use by other services. It does not make sense (and may cause a request
# fail) if we include them in a snapshot.
NON_INHERITABLE_IMAGE_NAMESPACES = frozenset([
    'os_glance',
])
def exception_to_dict(fault, message=None):
    """Converts exceptions to a dict for use in notifications.

    :param fault: Exception that occurred
    :param message: Optional fault message, otherwise the message is derived
        from the fault itself.
    :returns: dict with the following items:

        - exception: the fault itself
        - message: one of (in priority order):

                   - the provided message to this method
                   - a formatted NovaException message
                   - the fault class name
        - code: integer code for the fault (defaults to 500)
    """
    # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
    # NovaExceptions carry an HTTP-style code in their kwargs; anything
    # else is reported as a 500.
    code = 500
    if hasattr(fault, "kwargs"):
        code = fault.kwargs.get('code', 500)
    try:
        if not message:
            message = fault.format_message()
    except Exception:
        # Either a NovaException failed to format its message, or this is
        # a foreign exception whose text might contain sensitive details.
        # Be safe and fall back to the class name, which is what the API
        # shows to every user regardless of role.
        message = fault.__class__.__name__
    # The message column is 255 chars; truncate explicitly so PostgreSQL
    # does not raise (MySQL would truncate silently).
    u_message = utils.safe_truncate(message, 255)
    return {
        'exception': fault,
        'message': u_message,
        'code': code,
    }
def _get_fault_details(exc_info, error_code):
details = ''
# TODO(mriedem): Why do we only include the details if the code is 500?
# Though for non-nova exceptions the code will probably be 500.
if exc_info and error_code == 500:
# We get the full exception details including the value since
# the fault message may not contain that information for non-nova
# exceptions (see exception_to_dict).
details = ''.join(traceback.format_exception(
exc_info[0], exc_info[1], exc_info[2]))
return str(details)
def add_instance_fault_from_exc(context, instance, fault, exc_info=None,
                                fault_message=None):
    """Adds the specified fault to the database.

    :param context: request context used for the DB write
    :param instance: instance the fault is recorded against
    :param fault: the exception that occurred
    :param exc_info: optional sys.exc_info() triple; a traceback is stored
        in the fault details only for 500-class faults
    :param fault_message: optional override for the derived fault message
    """
    fault_obj = objects.InstanceFault(context=context)
    fault_obj.host = CONF.host
    fault_obj.instance_uuid = instance.uuid
    fault_obj.update(exception_to_dict(fault, message=fault_message))
    code = fault_obj.code
    fault_obj.details = _get_fault_details(exc_info, code)
    fault_obj.create()
def get_device_name_for_instance(instance, bdms, device):
    """Validates (or generates) a device name for instance.

    This method is a wrapper for get_next_device_name that gets the list
    of used devices and the root device from a block device mapping.

    :param instance: the instance to pick a device name for
    :param bdms: block device mappings used to derive the in-use names
    :param device: requested device name, or None to generate one
    :raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a
                                single instance is exceeded.
    """
    mappings = block_device.instance_block_mapping(instance, bdms)
    return get_next_device_name(instance, mappings.values(),
                                mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
                                      *block_device_lists):
    """Generate missing device names for an instance.

    :raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a
                                single instance is exceeded.
    """
    all_bdms = list(itertools.chain(*block_device_lists))
    # Seed the in-use list with every name already assigned plus the root.
    used_names = [bdm.device_name for bdm in all_bdms if bdm.device_name]
    if root_device_name not in used_names:
        used_names.append(root_device_name)
    for bdm in all_bdms:
        if bdm.device_name:
            continue
        # Fill in and persist a generated name for each unnamed mapping.
        new_name = get_next_device_name(instance, used_names,
                                        root_device_name)
        bdm.device_name = new_name
        bdm.save()
        used_names.append(new_name)
def check_max_disk_devices_to_attach(num_devices):
    """Raise if attaching num_devices would exceed the configured maximum.

    :param num_devices: total number of disk devices the instance would have
    :raises TooManyDiskDevices: when the configured limit is exceeded
    """
    limit = CONF.compute.max_disk_devices_to_attach
    # A negative limit means "unlimited".
    if limit < 0:
        return
    if num_devices > limit:
        raise exception.TooManyDiskDevices(maximum=limit)
def get_next_device_name(instance, device_name_list,
                         root_device_name=None, device=None):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.

    :raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a
                                single instance is exceeded.
    """
    req_prefix = None
    req_letter = None
    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)
    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
    try:
        prefix = block_device.match_device(
            block_device.prepend_dev(root_device_name))[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)
    # Keep the backend's prefix (derived from the root device) and only
    # honour the requested letter.
    if req_prefix != prefix:
        LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
                  {'prefix': prefix, 'req_prefix': req_prefix})
    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.get_device_letter(device_path)
        used_letters.add(letter)
    # +1 accounts for the device being named right now.
    check_max_disk_devices_to_attach(len(used_letters) + 1)
    if not req_letter:
        req_letter = _get_unused_letter(used_letters)
    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)
    return prefix + req_letter
def get_root_bdm(context, instance, bdms=None):
    """Return the root block device mapping for an instance.

    When ``bdms`` is not supplied, the mapping list is loaded from the
    database using the instance uuid.
    """
    if bdms is None:
        uuid = (instance.uuid if isinstance(instance, objects.Instance)
                else instance['uuid'])
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, uuid)
    return bdms.root_bdm()
def is_volume_backed_instance(context, instance, bdms=None):
    """Return True if the instance's root disk is a volume."""
    root_bdm = get_root_bdm(context, instance, bdms)
    if root_bdm is not None:
        return root_bdm.is_volume
    # Very old instances can lack a root bdm entirely; in that case we
    # _assume_ volume-backed exactly when no image_ref is set.
    if isinstance(instance, objects.Instance):
        image_ref = instance.image_ref
    else:
        image_ref = instance['image_ref']
    return not image_ref
def heal_reqspec_is_bfv(ctxt, request_spec, instance):
    """Calculates the is_bfv flag for a RequestSpec created before Rocky.

    Starting in Rocky, new instances have their RequestSpec created with
    the "is_bfv" flag to indicate if they are volume-backed which is used
    by the scheduler when determining root disk resource allocations.

    RequestSpecs created before Rocky will not have the is_bfv flag set
    so we need to calculate it here and update the RequestSpec.

    :param ctxt: nova.context.RequestContext auth context
    :param request_spec: nova.objects.RequestSpec used for scheduling
    :param instance: nova.objects.Instance being scheduled
    """
    # Rocky-or-newer specs already carry the flag; nothing to heal.
    if 'is_bfv' in request_spec:
        return
    # Determine if this is a volume-backed instance and set the field
    # in the request spec accordingly.
    request_spec.is_bfv = is_volume_backed_instance(ctxt, instance)
    request_spec.save()
def convert_mb_to_ceil_gb(mb_value):
    """Convert a size in MiB to GiB, rounding up to the next whole GiB.

    Falsy input (``None`` or ``0``) yields ``0``.
    """
    if not mb_value:
        return 0
    # Round up so we always reserve/allocate enough space.
    return int(math.ceil(mb_value / 1024.0))
def _get_unused_letter(used_letters):
    """Return the first generated device letter not in used_letters."""
    for index in itertools.count():
        letter = block_device.generate_device_letter(index)
        if letter not in used_letters:
            return letter
def get_value_from_system_metadata(instance, key, type, default):
    """Get a value of a specified type from image metadata.

    @param instance: The instance object
    @param key: The name of the property to get
    @param type: The python type the value is be returned as
    @param default: The value to return if key is not set or not the right type
    """
    raw_value = instance.system_metadata.get(key, default)
    try:
        return type(raw_value)
    except ValueError:
        # Fall back to the default rather than propagating a bad value.
        LOG.warning("Metadata value %(value)s for %(key)s is not of "
                    "type %(type)s. Using default value %(default)s.",
                    {'value': raw_value, 'key': key, 'type': type,
                     'default': default}, instance=instance)
        return default
def notify_usage_exists(notifier, context, instance_ref, host,
                        current_period=False, ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' unversioned legacy and transformed notification
    for an instance for usage auditing purposes.

    :param notifier: a messaging.Notifier
    :param context: request context for the current operation
    :param instance_ref: nova.objects.Instance object from which to report
        usage
    :param host: the host emitting the notification
    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.
    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata override for the instance. If
        None, the instance_ref.system_metadata will be used.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """
    audit_start, audit_end = notifications.audit_period_bounds(current_period)
    if system_metadata is None:
        system_metadata = utils.instance_sys_meta(instance_ref)
    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)
    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      image_meta=image_meta)
    if extra_usage_info:
        extra_info.update(extra_usage_info)
    # Emit the legacy unversioned notification first ...
    notify_about_instance_usage(notifier, context, instance_ref, 'exists',
                                extra_usage_info=extra_info)
    # ... then the versioned instance.exists notification.
    audit_period = instance_notification.AuditPeriodPayload(
        audit_period_beginning=audit_start,
        audit_period_ending=audit_end,
    )
    payload = instance_notification.InstanceExistsPayload(
        context=context,
        instance=instance_ref,
        audit_period=audit_period,
    )
    notification = instance_notification.InstanceExistsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE,
        ),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.EXISTS,
        ),
        payload=payload,
    )
    notification.emit(context)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
                                network_info=None, extra_usage_info=None,
                                fault=None):
    """Send an unversioned legacy notification about an instance.

    All new notifications should use notify_about_instance_action which sends
    a versioned notification.

    :param notifier: a messaging.Notifier
    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    :param fault: optional exception whose details are folded into the
        payload; also switches the notification to error priority is left
        to the event_suffix ("...error") below.
    """
    extra_usage_info = extra_usage_info or {}
    usage_info = notifications.info_from_instance(
        context, instance, network_info, populate_image_ref_url=True,
        **extra_usage_info)
    if fault:
        # NOTE(johngarbutt) mirrors the format in wrap_exception
        fault_payload = exception_to_dict(fault)
        LOG.debug(fault_payload["message"], instance=instance)
        usage_info.update(fault_payload)
    # "<something>.error" suffixes are emitted at error priority.
    emit = notifier.error if event_suffix.endswith("error") else notifier.info
    emit(context, 'compute.instance.%s' % event_suffix, usage_info)
def _get_fault_and_priority_from_exception(exception: Exception):
    """Map an optional exception to a (fault payload, priority) pair.

    No exception -> (None, INFO); otherwise the exception is converted to
    an ExceptionPayload and the priority becomes ERROR.
    """
    if not exception:
        return None, fields.NotificationPriority.INFO
    fault = notification_exception.ExceptionPayload.from_exception(exception)
    return fault, fields.NotificationPriority.ERROR
@rpc.if_notifications_enabled
def notify_about_instance_action(context, instance, host, action, phase=None,
                                 source=fields.NotificationSource.COMPUTE,
                                 exception=None, bdms=None):
    """Send versioned notification about the action made on the instance

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
                provided then we will load it from the db if so configured
    """
    # An exception switches the priority to ERROR and adds a fault payload.
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionPayload(
            context=context,
            instance=instance,
            fault=fault,
            bdms=bdms)
    notification = instance_notification.InstanceActionNotification(
            context=context,
            priority=priority,
            publisher=notification_base.NotificationPublisher(
                host=host, source=source),
            event_type=notification_base.EventType(
                object='instance',
                action=action,
                phase=phase),
            payload=payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_create(context, instance, host, phase=None,
                                 exception=None, bdms=None):
    """Send versioned notification about instance creation

    :param context: the request context
    :param instance: the instance being created
    :param host: the host emitting the notification
    :param phase: the phase of the creation
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
                provided then we will load it from the db if so configured
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceCreatePayload(
        context=context, instance=instance, fault=fault, bdms=bdms)
    publisher = notification_base.NotificationPublisher(
        host=host, source=fields.NotificationSource.COMPUTE)
    event_type = notification_base.EventType(
        object='instance',
        action=fields.NotificationAction.CREATE,
        phase=phase)
    instance_notification.InstanceCreateNotification(
        context=context,
        priority=priority,
        publisher=publisher,
        event_type=event_type,
        payload=payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_scheduler_action(context, request_spec, action, phase=None,
                                  source=fields.NotificationSource.SCHEDULER):
    """Send versioned notification about the action made by the scheduler

    :param context: the RequestContext object
    :param request_spec: the RequestSpec object
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    """
    payload = reqspec_notification.RequestSpecPayload(
        request_spec=request_spec)
    publisher = notification_base.NotificationPublisher(
        host=CONF.host, source=source)
    event_type = notification_base.EventType(
        object='scheduler', action=action, phase=phase)
    scheduler_notification.SelectDestinationsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=publisher,
        event_type=event_type,
        payload=payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_attach_detach(context, instance, host, action, phase,
                                      volume_id=None, exception=None):
    """Send versioned notification about the action made on the instance

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param volume_id: id of the volume will be attached
    :param exception: the thrown exception (used in error notifications)
    """
    # An exception switches the priority to ERROR and adds a fault payload.
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionVolumePayload(
            context=context,
            instance=instance,
            fault=fault,
            volume_id=volume_id)
    notification = instance_notification.InstanceActionVolumeNotification(
            context=context,
            priority=priority,
            publisher=notification_base.NotificationPublisher(
                    host=host, source=fields.NotificationSource.COMPUTE),
            event_type=notification_base.EventType(
                    object='instance',
                    action=action,
                    phase=phase),
            payload=payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_rescue_action(context, instance, host,
                                        rescue_image_ref, phase=None,
                                        exception=None):
    """Send versioned notification about the action made on the instance

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param rescue_image_ref: the rescue image ref
    :param phase: the phase of the action
    :param exception: the thrown exception (used in error notifications)
    """
    # An exception switches the priority to ERROR and adds a fault payload.
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionRescuePayload(
            context=context,
            instance=instance,
            fault=fault,
            rescue_image_ref=rescue_image_ref)
    notification = instance_notification.InstanceActionRescueNotification(
            context=context,
            priority=priority,
            publisher=notification_base.NotificationPublisher(
                host=host, source=fields.NotificationSource.COMPUTE),
            event_type=notification_base.EventType(
                object='instance',
                action=fields.NotificationAction.RESCUE,
                phase=phase),
            payload=payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_keypair_action(context, keypair, action, phase):
    """Send versioned notification about the keypair action on the instance

    :param context: the request context
    :param keypair: the keypair which the action performed on
    :param action: the name of the action
    :param phase: the phase of the action
    """
    publisher = notification_base.NotificationPublisher(
        host=CONF.host, source=fields.NotificationSource.API)
    event_type = notification_base.EventType(
        object='keypair',
        action=action,
        phase=phase)
    keypair_notification.KeypairNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=publisher,
        event_type=event_type,
        payload=keypair_notification.KeypairPayload(
            keypair=keypair)).emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_swap(context, instance, host, phase,
                             old_volume_id, new_volume_id, exception=None):
    """Send versioned notification about the volume swap action
    on the instance

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param old_volume_id: the ID of the volume that is copied from and detached
    :param new_volume_id: the ID of the volume that is copied to and attached
    :param exception: an exception
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    swap_payload = instance_notification.InstanceActionVolumeSwapPayload(
        context=context,
        instance=instance,
        fault=fault,
        old_volume_id=old_volume_id,
        new_volume_id=new_volume_id)
    notification = instance_notification.InstanceActionVolumeSwapNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.VOLUME_SWAP,
            phase=phase),
        payload=swap_payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_snapshot(context, instance, host, phase,
                                   snapshot_image_id):
    """Send versioned notification about the snapshot action executed on the
    instance

    :param context: the request context
    :param instance: the instance from which a snapshot image is being created
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param snapshot_image_id: the ID of the snapshot
    """
    snapshot_payload = instance_notification.InstanceActionSnapshotPayload(
        context=context,
        instance=instance,
        fault=None,
        snapshot_image_id=snapshot_image_id)
    notification = instance_notification.InstanceActionSnapshotNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.SNAPSHOT,
            phase=phase),
        payload=snapshot_payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_resize_prep_instance(context, instance, host, phase,
                                      new_flavor):
    """Send versioned notification about the instance resize action
    on the instance

    :param context: the request context
    :param instance: the instance which the resize action performed on
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param new_flavor: new flavor
    """
    flavor_payload = flavor_notification.FlavorPayload(flavor=new_flavor)
    resize_payload = instance_notification.InstanceActionResizePrepPayload(
        context=context,
        instance=instance,
        fault=None,
        new_flavor=flavor_payload)
    notification = instance_notification.InstanceActionResizePrepNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.RESIZE_PREP,
            phase=phase),
        payload=resize_payload)
    notification.emit(context)
def notify_about_server_group_update(context, event_suffix, sg_payload):
    """Send a notification about server group update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param sg_payload: payload for server group update
    """
    event_type = 'servergroup.%s' % event_suffix
    rpc.get_notifier(service='servergroup').info(
        context, event_type, sg_payload)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
    """Send a notification about aggregate update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param aggregate_payload: payload for aggregate update
    """
    # Prefer the aggregate id; fall back to the name when the id is absent.
    aggregate_identifier = (aggregate_payload.get('aggregate_id', None) or
                            aggregate_payload.get('name', None))
    if not aggregate_identifier:
        LOG.debug("No aggregate id or name specified for this "
                  "notification and it will be ignored")
        return
    notifier = rpc.get_notifier(service='aggregate',
                                host=aggregate_identifier)
    notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
@rpc.if_notifications_enabled
def notify_about_aggregate_action(context, aggregate, action, phase):
    """Send a versioned notification about an action on an aggregate."""
    aggregate_payload = aggregate_notification.AggregatePayload(aggregate)
    aggregate_notification.AggregateNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='aggregate',
            action=action,
            phase=phase),
        payload=aggregate_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_aggregate_cache(context, aggregate, host, image_status,
                                 index, total):
    """Send a notification about aggregate cache_images progress.

    :param context: The RequestContext
    :param aggregate: The target aggregate
    :param host: The host within the aggregate for which to report status
    :param image_status: The result from the compute host, which is a dict
                         of {image_id: status}
    :param index: An integer indicating progress toward completion, between
                  1 and $total
    :param total: The total number of hosts being processed in this operation,
                  to bound $index
    """
    success_statuses = ('cached', 'existing')
    payload = aggregate_notification.AggregateCachePayload(aggregate,
                                                           host,
                                                           index,
                                                           total)
    # Partition per-image results into cached vs. failed lists.
    payload.images_cached = [
        image_id for image_id, status in image_status.items()
        if status in success_statuses]
    payload.images_failed = [
        image_id for image_id, status in image_status.items()
        if status not in success_statuses]
    aggregate_notification.AggregateCacheNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
        event_type=notification_base.EventType(
            object='aggregate',
            action=fields.NotificationAction.IMAGE_CACHE,
            phase=fields.NotificationPhase.PROGRESS),
        payload=payload).emit(context)
def notify_about_host_update(context, event_suffix, host_payload):
    """Send a notification about host update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param host_payload: payload for host update. It is a dict and there
                         should be at least the 'host_name' key in this
                         dict.
    """
    host_name = host_payload.get('host_name')
    if not host_name:
        LOG.warning("No host name specified for the notification of "
                    "HostAPI.%s and it will be ignored", event_suffix)
        return
    rpc.get_notifier(service='api', host=host_name).info(
        context, 'HostAPI.%s' % event_suffix, host_payload)
@rpc.if_notifications_enabled
def notify_about_server_group_action(context, group, action):
    """Send a versioned notification about an action on a server group."""
    group_payload = sg_notification.ServerGroupPayload(group)
    sg_notification.ServerGroupNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='server_group',
            action=action),
        payload=group_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_server_group_add_member(context, group_id):
    """Send a versioned notification that a member joined a server group."""
    group = objects.InstanceGroup.get_by_uuid(context, group_id)
    group_payload = sg_notification.ServerGroupPayload(group)
    sg_notification.ServerGroupNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='server_group',
            action=fields.NotificationAction.ADD_MEMBER),
        payload=group_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_rebuild(context, instance, host,
                                  action=fields.NotificationAction.REBUILD,
                                  phase=None,
                                  source=fields.NotificationSource.COMPUTE,
                                  exception=None, bdms=None):
    """Send versioned notification about instance rebuild

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
                 provided then we will load it from the db if so configured
    """
    # Priority becomes ERROR and a fault payload is attached when an
    # exception is supplied.
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionRebuildPayload(
        context=context,
        instance=instance,
        fault=fault,
        bdms=bdms)
    notification = instance_notification.InstanceActionRebuildNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=source),
        event_type=notification_base.EventType(
            object='instance',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_metrics_update(context, host, host_ip, nodename,
                                monitor_metric_list):
    """Send versioned notification about updating metrics

    :param context: the request context
    :param host: the host emitting the notification
    :param host_ip: the IP address of the host
    :param nodename: the node name
    :param monitor_metric_list: the MonitorMetricList object
    """
    metrics_payload = metrics_notification.MetricsPayload(
        host=host,
        host_ip=host_ip,
        nodename=nodename,
        monitor_metric_list=monitor_metric_list)
    metrics_notification.MetricsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='metrics',
            action=fields.NotificationAction.UPDATE),
        payload=metrics_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_libvirt_connect_error(context, ip, exception):
    """Send a versioned notification about libvirt connect error.

    :param context: the request context
    :param ip: the IP address of the host
    :param exception: the thrown exception
    """
    fault, _ = _get_fault_and_priority_from_exception(exception)
    error_payload = libvirt_notification.LibvirtErrorPayload(
        ip=ip, reason=fault)
    libvirt_notification.LibvirtErrorNotification(
        priority=fields.NotificationPriority.ERROR,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='libvirt',
            action=fields.NotificationAction.CONNECT,
            phase=fields.NotificationPhase.ERROR),
        payload=error_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_usage(context, vol_usage, host):
    """Send versioned notification about the volume usage

    :param context: the request context
    :param vol_usage: the volume usage object
    :param host: the host emitting the notification
    """
    usage_payload = volume_notification.VolumeUsagePayload(
        vol_usage=vol_usage)
    volume_notification.VolumeUsageNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='volume',
            action=fields.NotificationAction.USAGE),
        payload=usage_payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_compute_task_error(context, action, instance_uuid,
                                    request_spec, state, exception):
    """Send a versioned notification about compute task error.

    :param context: the request context
    :param action: the name of the action
    :param instance_uuid: the UUID of the instance
    :param state: the vm state of the instance
    :param request_spec: the request spec object or
        the dict includes request spec information
    :param exception: the thrown exception
    """
    # Accept a legacy dict-form request spec and convert it to an object.
    if (request_spec is not None and
            not isinstance(request_spec, objects.RequestSpec)):
        request_spec = objects.RequestSpec.from_primitives(
            context, request_spec, {})
    fault, _ = _get_fault_and_priority_from_exception(exception)
    payload = task_notification.ComputeTaskPayload(
        instance_uuid=instance_uuid, request_spec=request_spec, state=state,
        reason=fault)
    notification = task_notification.ComputeTaskNotification(
        priority=fields.NotificationPriority.ERROR,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
        event_type=notification_base.EventType(
            object='compute_task',
            action=action,
            phase=fields.NotificationPhase.ERROR),
        payload=payload)
    notification.emit(context)
def refresh_info_cache_for_instance(context, instance):
    """Refresh the info cache for an instance.

    :param instance: The instance object.
    """
    # Nothing to do for instances without a cache or already deleted ones.
    if instance.info_cache is None or instance.deleted:
        return
    try:
        # The instance may be deleted concurrently after the check above,
        # in which case the refresh raises InstanceInfoCacheNotFound.
        instance.info_cache.refresh()
    except exception.InstanceInfoCacheNotFound:
        LOG.debug("Can not refresh info_cache because instance "
                  "was not found", instance=instance)
def get_reboot_type(task_state, current_power_state):
    """Checks if the current instance state requires a HARD reboot."""
    # A soft reboot is only possible when the guest is actually running
    # and the task is in one of the dedicated soft-reboot states;
    # everything else needs a hard reboot to recover.
    if (current_power_state == power_state.RUNNING and
            task_state in task_states.soft_reboot_states):
        return 'SOFT'
    return 'HARD'
def get_machine_ips():
    """Get the machine's ip addresses

    :returns: list of Strings of ip addresses
    """
    addresses = []
    for interface in netifaces.interfaces():
        try:
            iface_data = netifaces.ifaddresses(interface)
            for family in iface_data:
                # Only IPv4/IPv6 families are of interest; skip link-layer
                # and any other address families.
                if family not in (netifaces.AF_INET, netifaces.AF_INET6):
                    continue
                for address in iface_data[family]:
                    addr = address['addr']
                    # If we have an ipv6 address remove the
                    # %ether_interface at the end
                    if family == netifaces.AF_INET6:
                        addr = addr.split('%')[0]
                    addresses.append(addr)
        except ValueError:
            # NOTE(review): presumably raised by ifaddresses() when an
            # interface disappears between calls — best-effort, keep going.
            pass
    return addresses
def upsize_quota_delta(new_flavor, old_flavor):
    """Calculate deltas required to adjust quota for an instance upsize.

    Only resources that grow contribute to the result; shrinking or
    unchanged resources are omitted.

    :param new_flavor: the target instance type
    :param old_flavor: the original instance type
    :returns: dict with 'cores' and/or 'ram' keys for growing resources
    """
    cores_delta = new_flavor['vcpus'] - old_flavor['vcpus']
    ram_delta = new_flavor['memory_mb'] - old_flavor['memory_mb']
    deltas = {}
    if cores_delta > 0:
        deltas['cores'] = cores_delta
    if ram_delta > 0:
        deltas['ram'] = ram_delta
    return deltas
def get_headroom(quotas, usages, deltas):
    """Compute the remaining quota headroom for each resource.

    An unlimited (-1) 'cores' or 'ram' quota is translated into a
    synthetic headroom derived from the 'instances' headroom and the
    requested deltas.
    """
    headroom = {resource: quotas[resource] - usages[resource]
                for resource in quotas}
    instance_headroom = headroom.get('instances', 1)
    # If quota_cores is unlimited [-1], derive cores headroom from the
    # instances headroom scaled by the per-instance cores delta.
    if quotas.get('cores') == -1:
        if deltas.get('cores'):
            headroom['cores'] = (instance_headroom * deltas['cores'] /
                                 deltas.get('instances', 1))
        else:
            headroom['cores'] = instance_headroom
    # Likewise for an unlimited [-1] ram quota.
    if quotas.get('ram') == -1:
        if deltas.get('ram'):
            headroom['ram'] = (instance_headroom * deltas['ram'] /
                               deltas.get('instances', 1))
        else:
            headroom['ram'] = instance_headroom
    return headroom
def check_num_instances_quota(
    context, flavor, min_count, max_count, project_id=None, user_id=None,
    orig_num_req=None,
):
    """Enforce quota limits on number of instances created.

    :param context: the request context
    :param flavor: the requested flavor; its vcpus/memory_mb drive the
        cores/ram deltas
    :param min_count: minimum number of instances requested
    :param max_count: maximum number of instances requested
    :param project_id: project to check quota for; defaults to the context's
        project_id
    :param user_id: user to check quota for; defaults to the context's user_id
    :param orig_num_req: original number of instances requested, used to
        build the over-quota message during a zero-delta recheck
    :returns: the number of instances that may be created (<= max_count)
    :raises TooManyInstances: if the quota would be exceeded
    """
    # project_id is also used for the TooManyInstances error message
    if project_id is None:
        project_id = context.project_id
    if user_id is None:
        user_id = context.user_id
    # Check whether we need to count resources per-user and check a per-user
    # quota limit. If we have no per-user quota limit defined for a
    # project/user, we can avoid wasteful resource counting.
    user_quotas = objects.Quotas.get_all_by_project_and_user(
        context, project_id, user_id)
    if not any(r in user_quotas for r in ['instances', 'cores', 'ram']):
        user_id = None
    # Determine requested cores and ram
    req_cores = max_count * flavor.vcpus
    req_ram = max_count * flavor.memory_mb
    deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
    try:
        # NOTE(johngarbutt) when using unified limits, this call
        # is a no op, and as such, this function always returns max_count
        objects.Quotas.check_deltas(context, deltas,
                                    project_id, user_id=user_id,
                                    check_project_id=project_id,
                                    check_user_id=user_id)
    except exception.OverQuota as exc:
        quotas = exc.kwargs['quotas']
        overs = exc.kwargs['overs']
        usages = exc.kwargs['usages']
        # This is for the recheck quota case where we used a delta of zero.
        if min_count == max_count == 0:
            # orig_num_req is the original number of instances requested in the
            # case of a recheck quota, for use in the over quota exception.
            req_cores = orig_num_req * flavor.vcpus
            req_ram = orig_num_req * flavor.memory_mb
            requested = {'instances': orig_num_req, 'cores': req_cores,
                         'ram': req_ram}
            (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
                deltas, overs, quotas, requested)
            msg = "Cannot run any more instances of this type."
            params = {'overs': overs, 'pid': project_id, 'msg': msg}
            LOG.debug("%(overs)s quota exceeded for %(pid)s. %(msg)s",
                      params)
            raise exception.TooManyInstances(overs=overs,
                                             req=reqs,
                                             used=useds,
                                             allowed=total_alloweds)
        # OK, we exceeded quota; let's figure out why...
        headroom = get_headroom(quotas, usages, deltas)

        allowed = headroom.get('instances', 1)
        # Reduce 'allowed' instances in line with the cores & ram headroom
        if flavor.vcpus:
            allowed = min(allowed, headroom['cores'] // flavor.vcpus)
        if flavor.memory_mb:
            allowed = min(allowed, headroom['ram'] // flavor.memory_mb)
        # Convert to the appropriate exception message
        if allowed <= 0:
            msg = "Cannot run any more instances of this type."
        elif min_count <= allowed <= max_count:
            # We're actually OK, but still need to check against allowed
            return check_num_instances_quota(
                context, flavor, min_count, allowed, project_id=project_id,
                user_id=user_id)
        else:
            msg = "Can only run %s more instances of this type." % allowed

        num_instances = (str(min_count) if min_count == max_count else
                         "%s-%s" % (min_count, max_count))
        requested = dict(instances=num_instances, cores=req_cores,
                         ram=req_ram)
        (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
            headroom, overs, quotas, requested)
        params = {'overs': overs, 'pid': project_id,
                  'min_count': min_count, 'max_count': max_count,
                  'msg': msg}

        if min_count == max_count:
            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
                      " tried to run %(min_count)d instances. "
                      "%(msg)s", params)
        else:
            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
                      " tried to run between %(min_count)d and"
                      " %(max_count)d instances. %(msg)s",
                      params)
        raise exception.TooManyInstances(overs=overs,
                                         req=reqs,
                                         used=useds,
                                         allowed=total_alloweds)

    return max_count
def get_over_quota_detail(headroom, overs, quotas, requested):
    """Build comma-joined requested/used/allowed strings for each resource
    that exceeded its quota.

    :returns: tuple of (overs, reqs, total_alloweds, useds) strings
    """
    reqs, useds, total_alloweds = [], [], []
    for resource in overs:
        reqs.append(str(requested[resource]))
        # Used is derived from the quota minus the remaining headroom.
        useds.append(str(quotas[resource] - headroom[resource]))
        total_alloweds.append(str(quotas[resource]))
    overs, reqs, useds, total_alloweds = [
        ', '.join(part) for part in (overs, reqs, useds, total_alloweds)]
    return overs, reqs, total_alloweds, useds
def remove_shelved_keys_from_system_metadata(instance):
    """Strip shelve bookkeeping keys from an instance's system_metadata."""
    # Missing keys are simply ignored; pop() with a default avoids a
    # membership check per key.
    for shelve_key in ('shelved_at', 'shelved_image_id', 'shelved_host'):
        instance.system_metadata.pop(shelve_key, None)
def create_image(context, instance, name, image_type, image_api,
                 extra_properties=None):
    """Create new image entry in the image service. This new image
    will be reserved for the compute manager to upload a snapshot
    or backup.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param image_api: instance of nova.image.glance.API
    :param extra_properties: dict of extra image properties to include
    :returns: the image entry returned by image_api.create()
    """
    # Base properties tying the image back to its source instance; any
    # caller-supplied extras may override them.
    properties = {
        'instance_uuid': instance.uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }
    properties.update(extra_properties or {})

    image_meta = initialize_instance_snapshot_metadata(
        context, instance, name, properties)
    # if we're making a snapshot, omit the disk and container formats,
    # since the image may have been converted to another format, and the
    # original values won't be accurate. The driver will populate these
    # with the correct values later, on image upload.
    if image_type == 'snapshot':
        image_meta.pop('disk_format', None)
        image_meta.pop('container_format', None)
    return image_api.create(context, image_meta)
def initialize_instance_snapshot_metadata(context, instance, name,
                                          extra_properties=None):
    """Initialize new metadata for a snapshot of the given instance.

    :param context: authenticated RequestContext; note that this may not
            be the owner of the instance itself, e.g. an admin creates a
            snapshot image of some user instance
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param extra_properties: dict of extra metadata properties to include
    :returns: the new instance snapshot metadata
    """
    # Start from the image metadata stashed in the instance's
    # system_metadata at boot time.
    image_meta = utils.get_image_from_system_metadata(
        instance.system_metadata)
    image_meta['name'] = name

    # If the user creating the snapshot is not in the same project as
    # the owner of the instance, then the image visibility should be
    # "shared" so the owner of the instance has access to the image, like
    # in the case of an admin creating a snapshot of another user's
    # server, either directly via the createImage API or via shelve.
    extra_properties = extra_properties or {}
    if context.project_id != instance.project_id:
        # The glance API client-side code will use this to add the
        # instance project as a member of the image for access.
        image_meta['visibility'] = 'shared'
        extra_properties['instance_owner'] = instance.project_id
        # TODO(mriedem): Should owner_project_name and owner_user_name
        # be removed from image_meta['properties'] here, or added to
        # [DEFAULT]/non_inheritable_image_properties? It is confusing
        # otherwise to see the owner project not match those values.
    else:
        # The request comes from the owner of the instance so make the
        # image private.
        image_meta['visibility'] = 'private'

    # Delete properties that are non-inheritable: the configured list,
    # the hard-coded module-level list, and any key under a blocked
    # namespace prefix.
    properties = image_meta['properties']
    keys_to_pop = set(CONF.non_inheritable_image_properties).union(
        NON_INHERITABLE_IMAGE_PROPERTIES)
    for ns in NON_INHERITABLE_IMAGE_NAMESPACES:
        keys_to_pop |= {key for key in properties
                        if key.startswith(ns)}
    for key in keys_to_pop:
        properties.pop(key, None)

    # The properties in extra_properties have precedence
    properties.update(extra_properties)

    return image_meta
def delete_image(context, instance, image_api, image_id, log_exc_info=False):
    """Deletes the image if it still exists.

    Ignores ImageNotFound if the image is already gone.

    :param context: the nova auth request context where the context.project_id
        matches the owner of the image
    :param instance: the instance for which the snapshot image was created
    :param image_api: the image API used to delete the image
    :param image_id: the ID of the image to delete
    :param log_exc_info: True if this is being called from an exception handler
        block and traceback should be logged at DEBUG level, False otherwise.
    """
    LOG.debug("Cleaning up image %s", image_id, instance=instance,
              log_exc_info=log_exc_info)
    try:
        image_api.delete(context, image_id)
    except exception.ImageNotFound:
        # Since we're trying to cleanup an image, we don't care if
        # it's already gone.
        pass
    except Exception:
        # Best-effort cleanup: log any other failure but do not propagate.
        LOG.exception("Error while trying to clean up image %s",
                      image_id, instance=instance)
def may_have_ports_or_volumes(instance):
    """Checks to see if an instance may have ports or volumes based on vm_state

    This is primarily only useful when instance.host is None.

    :param instance: The nova.objects.Instance in question.
    :returns: True if the instance may have ports or volumes, False otherwise
    """
    # NOTE(melwitt): When an instance build fails in the compute manager,
    # the instance host and node are set to None and the vm_state is set
    # to ERROR. In the case, the instance with host = None has actually
    # been scheduled and may have ports and/or volumes allocated on the
    # compute node.
    if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
        return True
    return False
def get_stashed_volume_connector(bdm, instance):
    """Lookup a connector dict from the bdm.connection_info if set

    Gets the stashed connector dict out of the bdm.connection_info if set
    and the connector host matches the instance host.

    :param bdm: nova.objects.block_device.BlockDeviceMapping
    :param instance: nova.objects.instance.Instance
    :returns: volume connector dict or None
    """
    if 'connection_info' in bdm and bdm.connection_info is not None:
        # NOTE(mriedem): We didn't start stashing the connector in the
        # bdm.connection_info until Mitaka so it might not be there on old
        # attachments. Also, if the volume was attached when the instance
        # was in shelved_offloaded state and it hasn't been unshelved yet
        # we don't have the attachment/connection information either.
        connector = jsonutils.loads(bdm.connection_info).get('connector')
        if connector:
            if connector.get('host') == instance.host:
                return connector
            LOG.debug('Found stashed volume connector for instance but '
                      'connector host %(connector_host)s does not match '
                      'the instance host %(instance_host)s.',
                      {'connector_host': connector.get('host'),
                       'instance_host': instance.host}, instance=instance)
            # A host mismatch is still acceptable when the instance was
            # scheduled in the past but currently has no host (e.g. failed
            # build or shelved-offloaded states).
            if (instance.host is None and
                    may_have_ports_or_volumes(instance)):
                LOG.debug('Allowing use of stashed volume connector with '
                          'instance host None because instance with '
                          'vm_state %(vm_state)s has been scheduled in '
                          'the past.', {'vm_state': instance.vm_state},
                          instance=instance)
                return connector
class EventReporter(object):
    """Context manager to report instance action events.

    If constructed with ``graceful_exit=True`` the __exit__ function will
    handle and not re-raise on InstanceActionNotFound.
    """

    def __init__(self, context, event_name, host, *instance_uuids,
                 graceful_exit=False):
        # :param context: the request context
        # :param event_name: name recorded for the instance action event
        # :param host: host that the event is reported against
        # :param instance_uuids: one or more instance UUIDs to report on
        # :param graceful_exit: swallow InstanceActionNotFound on exit
        self.context = context
        self.event_name = event_name
        self.instance_uuids = instance_uuids
        self.host = host
        self.graceful_exit = graceful_exit

    def __enter__(self):
        # Record an event start for every tracked instance.
        for uuid in self.instance_uuids:
            objects.InstanceActionEvent.event_start(
                self.context, uuid, self.event_name, want_result=False,
                host=self.host)

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Record the event finish (with failure details, if any) for every
        # tracked instance.
        for uuid in self.instance_uuids:
            try:
                objects.InstanceActionEvent.event_finish_with_failure(
                    self.context, uuid, self.event_name, exc_val=exc_val,
                    exc_tb=exc_tb, want_result=False)
            except exception.InstanceActionNotFound:
                # If the instance action was not found then determine if we
                # should re-raise based on the graceful_exit attribute.
                with excutils.save_and_reraise_exception(
                        reraise=not self.graceful_exit):
                    if self.graceful_exit:
                        # Returning True suppresses any in-flight exception.
                        return True
        return False
def wrap_instance_event(prefix, graceful_exit=False):
    """Wraps a method to log the event taken on the instance, and result.

    This decorator wraps a method to log the start and result of an event, as
    part of an action taken on an instance.

    :param prefix: prefix for the event name, usually a service binary like
        "compute" or "conductor" to indicate the origin of the event.
    :param graceful_exit: True if the decorator should gracefully handle
        InstanceActionNotFound errors, False otherwise. This should rarely be
        True.
    """
    @utils.expects_func_args('instance')
    def helper(function):

        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            # Resolve the instance uuid from the wrapped function's actual
            # call arguments (works for positional or keyword usage).
            wrapped_func = safe_utils.get_wrapped_function(function)
            keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                             *args, **kwargs)
            instance_uuid = keyed_args['instance']['uuid']

            event_name = '{0}_{1}'.format(prefix, function.__name__)
            # Not every decorated object has a host attribute (e.g. API
            # level services); fall back to None in that case.
            host = self.host if hasattr(self, 'host') else None
            with EventReporter(context, event_name, host, instance_uuid,
                               graceful_exit=graceful_exit):
                return function(self, context, *args, **kwargs)
        return decorated_function
    return helper
class UnlimitedSemaphore(object):
    """A no-op semaphore: entering never blocks and nothing is tracked."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None lets any in-flight exception propagate.
        return None

    @property
    def balance(self):
        """Number of waiters; always 0 since nothing ever blocks."""
        return 0
# This semaphore is used to enforce a limit on disk-IO-intensive operations
# (image downloads, image conversions) at any given time.
# It is initialized at ComputeManager.init_host()
# NOTE(review): presumably replaced with a bounded semaphore there — until
# then it imposes no limit at all.
disk_ops_semaphore = UnlimitedSemaphore()
@contextlib.contextmanager
def notify_about_instance_delete(notifier, context, instance,
                                 delete_type='delete',
                                 source=fields.NotificationSource.API):
    """Context manager emitting start/end delete notifications.

    The legacy (unversioned) "<delete_type>.start"/".end" notifications are
    always sent; versioned action notifications are additionally sent for
    plain and soft deletes.

    :param notifier: the legacy notifier used for the unversioned payloads
    :param context: the request context
    :param instance: the instance being deleted
    :param delete_type: delete flavor, e.g. 'delete' or 'soft_delete'
    :param source: the source of the versioned notification
    """
    try:
        notify_about_instance_usage(notifier, context, instance,
                                    "%s.start" % delete_type)
        # Note(gibi): force_delete types will be handled in a
        # subsequent patch
        if delete_type in ['delete', 'soft_delete']:
            notify_about_instance_action(
                context,
                instance,
                host=CONF.host,
                source=source,
                action=delete_type,
                phase=fields.NotificationPhase.START)

        yield
    finally:
        # The end notifications are sent even if the wrapped block raised.
        notify_about_instance_usage(notifier, context, instance,
                                    "%s.end" % delete_type)
        if delete_type in ['delete', 'soft_delete']:
            notify_about_instance_action(
                context,
                instance,
                host=CONF.host,
                source=source,
                action=delete_type,
                phase=fields.NotificationPhase.END)
def update_pci_request_spec_with_allocated_interface_name(
        context, report_client, pci_requests, provider_mapping):
    """Update the instance's PCI request based on the request group -
    resource provider mapping and the device RP name from placement.

    :param context: the request context
    :param report_client: a SchedulerReportClient instance
    :param pci_requests: A list of InstancePCIRequest objects to be updated
    :param provider_mapping: the request group - resource provider mapping
        in the form returned by the RequestSpec.get_request_group_mapping()
        call.
    :raises AmbiguousResourceProviderForPCIRequest: if more than one
        resource provider provides resource for the given PCI request.
    :raises UnexpectedResourceProviderNameForPCIRequest: if the resource
        provider, which provides resource for the pci request, does not
        have a well formatted name so we cannot parse the parent interface
        name out of it.
    """
    if not pci_requests:
        return

    def needs_update(pci_request, mapping):
        # Only neutron-based requests carry a requester_id that appears in
        # the provider mapping.
        return (pci_request.requester_id and
                pci_request.requester_id in mapping)

    for pci_request in pci_requests:
        if needs_update(pci_request, provider_mapping):
            provider_uuids = provider_mapping[pci_request.requester_id]
            # Exactly one provider must back each PCI request.
            if len(provider_uuids) != 1:
                raise exception.AmbiguousResourceProviderForPCIRequest(
                    providers=provider_uuids,
                    requester=pci_request.requester_id)

            dev_rp_name = report_client.get_resource_provider_name(
                context,
                provider_uuids[0])

            # NOTE(gibi): the device RP name reported by neutron is
            # structured like <hostname>:<agentname>:<interfacename>
            rp_name_pieces = dev_rp_name.split(':')
            if len(rp_name_pieces) != 3:
                ex = exception.UnexpectedResourceProviderNameForPCIRequest
                raise ex(
                    provider=provider_uuids[0],
                    requester=pci_request.requester_id,
                    provider_name=dev_rp_name)

            # Pin every spec in the request to the parent interface name.
            for spec in pci_request.spec:
                spec['parent_ifname'] = rp_name_pieces[2]
def delete_arqs_if_needed(context, instance, arq_uuids=None):
    """Delete Cyborg accelerator requests (ARQs) for the instance.

    Cleanup is best-effort: failures are logged and swallowed so they
    never abort the surrounding instance delete.

    :param context: the request context
    :param instance: the instance that owns the ARQs
    :param arq_uuids: additionally delete these ARQs by uuid; used for
        ARQs that were created but not yet bound to the instance.
    """
    cyclient = cyborg.get_client(context)
    dp_name = instance.flavor.extra_specs.get('accel:device_profile')

    if dp_name:
        LOG.debug('Calling Cyborg to delete ARQs for instance %(instance)s',
                  {'instance': instance.uuid})
        try:
            cyclient.delete_arqs_for_instance(instance.uuid)
        except exception.AcceleratorRequestOpFailed as e:
            LOG.exception('Failed to delete accelerator requests for '
                          'instance %s. Exception: %s', instance.uuid, e)

    if arq_uuids:
        LOG.debug('Calling Cyborg to delete ARQs by uuids %(uuid)s for'
                  ' instance %(instance)s',
                  {'instance': instance.uuid,
                   'uuid': arq_uuids})
        # Mirror the error handling above: deleting unbound ARQs is also
        # best-effort and must not propagate Cyborg failures.
        try:
            cyclient.delete_arqs_by_uuid(arq_uuids)
        except exception.AcceleratorRequestOpFailed as e:
            LOG.exception('Failed to delete accelerator requests %s. '
                          'Exception: %s', arq_uuids, e)
| openstack/nova | nova/compute/utils.py | Python | apache-2.0 | 63,683 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import re
import sys
import bs4
from oslo_config import cfg
import pkg_resources
import prettytable
import requests
PROJECT_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '..'))

# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools/build.py
# instead of pip installed kolla-build tool
if PROJECT_ROOT not in sys.path:
    sys.path.insert(0, PROJECT_ROOT)

from kolla.common import config as common_config

logging.basicConfig(format="%(message)s")
LOG = logging.getLogger('version-check')

# Filter list for non-projects
NOT_PROJECTS = [
    'nova-novncproxy',
    'nova-spicehtml5proxy',
    'openstack-base',
    'profiles'
]

TARBALLS_BASE_URL = 'http://tarballs.openstack.org'

# Accumulates {'local': {project: version}, 'upstream': {project: version}}
# as the tool runs.
VERSIONS = {'local': dict()}
def retrieve_upstream_versions():
    """Populate VERSIONS['upstream'] with the newest tarball release found
    on tarballs.openstack.org for every project in VERSIONS['local'].
    """
    upstream_versions = dict()
    for project in VERSIONS['local']:
        winner = None
        # Only consider releases from the same major series as the local
        # version (candidates named '<project>-<series>...').
        series = VERSIONS['local'][project].split('.')[0]
        base = '{}/{}'.format(TARBALLS_BASE_URL, project)

        LOG.debug("Getting latest version for project %s from %s",
                  project, base)
        r = requests.get(base)
        s = bs4.BeautifulSoup(r.text, 'html.parser')

        for link in s.find_all('a'):
            version = link.get('href')
            if (version.endswith('.tar.gz') and
                    version.startswith('{}-{}'.format(project, series))):
                # Strip the '<project>-' prefix and '.tar.gz' suffix.
                split = '{}-|.tar.gz'.format(project)
                candidate = re.split(split, version)[1]
                # Ignore 2014, 2015 versions as they are older
                if candidate.startswith('201'):
                    continue
                if not winner or more_recent(candidate, winner):
                    winner = candidate
        if not winner:
            LOG.warning("Could not find a version for %s", project)
            continue
        if '-' in winner:
            winner = winner.split('-')[1]
        upstream_versions[project] = winner
        LOG.debug("Found latest version %s for project %s", winner, project)

    VERSIONS['upstream'] = collections.OrderedDict(
        sorted(upstream_versions.items()))
def retrieve_local_versions(conf):
    """Fill VERSIONS['local'] from the kolla source configuration."""
    for section in common_config.SOURCES:
        if section in NOT_PROJECTS:
            continue

        project = section.split('-')[0]
        if section in conf.list_all_sections():
            location = getattr(conf, section).location
        else:
            LOG.debug("Project %s not found in configuration file, using "
                      "default from kolla.common.config", project)
            location = common_config.SOURCES[section]['location']

        # Tarball URL -> '<project>-<version>' -> bare version string.
        version = location.split('/')[-1].split('.tar.gz')[0]
        if '-' in version:
            version = version.split('-')[1]

        LOG.debug("Use local version %s for project %s", version, project)
        VERSIONS['local'][project] = version
def more_recent(candidate, reference):
    """Return True when *candidate* is a newer version than *reference*."""
    parse = pkg_resources.parse_version
    return parse(candidate) > parse(reference)
def diff_link(project, old_ref, new_ref):
    """Return the GitHub compare URL for *project* between two refs."""
    url_template = "https://github.com/openstack/{}/compare/{}...{}"
    return url_template.format(project, old_ref, new_ref)
def compare_versions():
    """Print a table of projects whose upstream release is newer."""
    table = prettytable.PrettyTable(["Project", "Current version",
                                     "Latest version", "Comparing changes"])
    table.align = "l"

    outdated = False
    for project, upstream_version in VERSIONS['upstream'].items():
        local_version = VERSIONS['local'].get(project)
        if local_version is None:
            continue
        if more_recent(upstream_version, local_version):
            outdated = True
            table.add_row([
                project,
                local_version,
                upstream_version,
                diff_link(project, local_version, upstream_version)
            ])

    if outdated:
        print(table)
    else:
        print("Everything is up to date")
def main():
    """Entry point: compare local kolla source versions with upstream."""
    conf = cfg.ConfigOpts()
    common_config.parse(conf, sys.argv[1:], prog='version-check')

    if conf.debug:
        LOG.setLevel(logging.DEBUG)

    retrieve_local_versions(conf)
    retrieve_upstream_versions()

    compare_versions()


if __name__ == '__main__':
    main()
| ealegol/kolla-newton | tools/version-check.py | Python | apache-2.0 | 5,041 |
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
from neutron.plugins.common import constants as pconst
LOG = logging.getLogger(__name__)

SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
# Well-known link-local address of the Nova metadata service.
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    """Plugin-side RPC proxy used to notify L2/security-group agents."""

    BASE_RPC_API_VERSION = '1.1'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)

    def port_update(self, context, port):
        # Fanout so every agent listening on the port-update topic sees it.
        self.fanout_cast(context,
                         self.make_msg('port_update',
                                       port=port),
                         topic=self.topic_port_update)
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
    """Resolves agent 'device' identifiers to ports with security groups."""

    def get_port_from_device(self, device):
        # Agents report tap device names; strip the tap prefix to recover
        # the (possibly truncated) port id for the DB lookup below.
        port_id = re.sub(r"^%s" % const.TAP_DEVICE_PREFIX, "", device)
        port = self.get_port_and_sgs(port_id)
        if port:
            port['device'] = device
        return port

    def get_port_and_sgs(self, port_id):
        """Get port from database with security group info."""

        LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
        session = db.get_session()
        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

        with session.begin(subtransactions=True):
            query = session.query(
                models_v2.Port,
                sg_db.SecurityGroupPortBinding.security_group_id
            )
            # Outer join so ports with no security group still match;
            # startswith() because the caller may only have a truncated id.
            query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                                    models_v2.Port.id == sg_binding_port)
            query = query.filter(models_v2.Port.id.startswith(port_id))
            port_and_sgs = query.all()
            if not port_and_sgs:
                return
            port = port_and_sgs[0][0]
            plugin = manager.NeutronManager.get_plugin()
            port_dict = plugin._make_port_dict(port)
            port_dict['security_groups'] = [
                sg_id for port_, sg_id in port_and_sgs if sg_id]
            port_dict['security_group_rules'] = []
            port_dict['security_group_source_groups'] = []
            port_dict['fixed_ips'] = [ip['ip_address']
                                      for ip in port['fixed_ips']]
        return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
                             external_net_db.External_net_db_mixin):
    """Shared base for the Big Switch REST proxy plugin and ML2 driver.

    Provides topology serialization for controller (re)synchronization,
    mapping of neutron resources to the controller wire format, and VIF
    binding helpers.
    """

    supported_extension_aliases = ["binding"]
    # Assigned by subclasses to a servermanager.ServerPool instance.
    servers = None

    @property
    def l3_plugin(self):
        """Return the loaded L3 service plugin (None when not configured)."""
        return manager.NeutronManager.get_service_plugins().get(
            pconst.L3_ROUTER_NAT)

    def _get_all_data(self, get_ports=True, get_floating_ips=True,
                      get_routers=True):
        """Serialize the current topology for a full controller sync."""
        admin_context = qcontext.get_admin_context()
        networks = []
        # this method is used by the ML2 driver so it can't directly invoke
        # the self.get_(ports|networks) methods
        plugin = manager.NeutronManager.get_plugin()
        all_networks = plugin.get_networks(admin_context) or []
        for net in all_networks:
            mapped_network = self._get_mapped_network_with_subnets(net)
            flips_n_ports = mapped_network
            if get_floating_ips:
                flips_n_ports = self._get_network_with_floatingips(
                    mapped_network)

            if get_ports:
                ports = []
                net_filter = {'network_id': [net.get('id')]}
                net_ports = plugin.get_ports(admin_context,
                                             filters=net_filter) or []
                for port in net_ports:
                    mapped_port = self._map_state_and_status(port)
                    mapped_port['attachment'] = {
                        'id': port.get('device_id'),
                        'mac': port.get('mac_address'),
                    }
                    mapped_port = self._extend_port_dict_binding(admin_context,
                                                                 mapped_port)
                    ports.append(mapped_port)
                flips_n_ports['ports'] = ports

            if flips_n_ports:
                networks.append(flips_n_ports)

        data = {'networks': networks}

        if get_routers and self.l3_plugin:
            routers = []
            all_routers = self.l3_plugin.get_routers(admin_context) or []
            for router in all_routers:
                interfaces = []
                mapped_router = self._map_state_and_status(router)
                router_filter = {
                    'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
                    'device_id': [router.get('id')]
                }
                router_ports = self.get_ports(admin_context,
                                              filters=router_filter) or []
                for port in router_ports:
                    net_id = port.get('network_id')
                    subnet_id = port['fixed_ips'][0]['subnet_id']
                    intf_details = self._get_router_intf_details(admin_context,
                                                                 net_id,
                                                                 subnet_id)
                    interfaces.append(intf_details)
                mapped_router['interfaces'] = interfaces

                routers.append(mapped_router)

            data.update({'routers': routers})
        return data

    def _send_all_data(self, send_ports=True, send_floating_ips=True,
                       send_routers=True, timeout=None,
                       triggered_by_tenant=None):
        """Pushes all data to network ctrl (networks/ports, ports/attachments).

        This gives the controller an option to re-sync it's persistent store
        with neutron's current view of that data.
        """
        data = self._get_all_data(send_ports, send_floating_ips, send_routers)
        data['triggered_by_tenant'] = triggered_by_tenant
        errstr = _("Unable to update remote topology: %s")
        return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
                                        data, errstr, timeout=timeout)

    def _get_network_with_floatingips(self, network, context=None):
        """Attach the network's floating IPs under its 'floatingips' key."""
        if context is None:
            context = qcontext.get_admin_context()

        net_id = network['id']
        net_filter = {'floating_network_id': [net_id]}
        if self.l3_plugin:
            fl_ips = self.l3_plugin.get_floatingips(context,
                                                    filters=net_filter) or []
            network['floatingips'] = fl_ips

        return network

    def _get_all_subnets_json_for_network(self, net_id, context=None):
        """Return the network's subnets mapped to the controller format."""
        if context is None:
            context = qcontext.get_admin_context()
        # start a sub-transaction to avoid breaking parent transactions
        with context.session.begin(subtransactions=True):
            subnets = self._get_subnets_by_network(context,
                                                   net_id)
        subnets_details = []
        if subnets:
            for subnet in subnets:
                subnet_dict = self._make_subnet_dict(subnet)
                mapped_subnet = self._map_state_and_status(subnet_dict)
                subnets_details.append(mapped_subnet)

        return subnets_details

    def _get_mapped_network_with_subnets(self, network, context=None):
        """Map a network plus its subnets and gateway for the controller."""
        # if context is not provided, admin context is used
        if context is None:
            context = qcontext.get_admin_context()
        network = self._map_state_and_status(network)
        subnets = self._get_all_subnets_json_for_network(network['id'],
                                                         context)
        network['subnets'] = subnets
        for subnet in (subnets or []):
            if subnet['gateway_ip']:
                # FIX: For backward compatibility with wire protocol
                network['gateway'] = subnet['gateway_ip']
                break
        else:
            network['gateway'] = ''

        network[external_net.EXTERNAL] = self._network_is_external(
            context, network['id'])
        # include ML2 segmentation types
        network['segmentation_types'] = getattr(self, "segmentation_types", "")
        return network

    def _send_create_network(self, network, context=None):
        """Create the network on the backend controller."""
        tenant_id = network['tenant_id']
        mapped_network = self._get_mapped_network_with_subnets(network,
                                                               context)
        self.servers.rest_create_network(tenant_id, mapped_network)

    def _send_update_network(self, network, context=None):
        """Push the network, including floating IPs, to the controller."""
        net_id = network['id']
        tenant_id = network['tenant_id']
        mapped_network = self._get_mapped_network_with_subnets(network,
                                                               context)
        net_fl_ips = self._get_network_with_floatingips(mapped_network,
                                                        context)
        self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)

    def _send_delete_network(self, network, context=None):
        """Delete the network on the backend controller."""
        net_id = network['id']
        tenant_id = network['tenant_id']
        self.servers.rest_delete_network(tenant_id, net_id)

    def _map_state_and_status(self, resource):
        """Convert admin_state_up/status into the controller 'state' field."""
        resource = copy.copy(resource)

        resource['state'] = ('UP' if resource.pop('admin_state_up',
                                                  True) else 'DOWN')
        resource.pop('status', None)

        return resource

    def _warn_on_state_status(self, resource):
        """Log when callers set fields this plugin does not honor."""
        if resource.get('admin_state_up', True) is False:
            LOG.warning(_("Setting admin_state_up=False is not supported "
                          "in this plugin version. Ignoring setting for "
                          "resource: %s"), resource)

        if 'status' in resource:
            if resource['status'] != const.NET_STATUS_ACTIVE:
                LOG.warning(_("Operational status is internally set by the "
                              "plugin. Ignoring setting status=%s."),
                            resource['status'])

    def _get_router_intf_details(self, context, intf_id, subnet_id):
        """Build the controller payload describing a router interface."""
        # we will use the network id as interface's id
        net_id = intf_id
        network = self.get_network(context, net_id)
        subnet = self.get_subnet(context, subnet_id)
        mapped_network = self._get_mapped_network_with_subnets(network)
        mapped_subnet = self._map_state_and_status(subnet)

        data = {
            'id': intf_id,
            "network": mapped_network,
            "subnet": mapped_subnet
        }
        return data

    def _extend_port_dict_binding(self, context, port):
        """Populate the port's binding (vif_type/host_id/details) fields."""
        cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
        if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
                                portbindings.VIF_TYPE_IVS):
            LOG.warning(_("Unrecognized vif_type in configuration "
                          "[%s]. Defaulting to ovs."),
                        cfg_vif_type)
            cfg_vif_type = portbindings.VIF_TYPE_OVS
        # In ML2, the host_id is already populated
        if portbindings.HOST_ID in port:
            hostid = port[portbindings.HOST_ID]
        elif 'id' in port:
            hostid = porttracker_db.get_port_hostid(context, port['id'])
        else:
            hostid = None
        if hostid:
            port[portbindings.HOST_ID] = hostid
            override = self._check_hostvif_override(hostid)
            if override:
                cfg_vif_type = override
        port[portbindings.VIF_TYPE] = cfg_vif_type

        port[portbindings.VIF_DETAILS] = {
            # TODO(rkukura): Replace with new VIF security details
            portbindings.CAP_PORT_FILTER:
            'security-group' in self.supported_extension_aliases,
            portbindings.OVS_HYBRID_PLUG: True
        }
        return port

    def _check_hostvif_override(self, hostid):
        """Return a per-host vif_type override from config, or False."""
        for v in cfg.CONF.NOVA.vif_types:
            if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
                return v
        return False

    def _get_port_net_tenantid(self, context, port):
        """Return the tenant id of the port's network (handles sharing)."""
        net = super(NeutronRestProxyV2Base,
                    self).get_network(context, port["network_id"])
        return net['tenant_id']

    def async_port_create(self, tenant_id, net_id, port):
        """Create a port on the controller and update its status in the DB.

        Runs in a greenthread.  On a controller 404 for the network it
        triggers a full topology resync instead of failing the port.
        """
        try:
            self.servers.rest_create_port(tenant_id, net_id, port)
        except servermanager.RemoteRestError as e:
            # 404 should never be received on a port create unless
            # there are inconsistencies between the data in neutron
            # and the data in the backend.
            # Run a sync to get it consistent.
            if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
                    e.status == httplib.NOT_FOUND and
                    servermanager.NXNETWORK in e.reason):
                LOG.error(_("Inconsistency with backend controller "
                            "triggering full synchronization."))
                # args depend on if we are operating in ML2 driver
                # or as the full plugin
                topoargs = self.servers.get_topo_function_args
                self._send_all_data(
                    send_ports=topoargs['get_ports'],
                    send_floating_ips=topoargs['get_floating_ips'],
                    send_routers=topoargs['get_routers'],
                    triggered_by_tenant=tenant_id
                )
                # If the full sync worked, the port will be created
                # on the controller so it can be safely marked as active
            else:
                # Any errors that don't result in a successful auto-sync
                # require that the port be placed into the error state.
                LOG.error(
                    _("NeutronRestProxyV2: Unable to create port: %s"), e)
                try:
                    self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
                except exceptions.PortNotFound:
                    # If port is already gone from DB and there was an error
                    # creating on the backend, everything is already consistent
                    pass
                return
        new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
                      else const.PORT_STATUS_DOWN)
        try:
            self._set_port_status(port['id'], new_status)
        except exceptions.PortNotFound:
            # This port was deleted before the create made it to the controller
            # so it now needs to be deleted since the normal delete request
            # would have deleted an non-existent port.
            self.servers.rest_delete_port(tenant_id, net_id, port['id'])

    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    def _set_port_status(self, port_id, status):
        """Set a port's status column directly through a fresh DB session."""
        session = db.get_session()
        try:
            port = session.query(models_v2.Port).filter_by(id=port_id).one()
            port['status'] = status
            session.flush()
        except sqlexc.NoResultFound:
            raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
    """Decorator that stashes the request context in the server pool.

    Handles both calling conventions: the core plugin passes the context
    as the top-level object, while ML2 keeps the real context in
    ``context._plugin_context``.
    """
    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        plugin_context = getattr(context, '_plugin_context', context)
        self.servers.set_context(plugin_context)
        return f(self, context, *args, **kwargs)
    return wrapper
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "binding",
"extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
    def __init__(self):
        """Initialize config, controller connections, RPC and initial sync."""
        super(NeutronRestProxyV2, self).__init__()
        LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
                 version.version_string_with_vcs())
        pl_config.register_config()
        # Green thread pool used for async controller port creates.
        self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)

        # Include the Big Switch Extensions path in the api_extensions
        neutron_extensions.append_api_extensions_path(extensions.__path__)

        self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route

        # init network ctrl connections
        self.servers = servermanager.ServerPool()
        self.servers.get_topo_function = self._get_all_data
        self.servers.get_topo_function_args = {'get_ports': True,
                                               'get_floating_ips': True,
                                               'get_routers': True}

        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )

        # setup rpc for security and DHCP agents
        self._setup_rpc()

        # Optionally push the whole topology to the controller at startup.
        if cfg.CONF.RESTPROXY.sync_data:
            self._send_all_data()

        LOG.debug(_("NeutronRestProxyV2: initialization done"))
    def _setup_rpc(self):
        """Wire up RPC consumers for security group, DHCP and agent APIs."""
        self.conn = n_rpc.create_connection(new=True)
        self.topic = topics.PLUGIN
        self.notifier = AgentNotifierApi(topics.AGENT)
        # init dhcp agent support
        self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            self._dhcp_agent_notifier
        )
        self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(),
                          dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()
    @put_context_in_serverpool
    def create_network(self, context, network):
        """Create a network.

        Network represents an L2 network segment which can have a set of
        subnets and ports associated with it.

        :param context: neutron api request context
        :param network: dictionary describing the network

        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can specify
                         a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }

        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_network() called"))

        self._warn_on_state_status(network['network'])

        with context.session.begin(subtransactions=True):
            self._ensure_default_security_group(
                context,
                network['network']["tenant_id"]
            )
            # create network in DB
            new_net = super(NeutronRestProxyV2, self).create_network(context,
                                                                     network)
            self._process_l3_create(context, new_net, network['network'])
            # create network on the network controller; inside the
            # transaction so a REST failure rolls back the DB record
            self._send_create_network(new_net, context)

        # return created network
        return new_net
    @put_context_in_serverpool
    def update_network(self, context, net_id, network):
        """Updates the properties of a particular Virtual Network.

        :param context: neutron api request context
        :param net_id: uuid of the network to update
        :param network: dictionary describing the updates

        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can
                         specify a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }

        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2.update_network() called"))

        self._warn_on_state_status(network['network'])

        session = context.session
        with session.begin(subtransactions=True):
            new_net = super(NeutronRestProxyV2, self).update_network(
                context, net_id, network)
            self._process_l3_update(context, new_net, network['network'])

            # update network on network controller; inside the transaction
            # so a REST failure rolls back the DB change
            self._send_update_network(new_net, context)
        return new_net
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_network(self, context, net_id):
        """Delete a network.

        :param context: neutron api request context
        :param id: UUID representing the network to delete.

        :returns: None

        :raises: exceptions.NetworkInUse
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: delete_network() called"))

        # Validate args
        orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)

        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, net_id)
            ret_val = super(NeutronRestProxyV2, self).delete_network(context,
                                                                     net_id)
            # controller delete inside the transaction so a REST failure
            # rolls back the DB removal
            self._send_delete_network(orig_net, context)
        return ret_val
    @put_context_in_serverpool
    def create_port(self, context, port):
        """Create a port, which is a connection point of a device
        (e.g., a VM NIC) to attach an L2 Neutron network.

        :param context: neutron api request context
        :param port: dictionary describing the port

        :returns:
        {
            "id": uuid representing the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": Sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet IDs and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }

        :raises: exceptions.NetworkNotFound
        :raises: exceptions.StateInvalid
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_port() called"))

        # Update DB in new session so exceptions rollback changes
        with context.session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            # non-router port status is set to pending. it is then updated
            # after the async rest call completes. router ports are synchronous
            if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
                port['port']['status'] = const.PORT_STATUS_ACTIVE
            else:
                port['port']['status'] = const.PORT_STATUS_BUILD
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            new_port = super(NeutronRestProxyV2, self).create_port(context,
                                                                   port)
            self._process_port_create_security_group(context, new_port, sgids)
            if (portbindings.HOST_ID in port['port']
                and 'id' in new_port):
                host_id = port['port'][portbindings.HOST_ID]
                porttracker_db.put_port_hostid(context, new_port['id'],
                                               host_id)
            new_port[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, new_port,
                    port['port'].get(addr_pair.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, new_port,
                                                      dhcp_opts)
            new_port = self._extend_port_dict_binding(context, new_port)
            net = super(NeutronRestProxyV2,
                        self).get_network(context, new_port["network_id"])
            # DHCP ports optionally get a host route to the metadata server
            if self.add_meta_server_route:
                if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
                    destination = METADATA_SERVER_IP + '/32'
                    self._add_host_route(context, destination, new_port)

            # create on network ctrl
            mapped_port = self._map_state_and_status(new_port)
            # ports have to be created synchronously when creating a router
            # port since adding router interfaces is a multi-call process
            if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
                self.servers.rest_create_port(net["tenant_id"],
                                              new_port["network_id"],
                                              mapped_port)
            else:
                self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
                                    new_port["network_id"], mapped_port)
        self.notify_security_groups_member_updated(context, new_port)
        return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
    @put_context_in_serverpool
    def update_port(self, context, port_id, port):
        """Update values of a port.

        :param context: neutron api request context
        :param id: UUID representing the port to update.
        :param port: dictionary with keys indicating fields to update.

        :returns: a mapping sequence with the following signature:
        {
            "id": uuid representing the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet IDs and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }

        :raises: exceptions.StateInvalid
        :raises: exceptions.PortNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: update_port() called"))

        self._warn_on_state_status(port['port'])

        # Validate Args
        orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
        with context.session.begin(subtransactions=True):
            # Update DB
            new_port = super(NeutronRestProxyV2,
                             self).update_port(context, port_id, port)
            # Only push to the controller when something it cares about
            # (address pairs, host binding, device id) actually changed.
            ctrl_update_required = False
            if addr_pair.ADDRESS_PAIRS in port['port']:
                ctrl_update_required |= (
                    self.update_address_pairs_on_port(context, port_id, port,
                                                      orig_port, new_port))
            self._update_extra_dhcp_opts_on_port(context, port_id, port,
                                                 new_port)
            old_host_id = porttracker_db.get_port_hostid(context,
                                                         orig_port['id'])
            if (portbindings.HOST_ID in port['port']
                and 'id' in new_port):
                host_id = port['port'][portbindings.HOST_ID]
                porttracker_db.put_port_hostid(context, new_port['id'],
                                               host_id)
                if old_host_id != host_id:
                    ctrl_update_required = True

            if (new_port.get("device_id") != orig_port.get("device_id") and
                orig_port.get("device_id")):
                ctrl_update_required = True

            if ctrl_update_required:
                # tenant_id must come from network in case network is shared
                net_tenant_id = self._get_port_net_tenantid(context, new_port)
                new_port = self._extend_port_dict_binding(context, new_port)
                mapped_port = self._map_state_and_status(new_port)
                self.servers.rest_update_port(net_tenant_id,
                                              new_port["network_id"],
                                              mapped_port)
        agent_update_required = self.update_security_group_on_port(
            context, port_id, port, orig_port, new_port)
        agent_update_required |= self.is_security_group_member_updated(
            context, orig_port, new_port)

        # return new_port
        return new_port
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_port(self, context, port_id, l3_port_check=True):
        """Delete a port.

        :param context: neutron api request context
        :param id: UUID representing the port to delete.
        :param l3_port_check: when True, refuse to delete router-owned ports.

        :raises: exceptions.PortInUse
        :raises: exceptions.PortNotFound
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: delete_port() called"))

        # if needed, check to see if this is a port owned by
        # and l3-router.  If so, we should prevent deletion.
        if l3_port_check and self.l3_plugin:
            self.l3_plugin.prevent_l3_port_deletion(context, port_id)
        with context.session.begin(subtransactions=True):
            if self.l3_plugin:
                router_ids = self.l3_plugin.disassociate_floatingips(
                    context, port_id, do_notify=False)
            self._delete_port_security_group_bindings(context, port_id)
            port = super(NeutronRestProxyV2, self).get_port(context, port_id)
            # Tenant ID must come from network in case the network is shared
            tenid = self._get_port_net_tenantid(context, port)
            self._delete_port(context, port_id)
            self.servers.rest_delete_port(tenid, port['network_id'], port_id)

        if self.l3_plugin:
            # now that we've left db transaction, we are safe to notify
            self.l3_plugin.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})
| uni2u/neutron | neutron/plugins/bigswitch/plugin.py | Python | apache-2.0 | 39,750 |
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
import string
import random
class Utils:
    """Helpers that generate random text and identifier strings."""

    @staticmethod
    def get_random_text(length_text=10, space_number=1, with_upper_case=True):
        """Return a random string of *length_text* lowercase letters.

        Up to *space_number* spaces are placed at distinct interior
        positions (never in the first two or last two characters).  When
        *with_upper_case* is set, the first character and the character
        following each space are capitalized, giving title-like text.
        """
        results = []
        while len(results) < length_text:
            char = random.choice(string.ascii_letters[:26])
            results.append(char)
        if with_upper_case:
            results[0] = results[0].upper()
        # never request more spaces than there are legal positions,
        # otherwise the retry loop below could not terminate
        max_spaces = max(0, min(space_number, length_text - 4))
        current_spaces = []
        while len(current_spaces) < max_spaces:
            space_pos = random.randint(2, length_text - 3)
            if space_pos in current_spaces:
                # BUG FIX: was `break`, which silently produced fewer
                # spaces than requested on the first duplicate draw
                continue
            results[space_pos] = " "
            if with_upper_case:
                # BUG FIX: capitalize the character that follows the
                # space; the original overwrote it with a copy of the
                # character *before* the space
                results[space_pos + 1] = results[space_pos + 1].upper()
            current_spaces.append(space_pos)
        return ''.join(results)

    @staticmethod
    def get_ios_id(length=10):
        """Return an iOS-style id: alphanumerics with dashes at 3 and 6.

        *length* must be at least 7 so both dash positions exist.
        """
        results = [random.choice(string.digits + string.ascii_letters)
                   for _ in range(length)]
        results[3] = "-"
        results[6] = "-"
        return ''.join(results)

    @staticmethod
    def get_android_id(length=10):
        """Return an Android-style id made of *length* random letters."""
        return ''.join(random.choice(string.ascii_letters)
                       for _ in range(length))
| tonybeltramelli/pix2code | compiler/classes/Utils.py | Python | apache-2.0 | 1,382 |
"""
WSGI config for crowd_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crowd_server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| codeaudit/ampcrowd | ampcrowd/crowd_server/wsgi.py | Python | apache-2.0 | 399 |
from __future__ import print_function
import os
import abc
import shelve
from pprint import pprint
from cloudmesh_base.tables import dict_printer
from cloudmesh_base.Shell import Shell
from cloudmesh_base.util import banner
from cloudmesh_base.util import path_expand
from cloudmesh_pbs.OpenPBS import OpenPBS
class pbs_db_interface(object):
    """Abstract interface for a PBS job database.

    Records are addressed by job id through the mapping protocol;
    concrete implementations must provide load/get/set/save/update.
    """
    __metaclass__ = abc.ABCMeta

    # mapping of job id -> job record; set by load() in implementations
    db = None

    def data(self):
        """Return a plain dict copy of the database contents."""
        return dict(self.db)

    def __getitem__(self, index):
        return self.db[index]

    def __setitem__(self, index, value):
        self.db[index] = value

    @abc.abstractmethod
    def load(self, filename):
        """loads the saved database from the file"""

    @abc.abstractmethod
    def get(self, id):
        """get the object with the id"""

    @abc.abstractmethod
    def set(self, id, value):
        """set the object with the id to value"""

    def set_filename(self, filename):
        """Remember the backing file used by load()/save()/remove()."""
        self.filename = filename

    def remove(self):
        """Best-effort removal of the backing file.

        A missing or unremovable file is deliberately ignored.
        """
        try:
            os.remove(self.filename)
        except OSError:
            # BUG FIX: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit and programming errors
            pass

    @abc.abstractmethod
    def save(self):
        """save the cloudmesh_job"""

    @abc.abstractmethod
    def update(self):
        """load the cloudmesh_job"""
class DbPBS(pbs_db_interface):
    """Shelve-backed database of PBS jobs populated from qstat output."""

    def __init__(self, filename=None):
        self.pbs = OpenPBS(deploy=True)
        # BUG FIX: forward the constructor argument -- the original
        # ignored `filename` and always opened the configured location
        self.open(filename)

    def open(self, filename=None):
        """Open the shelve database, creating its directory if needed.

        When *filename* is None the location configured for OpenPBS is
        used.
        """
        if filename is not None:
            self.filename = filename
        else:
            self.filename = path_expand(self.pbs.database_filename())
        path = os.path.dirname(self.filename)
        Shell.mkdir(path)
        self.load()

    def clear(self):
        """Remove every job record and persist the empty database."""
        # iterate over a snapshot of the keys: deleting entries while
        # iterating the live shelve is not safe
        for id in list(self.db.keys()):
            del self.db[id]
        self.save()

    def load(self):
        """load the cloudmesh_job"""
        print('loading', self.filename)
        # remove db ending so that shelve automatically adds it
        self.filename = self.filename.replace(".db", "")
        self.db = shelve.open(self.filename, writeback=True)

    def save(self):
        """Flush pending (writeback) changes to disk."""
        self.db.sync()

    def get(self, id):
        """Return the job record stored under *id*."""
        return self.db[id]

    def status(self, id):
        """Return the PBS job state string for job *id*."""
        return self.get(id)["job_state"]

    def set(self, id, value):
        """Store *value* under *id* and persist immediately."""
        self.db[id] = value
        self.save()

    def keys(self):
        """Return the stored job ids.

        BUG FIX: the original computed the keys but never returned them.
        """
        return self.data().keys()

    def delete(self, id):
        """Drop the record for job *id* (persisted on the next save())."""
        del self.db[id]

    def close(self):
        """Close the underlying shelve (implicitly syncs)."""
        self.db.close()

    def update(self, host=None, user=True):
        """Refresh the database from qstat output on *host*.

        :param host: name of the host to query (required)
        :param user: if True, restrict the query to the current user
        :raises ValueError: if *host* is None
        """
        if host is None:
            print("host is none is not supported yet")
            # BUG FIX: a bare `raise` outside an except block raises
            # RuntimeError("No active exception"); raise explicitly
            raise ValueError("host is none is not supported yet")
        print("QSTAT")
        r = dict(self.pbs.qstat(host, user=user, format='dict'))
        pprint(r)
        # BUG FIX: the original tested `r is not {}`, an identity
        # comparison that is always True; test emptiness instead
        if r:
            for jobid in r:
                self.db[jobid] = r[jobid]
            self.save()
        else:
            print("no jobs found after query")
        print("update completed")

    def info(self):
        """Print the location of the backing file."""
        print("Filename:", self.filename)

    def list(self, attributes=None, output="table"):
        """Render all jobs in the given *output* format.

        :param attributes: columns to include; a default job-oriented set
            is used when omitted
        :param output: one of "csv", "table", "dict" or "yaml"
        :returns: the formatted listing, or None when there are no jobs
            or the format is unknown
        """
        if self.db is None or len(self.db) == 0:
            print("No jobs found")
            return None
        columns = attributes
        if columns is None:
            columns = ["cm_jobid", "cm_host", "cm_user", "Job_Name",
                       "job_state", "exit_status"]
        # project every job onto the requested columns, padding missing
        # attributes with the string "None"
        d = {}
        for jobid in self.db:
            content = {}
            for attribute in columns:
                try:
                    content[attribute] = self.db[jobid][attribute]
                except (KeyError, TypeError):
                    content[attribute] = "None"
            d[jobid] = content
        if output in ["csv", "table", "dict", "yaml"]:
            return dict_printer(d, order=columns, output=output)
        return None

    def qsub(self, name, host, script, template=None, kind="dict"):
        """Submit *script* through OpenPBS and return the parsed reply."""
        r = self.pbs.qsub(name, host, script, template=template, kind=kind)
        pprint(r)
        return dict(r)
if __name__ == "__main__":
qsub = False
db = DbPBS()
db.clear()
db.info()
db.update(host="india", user=False)
print(db.list(output="table"))
print(db.list(output="csv"))
print(db.list(output="dict"))
print(db.list(output="yaml"))
banner("user")
db.clear()
db.update(host="india")
print(db.list(output="table"))
if qsub:
banner('qsub')
pbs = OpenPBS()
jobname = "job-" + pbs.jobid + ".pbs"
host = "india"
script_template = pbs.read_script("etc/job.pbs")
print(script_template)
r = db.qsub(jobname, host, 'echo "Hello"', template=script_template)
pprint(r)
banner('variable list')
pprint(OpenPBS.variable_list(r)) | rajpushkar83/pbs | cloudmesh_pbs/DbPBS.py | Python | apache-2.0 | 4,729 |
# Copyright 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connect your vlan to the world."""
import os
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.api.openstack import extensions
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api.openstack.compute.contrib.cloudpipe")
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
class CloudpipeTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single cloudpipe entity."""

    def construct(self):
        # flatten the 'cloudpipe' dict and wrap it as a version-1 template
        flat = xmlutil.make_flat_dict('cloudpipe')
        return xmlutil.MasterTemplate(flat, 1)
class CloudpipesTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a collection of cloudpipes."""

    def construct(self):
        # each member of the 'cloudpipes' collection is rendered as a
        # flattened 'cloudpipe' dict
        root = xmlutil.TemplateElement('cloudpipes')
        root.append(xmlutil.make_flat_dict('cloudpipe',
                                           selector='cloudpipes',
                                           subselector='cloudpipe'))
        return xmlutil.MasterTemplate(root, 1)
class CloudpipeController(object):
    """Handle creating and listing cloudpipe (per-project VPN) instances."""

    def __init__(self):
        self.compute_api = compute.API()
        self.auth_manager = manager.AuthManager()
        self.cloudpipe = pipelib.CloudPipe()
        self.setup()

    def setup(self):
        """Ensure the keychains and folders exist."""
        # NOTE(vish): One of the drawbacks of doing this in the api is
        #             the keys will only be on the api node that launched
        #             the cloudpipe.
        if not os.path.exists(FLAGS.keys_path):
            os.makedirs(FLAGS.keys_path)

    def _get_cloudpipe_for_project(self, context, project_id):
        """Get the cloudpipe instance for a project ID.

        Returns None when the project has no non-deleted VPN instance.
        """
        # NOTE(todd): this should probably change to compute_api.get_all
        #             or db.instance_get_project_vpn
        for instance in db.instance_get_all_by_project(context, project_id):
            if (instance['image_id'] == str(FLAGS.vpn_image_id)
                and instance['vm_state'] != vm_states.DELETED):
                return instance

    def _vpn_dict(self, project, vpn_instance):
        """Build the API representation of a project's VPN state."""
        rv = {'project_id': project.id,
              'public_ip': project.vpn_ip,
              'public_port': project.vpn_port}
        if vpn_instance:
            rv['instance_id'] = vpn_instance['uuid']
            rv['created_at'] = utils.isotime(vpn_instance['created_at'])
            address = vpn_instance.get('fixed_ip', None)
            if address:
                rv['internal_ip'] = address['address']
            if project.vpn_ip and project.vpn_port:
                # probe the endpoint so the API reports actual liveness
                if utils.vpn_ping(project.vpn_ip, project.vpn_port):
                    rv['state'] = 'running'
                else:
                    rv['state'] = 'down'
            else:
                rv['state'] = 'invalid'
        else:
            rv['state'] = 'pending'
        return rv

    @wsgi.serializers(xml=CloudpipeTemplate)
    def create(self, req, body):
        """Create a new cloudpipe instance, if none exists.

        Parameters: {cloudpipe: {project_id: XYZ}}
        """
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        params = body.get('cloudpipe', {})
        project_id = params.get('project_id', ctxt.project_id)
        instance = self._get_cloudpipe_for_project(ctxt, project_id)
        if not instance:
            proj = self.auth_manager.get_project(project_id)
            user_id = proj.project_manager_id
            try:
                self.cloudpipe.launch_vpn_instance(project_id, user_id)
            except db.NoMoreNetworks:
                msg = _("Unable to claim IP for VPN instances, ensure it "
                        "isn't running, and try again in a few minutes")
                raise exception.ApiError(msg)
            # BUG FIX: look the new instance up by project id -- the
            # original passed the project *object* (proj), which the DB
            # query can never match.
            instance = self._get_cloudpipe_for_project(ctxt, project_id)
        return {'instance_id': instance['uuid']}

    @wsgi.serializers(xml=CloudpipesTemplate)
    def index(self, req):
        """List running cloudpipe instances."""
        context = req.environ['nova.context']
        authorize(context)
        vpns = []
        # TODO(todd): could use compute_api.get_all with admin context?
        for project in self.auth_manager.get_projects():
            instance = self._get_cloudpipe_for_project(context, project.id)
            vpns.append(self._vpn_dict(project, instance))
        return {'cloudpipes': vpns}
class Cloudpipe(extensions.ExtensionDescriptor):
    """Adds actions to create cloudpipe instances.
    When running with the Vlan network mode, you need a mechanism to route
    from the public Internet to your vlans. This mechanism is known as a
    cloudpipe.
    At the time of creating this class, only OpenVPN is supported. Support for
    a SSH Bastion host is forthcoming.
    """
    # NOTE: the docstring above doubles as the extension description
    # exposed through the API, so it is kept verbatim.
    name = "Cloudpipe"
    alias = "os-cloudpipe"
    namespace = "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1"
    updated = "2011-12-16T00:00:00+00:00"

    def get_resources(self):
        # expose the controller under the /os-cloudpipe collection
        controller = CloudpipeController()
        return [extensions.ResourceExtension('os-cloudpipe', controller)]
| rcbops/nova-buildpackage | nova/api/openstack/compute/contrib/cloudpipe.py | Python | apache-2.0 | 5,929 |
# Copyright (C) 2013-2015 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from hardware import matcher
class TestMatcher(unittest.TestCase):
    """Unit tests for hardware.matcher spec matching.

    Hardware facts are 4-tuples ``(category, item, attribute, value)``.
    Specs may contain ``$var`` placeholders (bound into the ``arr`` dict),
    ``$$var`` (also copied into the second result dict) and helper
    predicates such as ``gt()``, ``in()``, ``range()`` or ``regexp()``.
    """
    # --- exact value matching ------------------------------------------
    def test_equal(self):
        lines = [('system', 'product', 'serial', 'CZJ31402CD')]
        spec = ('system', 'product', 'serial', 'CZJ31402CD')
        arr = {}
        self.assertTrue(matcher.match_spec(spec, lines, arr))
    def test_not_equal(self):
        lines = [('system', 'product', 'serial', 'CZJ31402CD')]
        spec = ('system', 'product', 'serial', 'CZJ31402CE')
        arr = {}
        self.assertFalse(matcher.match_spec(spec, lines, arr))
    # --- $var placeholder binding --------------------------------------
    def test_var(self):
        lines = [('disk', '1I:1:1', 'size', '1000GB')]
        spec = ('disk', '$disk8', 'size', '1000GB')
        arr = {}
        self.assertTrue(matcher.match_spec(spec, lines, arr))
        self.assertEqual(arr, {'disk8': '1I:1:1'})
    def test_vars(self):
        lines = [
            ('system', 'product', 'serial', 'CZJ31402CD'),
            ('disk', '1I:1:1', 'size', '1000GB'),
            ('disk', '1I:1:1', 'type', 'SATA'),
            ('disk', '1I:1:1', 'control', 'hpa'),
            ('disk', '1I:1:2', 'size', '1000GB'),
            ('disk', '1I:1:2', 'type', 'SATA'),
            ('disk', '1I:1:2', 'control', 'hpa'),
            ('disk', '1I:1:3', 'size', '1000GB'),
            ('disk', '1I:1:3', 'type', 'SATA'),
            ('disk', '1I:1:3', 'control', 'hpa'),
            ('disk', '1I:1:4', 'size', '1000GB'),
            ('disk', '1I:1:4', 'type', 'SATA'),
            ('disk', '1I:1:4', 'control', 'hpa'),
            ('disk', '2I:1:5', 'size', '1000GB'),
            ('disk', '2I:1:5', 'type', 'SATA'),
            ('disk', '2I:1:5', 'control', 'hpa'),
            ('disk', '2I:1:6', 'size', '1000GB'),
            ('disk', '2I:1:6', 'type', 'SATA'),
            ('disk', '2I:1:6', 'control', 'hpa'),
            ('disk', '2I:1:7', 'size', '100GB'),
            ('disk', '2I:1:7', 'type', 'SSDSATA'),
            ('disk', '2I:1:7', 'control', 'hpa'),
            ('disk', '2I:1:8', 'size', '100GB'),
            ('disk', '2I:1:8', 'type', 'SSDSATA'),
            ('disk', '2I:1:8', 'control', 'hpa'),
            ]
        specs = [('system', 'product', 'serial', 'CZJ31402CD'),
                 ('disk', '$disk1', 'size', '100GB'),
                 ('disk', '$disk2', 'size', '100GB'),
                 ('disk', '$disk3', 'size', '1000GB'),
                 ('disk', '$disk4', 'size', '1000GB'),
                 ('disk', '$disk5', 'size', '1000GB'),
                 ('disk', '$disk6', 'size', '1000GB'),
                 ('disk', '$disk7', 'size', '1000GB'),
                 ('disk', '$disk8', 'size', '1000GB')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr,
                         {'disk1': '2I:1:7',
                          'disk2': '2I:1:8',
                          'disk3': '1I:1:1',
                          'disk4': '1I:1:2',
                          'disk5': '1I:1:3',
                          'disk6': '1I:1:4',
                          'disk7': '2I:1:5',
                          'disk8': '2I:1:6',
                          }
                         )
    def test_already_bound(self):
        lines = [
            ('disk', '1I:1:2', 'size', '100GB'),
            ('disk', '1I:1:1', 'size', '1000GB'),
            ('disk', '1I:1:1', 'control', 'hpa'),
            ('disk', '1I:1:2', 'control', 'hpa'),
            ]
        specs = [
            ('disk', '$disk1', 'size', '100GB'),
            ('disk', '$disk1', 'control', 'hpa'),
            ('disk', '$disk2', 'size', '1000GB'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr,
                         {'disk1': '1I:1:2',
                          'disk2': '1I:1:1',
                          })
    def test_order(self):
        specs = [
            ('disk', '$disk1', 'size', '100'),
            ('disk', '$disk1', 'slot', '$slot1'),
            ('disk', '$disk2', 'size', '1000'),
            ('disk', '$disk2', 'slot', '$slot2'),
            ]
        lines = [
            ('disk', '1I:1:1', 'size', '1000'),
            ('disk', '1I:1:1', 'control', 'hpa'),
            ('disk', '1I:1:1', 'slot', '2'),
            ('disk', '2I:1:8', 'size', '100'),
            ('disk', '2I:1:8', 'control', 'hpa'),
            ('disk', '2I:1:8', 'slot', '2'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
    def test_2vars(self):
        specs = [
            ('disk', '$disk', 'size', '$size'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr,
                         {'size': '8',
                          'disk': 'vda',
                          })
    def test_2dollars(self):
        specs = [
            ('disk', '$$disk', 'size', '$size'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ]
        arr = {}
        arr2 = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, arr2))
        self.assertEqual(arr,
                         {'size': '8',
                          'disk': 'vda',
                          })
        self.assertEqual(arr2,
                         {'disk': 'vda',
                          })
    def test_multiple_vars(self):
        specs = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vdb', 'size', '16'),
            ]
        specs2 = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vdb', 'size', '8'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vdb', 'size', '8'),
            ]
        arr = {}
        self.assertFalse(matcher.match_all(lines, specs, arr, {}))
        self.assertTrue(matcher.match_all(lines, specs2, arr, {}), lines)
    def test_multiple(self):
        spec = ('disk', '$disk', 'size', '8')
        lines = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vdb', 'size', '8'),
            ]
        arr = {}
        self.assertTrue(matcher.match_multiple(lines, spec, arr))
        self.assertEqual(arr['disk'], ['vda', 'vdb'])
    # --- predicate helpers (gt, lt, in, range, regexp, ...) ------------
    def test_gt(self):
        specs = [('disk', '$disk', 'size', 'gt(10)')]
        lines = [
            ('disk', 'vda', 'size', '20'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_ge(self):
        specs = [('disk', '$disk', 'size', 'ge(10.1)')]
        lines = [
            ('disk', 'vda', 'size', '10.5'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_lt(self):
        specs = [('disk', '$disk', 'size', 'lt(30)')]
        lines = [
            ('disk', 'vda', 'size', '20'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_le(self):
        specs = [('disk', '$disk', 'size', 'le(20)')]
        lines = [
            ('disk', 'vda', 'size', '20'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_not(self):
        specs = [('network', '$eth', 'serial', '$mac=not(regexp(^28:d2:))')]
        lines = [('network', 'eth0', 'serial', '20:d2:44:1b:0a:8b')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['eth'], 'eth0')
        self.assertEqual(arr['mac'], '20:d2:44:1b:0a:8b')
    def test_and(self):
        specs = [('disk', '$disk', 'size', 'and(gt(20), lt(50))')]
        lines = [('disk', 'vda', 'size', '40')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_or(self):
        specs = [('disk', '$disk', 'size', 'or(lt(20), gt(30))')]
        lines = [('disk', 'vda', 'size', '40')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_network(self):
        specs = [('network', '$eth', 'ipv4', u'network(192.168.2.0/24)')]
        lines = [('network', 'eth0', 'ipv4', u'192.168.2.2')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['eth'], 'eth0')
    def test_le_var(self):
        specs = [('disk', '$disk', 'size', '$size=le(20)')]
        lines = [('disk', 'vda', 'size', '20')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
        self.assertEqual(arr['size'], '20')
    def test_in(self):
        specs = [('disk', '$disk', 'size', 'in(10, 20, 30)')]
        lines = [('disk', 'vda', 'size', '20')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_in2(self):
        specs = [('disk', '$disk=in("vda", "vdb")', 'size', '20')]
        lines = [('disk', 'vda', 'size', '20')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_rangeint(self):
        specs = [('disk', '$disk', 'size', 'range(20, 25)')]
        lines = [('disk', 'vda', 'size', '20')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vda')
    def test_rangefloat(self):
        specs = [('ipmi', '+12V', 'value', 'range(11.9, 12.2)')]
        lines = [('ipmi', '+12V', 'value', '12.14')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
    def test_regexp(self):
        specs = [('network', '$eth', 'serial', 'regexp(^28:d2:)')]
        lines = [('network', 'eth0', 'serial', '28:d2:44:1b:0a:8b')]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
    # --- backtracking over alternative bindings ------------------------
    def test_backtrack(self):
        specs = [
            ('disk', '$disk', 'size', '8'),
            ('disk', '$disk', 'type', 'b'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vda', 'type', 'a'),
            ('disk', 'vdb', 'size', '8'),
            ('disk', 'vdb', 'type', 'b'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk'], 'vdb', arr)
    def test_backtrack2(self):
        specs = [
            ('disk', '$disk', 'size', '8'),
            ('disk', '$disk', 'type', 'b'),
            ('disk', '$disk2', 'size', '8'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vda', 'type', 'a'),
            ('disk', 'vdb', 'size', '8'),
            ('disk', 'vdb', 'type', 'b'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(lines, specs, arr, {}))
        self.assertEqual(arr['disk2'], 'vda', arr)
        self.assertEqual(arr['disk'], 'vdb', arr)
    def test_backtrack3(self):
        specs = [
            ('disk', '$disk', 'size', '8'),
            ('disk', '$disk', 'type', 'c'),
            ('disk', '$disk2', 'size', '8'),
            ]
        lines = [
            ('disk', 'vda', 'size', '8'),
            ('disk', 'vda', 'type', 'a'),
            ('disk', 'vdb', 'size', '8'),
            ('disk', 'vdb', 'type', 'b'),
            ]
        arr = {}
        self.assertFalse(matcher.match_all(lines, specs, arr, {}))
    def test_backtracklong(self):
        # realistic 8-disk spec matched against the X8_HW fact list
        # defined at module level below
        specs = [
            ('disk', 'logical', 'count', '8'),
            ('disk', '$disk1', 'size', '1000'),
            ('disk', '$disk1', 'vendor', 'Hitachi'),
            ('disk', '$disk1', 'model', 'HUA722010CLA330'),
            ('disk', '$disk1', 'rev', 'R001'),
            ('disk', '$disk1', 'optimal_io_size', '0'),
            ('disk', '$disk1', 'physical_block_size', '512'),
            ('disk', '$disk1', 'rotational', '1'),
            ('disk', '$disk1', 'Write Cache Enable', '1'),
            ('disk', '$disk1', 'Read Cache Disable', '0'),
            ('disk', '$disk2', 'size', '1000'),
            ('disk', '$disk2', 'vendor', 'Seagate'),
            ('disk', '$disk2', 'model', 'ST31000528AS'),
            ('disk', '$disk2', 'rev', 'R001'),
            ('disk', '$disk2', 'optimal_io_size', '0'),
            ('disk', '$disk2', 'physical_block_size', '512'),
            ('disk', '$disk2', 'rotational', '1'),
            ('disk', '$disk2', 'Write Cache Enable', '1'),
            ('disk', '$disk2', 'Read Cache Disable', '0'),
            ('disk', '$disk3', 'size', '1000'),
            ('disk', '$disk3', 'optimal_io_size', '0'),
            ('disk', '$disk3', 'physical_block_size', '512'),
            ('disk', '$disk3', 'rotational', '1'),
            ('disk', '$disk3', 'Write Cache Enable', '1'),
            ('disk', '$disk3', 'Read Cache Disable', '0'),
            ('disk', '$disk4', 'size', '1000'),
            ('disk', '$disk4', 'optimal_io_size', '0'),
            ('disk', '$disk4', 'physical_block_size', '512'),
            ('disk', '$disk4', 'rotational', '1'),
            ('disk', '$disk4', 'Write Cache Enable', '1'),
            ('disk', '$disk4', 'Read Cache Disable', '0'),
            ('disk', '$disk5', 'size', '1000'),
            ('disk', '$disk5', 'optimal_io_size', '0'),
            ('disk', '$disk5', 'physical_block_size', '512'),
            ('disk', '$disk5', 'rotational', '1'),
            ('disk', '$disk5', 'Write Cache Enable', '1'),
            ('disk', '$disk5', 'Read Cache Disable', '0'),
            ('disk', '$disk6', 'size', '1000'),
            ('disk', '$disk6', 'optimal_io_size', '0'),
            ('disk', '$disk6', 'physical_block_size', '512'),
            ('disk', '$disk6', 'rotational', '1'),
            ('disk', '$disk6', 'Write Cache Enable', '1'),
            ('disk', '$disk6', 'Read Cache Disable', '0'),
            ('disk', '$disk7', 'size', '1000'),
            ('disk', '$disk7', 'optimal_io_size', '0'),
            ('disk', '$disk7', 'physical_block_size', '512'),
            ('disk', '$disk7', 'rotational', '1'),
            ('disk', '$disk7', 'Write Cache Enable', '1'),
            ('disk', '$disk7', 'Read Cache Disable', '0'),
            ('disk', '$disk8', 'size', '1000'),
            ('disk', '$disk8', 'optimal_io_size', '0'),
            ('disk', '$disk8', 'physical_block_size', '512'),
            ('disk', '$disk8', 'rotational', '1'),
            ('disk', '$disk8', 'Write Cache Enable', '1'),
            ('disk', '$disk8', 'Read Cache Disable', '0'),
            ]
        arr = {}
        self.assertTrue(matcher.match_all(X8_HW, specs, arr, {}))
    # --- filename / MAC derivation -------------------------------------
    def test_generate_filename_and_macs(self):
        items = [('system', 'product', 'serial', 'Sysname'),
                 ('network', 'eth0', 'serial', 'mac')]
        self.assertEqual(matcher.generate_filename_and_macs(items),
                         {'sysname': 'Sysname-mac',
                          'sysserial': 'Sysname',
                          'eth': ['eth0'],
                          'serial': ['mac'],
                          })
    def test_generate_filename_and_macs_no_sysname(self):
        items = [('network', 'eth0', 'serial', 'aa:bb:cc')]
        self.assertEqual(matcher.generate_filename_and_macs(items),
                         {'serial': ['aa:bb:cc'],
                          'eth': ['eth0'],
                          'sysname': 'aa-bb-cc',
                          })
    def test_generate_filename_and_macs_virtualbox(self):
        items = [('disk', 'sda', 'size', '8'),
                 ('system', 'product', 'serial', '0'),
                 ('system', 'product', 'name', 'VirtualBox ()'),
                 ('system', 'product', 'vendor', 'innotek GmbH'),
                 ('system', 'product', 'version', '1.2'),
                 ('system', 'memory', 'size', '521113600'),
                 ('network', 'eth0', 'serial', '08:00:27:6f:77:22'),
                 ('network', 'eth0', 'vendor', 'Intel Corporation'),
                 ('network', 'eth0', 'product',
                  '82540EM Gigabit Ethernet Controller'),
                 ('network', 'eth0', 'size', '1000000000'),
                 ('network', 'eth0', 'ipv4', '10.0.2.15'),
                 ('network', 'eth0', 'link', 'yes'),
                 ('network', 'eth0', 'driver', 'e1000'),
                 ('system', 'cpu', 'number', '1')]
        result = matcher.generate_filename_and_macs(items)
        self.assertEqual(result['sysname'], 'VirtualBox-0-08-00-27-6f-77-22')
        self.assertEqual(result['serial'], ['08:00:27:6f:77:22'])
        self.assertEqual(result['eth'], ['eth0'])
if __name__ == "__main__":
unittest.main()
# Captured hardware-fact list for an 8-disk test chassis (category, item,
# attribute, value tuples); used as realistic input by
# TestMatcher.test_backtracklong.
X8_HW = [('disk', 'logical', 'count', '8'),
         ('disk', 'sdd', 'size', '1000'),
         ('disk', 'sdd', 'model', 'HUA722010CLA330'),
         ('disk', 'sdd', 'vendor', 'Hitachi'),
         ('disk', 'sdd', 'rev', 'R001'),
         ('disk', 'sdd', 'optimal_io_size', '0'),
         ('disk', 'sdd', 'physical_block_size', '512'),
         ('disk', 'sdd', 'rotational', '1'),
         ('disk', 'sdd', 'Write Cache Enable', '1'),
         ('disk', 'sdd', 'Read Cache Disable', '0'),
         ('disk', 'sdd', 'scsi-id', 'scsi-2001b4d2001775100'),
         ('disk', 'sde', 'size', '1000'),
         ('disk', 'sde', 'vendor', 'Hitachi'),
         ('disk', 'sde', 'model', 'HUA722010CLA330'),
         ('disk', 'sde', 'rev', 'R001'),
         ('disk', 'sde', 'optimal_io_size', '0'),
         ('disk', 'sde', 'physical_block_size', '512'),
         ('disk', 'sde', 'rotational', '1'),
         ('disk', 'sde', 'Write Cache Enable', '1'),
         ('disk', 'sde', 'Read Cache Disable', '0'),
         ('disk', 'sde', 'scsi-id', 'scsi-2001b4d2001655500'),
         ('disk', 'sdf', 'size', '1000'),
         ('disk', 'sdf', 'vendor', 'Hitachi'),
         ('disk', 'sdf', 'model', 'HDS721010CLA330'),
         ('disk', 'sdf', 'rev', 'R001'),
         ('disk', 'sdf', 'optimal_io_size', '0'),
         ('disk', 'sdf', 'physical_block_size', '512'),
         ('disk', 'sdf', 'rotational', '1'),
         ('disk', 'sdf', 'Write Cache Enable', '1'),
         ('disk', 'sdf', 'Read Cache Disable', '0'),
         ('disk', 'sdf', 'scsi-id', 'scsi-2001b4d2012776300'),
         ('disk', 'sdg', 'size', '1000'),
         ('disk', 'sdg', 'vendor', 'Seagate'),
         ('disk', 'sdg', 'model', 'ST31000528AS'),
         ('disk', 'sdg', 'rev', 'R001'),
         ('disk', 'sdg', 'optimal_io_size', '0'),
         ('disk', 'sdg', 'physical_block_size', '512'),
         ('disk', 'sdg', 'rotational', '1'),
         ('disk', 'sdg', 'Write Cache Enable', '1'),
         ('disk', 'sdg', 'Read Cache Disable', '0'),
         ('disk', 'sda', 'size', '1000'),
         ('disk', 'sda', 'vendor', 'Seagate'),
         ('disk', 'sda', 'model', 'ST31000528AS'),
         ('disk', 'sda', 'rev', 'R001'),
         ('disk', 'sda', 'optimal_io_size', '0'),
         ('disk', 'sda', 'physical_block_size', '512'),
         ('disk', 'sda', 'rotational', '1'),
         ('disk', 'sda', 'Write Cache Enable', '1'),
         ('disk', 'sda', 'Read Cache Disable', '0'),
         ('disk', 'sdb', 'size', '1000'),
         ('disk', 'sdb', 'vendor', 'Seagate'),
         ('disk', 'sdb', 'model', 'ST31000528AS'),
         ('disk', 'sdb', 'rev', 'R001'),
         ('disk', 'sdb', 'optimal_io_size', '0'),
         ('disk', 'sdb', 'physical_block_size', '512'),
         ('disk', 'sdb', 'rotational', '1'),
         ('disk', 'sdb', 'Write Cache Enable', '1'),
         ('disk', 'sdb', 'Read Cache Disable', '0'),
         ('disk', 'sdb', 'scsi-id', 'scsi-2001b4d2000000000'),
         ('disk', 'sdc', 'size', '1000'),
         ('disk', 'sdc', 'vendor', 'Seagate'),
         ('disk', 'sdc', 'model', 'ST31000528AS'),
         ('disk', 'sdc', 'rev', 'R001'),
         ('disk', 'sdc', 'optimal_io_size', '0'),
         ('disk', 'sdc', 'physical_block_size', '512'),
         ('disk', 'sdc', 'rotational', '1'),
         ('disk', 'sdc', 'Write Cache Enable', '1'),
         ('disk', 'sdc', 'Read Cache Disable', '0'),
         ('disk', 'sdh', 'size', '1000'),
         ('disk', 'sdh', 'vendor', 'Hitachi'),
         ('disk', 'sdh', 'model', 'HDS721010CLA330'),
         ('disk', 'sdh', 'rev', 'R001'),
         ('disk', 'sdh', 'optimal_io_size', '0'),
         ('disk', 'sdh', 'physical_block_size', '512'),
         ('disk', 'sdh', 'rotational', '1'),
         ('disk', 'sdh', 'Write Cache Enable', '1'),
         ('disk', 'sdh', 'Read Cache Disable', '0'),
         ('disk', 'sdh', 'scsi-id', 'scsi-2001b4d2012486900')]
| enovance/hardware | hardware/tests/test_matcher.py | Python | apache-2.0 | 21,360 |
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'jfernandez'
from lettuce import world, before, after
from commons.terrain_steps import setup_feature, setup_scenario, setup_outline, tear_down
from commons.provisioning_steps import ProvisioningSteps
from commons.rest_utils import RestUtils
from commons.configuration import CONFIG_VM_HOSTNAME
from commons.fabric_utils import execute_chef_client, execute_puppet_agent, remove_chef_client_cert_file, \
remove_puppet_agent_cert_file, execute_chef_client_stop, execute_puppet_agent_stop, \
remove_all_generated_test_files, remove_puppet_agent_catalog
provisioning_steps = ProvisioningSteps()
rest_utils = RestUtils()
@before.each_feature
def before_each_feature(feature):
    """
    Hook: executed once before each feature.
    Delegates to setup_feature(), which configures global vars and gets
    the auth token from keystone.  (Agents are launched per scenario in
    before_each_scenario, not here.)
    """
    setup_feature(feature)
@before.each_scenario
def before_each_scenario(scenario):
    """
    Hook: Will be executed before each Scenario.
    Setup Scenario: initialize World vars and launch agents (puppet and chef) in the target VM
    """
    setup_scenario(scenario)
    # Provisioning scenarios rely on both configuration agents already
    # running on the target VM before any steps execute.
    execute_chef_client()
    execute_puppet_agent()
@before.outline
def before_outline(param1, param2, param3, param4):
    """
    Hook: Will be executed before each Scenario Outline example.
    Unlike 'before_each_scenario' it does not restart the agents; it only
    removes files generated by previous examples and the puppet catalog.
    """
    setup_outline(param1, param2, param3, param4)
    remove_all_generated_test_files()
    remove_puppet_agent_catalog()
@after.each_scenario
def after_each_scenario(scenario):
    """
    Hook: Will be executed after each scenario.
    Stops both agents running in the VM, removes their certificates and all
    generated test files, and finally deletes the node through the SDC API.
    """
    execute_chef_client_stop()
    execute_puppet_agent_stop()
    remove_chef_client_cert_file()
    remove_puppet_agent_cert_file()
    remove_all_generated_test_files()
    remove_puppet_agent_catalog()
    # Deregister the VM from the node catalog kept by the server under test.
    rest_utils.delete_node(world.headers, world.tenant_id, CONFIG_VM_HOSTNAME)
@after.all
def after_all(scenario):
    """
    Hook: Will be executed once after all features have run.
    Performs the same cleanup as 'after_each_scenario' and tears the suite
    down.
    NOTE(review): lettuce's after.all hook receives the aggregated results
    object; here it is only forwarded to the cleanup helpers -- confirm.
    """
    after_each_scenario(scenario)
    tear_down(scenario)
| Fiware/cloud.SDC | test/acceptance/e2e/install_product/terrain.py | Python | apache-2.0 | 3,017 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice. It is used both when an
# optimizer is instantiated by name in `_get_optimizer` and as the upper
# bound for the default FTRL learning rate in `_get_default_optimizer`.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
  """Resolves `spec` into an optimizer instance.

  `spec` may be an optimizer name (looked up in
  `layers.OPTIMIZER_CLS_NAMES` and built with the default learning rate),
  a zero-argument callable producing an optimizer, or an already-built
  optimizer object, which is returned unchanged.
  """
  if isinstance(spec, six.string_types):
    return layers.OPTIMIZER_CLS_NAMES[spec](learning_rate=_LEARNING_RATE)
  if callable(spec):
    return spec()
  return spec
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and as_iteable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
labels, columns_to_variables):
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name is bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones_like(labels,
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
          single (possibly partitioned) variable. It's more efficient, but it's
          incompatible with SDCAOptimizer, and requires all feature columns are
          sparse and use the 'sum' combiner.

  Returns:
    An `estimator.ModelFnOps` instance.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)
  if not isinstance(features, dict):
    # Wrap a bare Tensor so the feature-column machinery can always treat
    # `features` as a dict.
    features = {"": features}
  parent_scope = "linear"
  # Shard large weight variables across parameter servers in 64MB slices.
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      parent_scope, values=features.values(), partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
  def _train_op_fn(loss):
    """Builds the train op: (optionally clipped) gradients over the weights
    collected in the "linear" collection, applied by `optimizer`."""
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))
  return head.head_ops(features, labels, mode, _train_op_fn, logits)
def sdca_model_fn(features, labels, mode, params):
  """A model_fn for linear models that use the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
          `_RegressionHead` or `_MultiClassHead`.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: An `SDCAOptimizer` instance.
      * weight_column_name: A string defining the weight feature column, or
          None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
          model weights.

  Returns:
    An `estimator.ModelFnOps` instance.

  Raises:
    ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
    ValueError: If the type of head is neither `_BinarySvmHead`, nor
      `_RegressionHead` nor `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)
  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")
  if isinstance(head, head_lib._BinarySvmHead):  # pylint: disable=protected-access
    loss_type = "hinge_loss"
  elif isinstance(head, head_lib._MultiClassHead):  # pylint: disable=protected-access
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):  # pylint: disable=protected-access
    loss_type = "squared_loss"
  else:
    # Bug fix: this previously `return`ed the ValueError instance instead of
    # raising it, handing callers a bogus exception object where the
    # docstring promises a raise.
    raise ValueError("Unsupported head type: {}".format(head))
  parent_scope = "linear"
  with variable_scope.variable_op_scope(
      features.values(), parent_scope) as scope:
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))
    _add_bias_column(feature_columns, features, bias, labels,
                     columns_to_variables)
  def _train_op_fn(unused_loss):
    """Builds the SDCA train op; loss is computed internally by SDCA."""
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
                                                    weight_column_name,
                                                    loss_type, features,
                                                    labels, global_step)
    if update_weights_hook is not None:
      # Hand the built model/op to the chief-only hook so it can run the
      # weight update after each training step.
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op
  return head.head_ops(features, labels, mode, _train_op_fn, logits)
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
  """Returns an FTRL optimizer whose learning rate shrinks with column count."""
  scaled_rate = 1.0 / math.sqrt(len(feature_columns))
  return train.FtrlOptimizer(learning_rate=min(_LEARNING_RATE, scaled_rate))
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
  """SessionRunHook to update and shrink SDCA model weights."""
  def __init__(self):
    pass
  def set_parameters(self, sdca_model, train_op):
    # Called by sdca_model_fn once the SDCA model and train op are built.
    # Must happen before `begin`, which dereferences both attributes.
    self._sdca_model = sdca_model
    self._train_op = train_op
  def begin(self):
    """Construct the update_weights op.
    The op is implicitly added to the default graph.
    """
    self._update_op = self._sdca_model.update_weights(self._train_op)
  def before_run(self, run_context):
    """Return the update_weights op so that it is executed during this run."""
    return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
  """Linear classifier model.

  Train a linear model to classify instances into one of multiple possible
  classes. When number of possible classes is 2, this is binary classification.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  # Estimator using the default optimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Or estimator using the FTRL optimizer with regularization.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Or estimator using the SDCAOptimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id',
        num_loss_partitions=...,
        symmetric_l2_regularization=2.0
      ))

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """
  def __init__(self,  # _joint_weight pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               _joint_weight=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearClassifier` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
        the Ftrl optimizer will be used.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      _joint_weight: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.

    Returns:
      A `LinearClassifier` estimator.

    Raises:
      ValueError: if n_classes < 2.
    """
    # TODO(zoy): Give an unsupported error if enable_centered_bias is
    # requested for SDCA once its default changes to False.
    self._feature_columns = feature_columns
    assert self._feature_columns
    # Default optimizer is FTRL with a feature-count-scaled learning rate;
    # an explicit `optimizer` argument below overrides it.
    self._optimizer = _get_default_optimizer(feature_columns)
    if optimizer:
      self._optimizer = _get_optimizer(optimizer)
    chief_hook = None
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib._multi_class_head(  # pylint: disable=protected-access
        n_classes,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": self._optimizer,
    }
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert not _joint_weight, ("_joint_weight is incompatible with the"
                                 " SDCAOptimizer")
      assert n_classes == 2, "SDCA only applies to binary classification."
      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "num_ps_replicas": config.num_ps_replicas if config else 0,
          "joint_weights": _joint_weight,
      })
    self._estimator = estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
    # The SDCA weight-update hook must only run on the chief worker.
    self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
                                 else None)
  def get_estimator(self):
    # Exposes the wrapped Estimator (used by tests and composing code).
    return self._estimator
  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    if monitors is None:
      monitors = []
    # Legacy (non-SessionRunHook) monitors need to be attached to this
    # estimator and locked for the duration of the fit call.
    deprecated_monitors = [
        m for m in monitors
        if not isinstance(m, session_run_hook.SessionRunHook)
    ]
    for monitor in deprecated_monitors:
      monitor.set_estimator(self)
      monitor._lock_estimator()  # pylint: disable=protected-access
    if self._additional_run_hook:
      monitors.append(self._additional_run_hook)
    result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                                 batch_size=batch_size, monitors=monitors,
                                 max_steps=max_steps)
    for monitor in deprecated_monitors:
      monitor._unlock_estimator()  # pylint: disable=protected-access
    return result
  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None):
    """See evaluable.Evaluable."""
    return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
                                    feed_fn=feed_fn, batch_size=batch_size,
                                    steps=steps, metrics=metrics, name=name)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Runs inference to determine the predicted class."""
    key = prediction_key.PredictionKey.CLASSES
    preds = self._estimator.predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
                    as_iterable=True):
    """Runs inference to determine the class probability predictions."""
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = self._estimator.predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]
  def get_variable_names(self):
    return self._estimator.get_variable_names()
  def get_variable_value(self, name):
    return self._estimator.get_variable_value(name)
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    def default_input_fn(unused_estimator, examples):
      # Parse serialized tf.Examples using the model's own feature columns.
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    values = {}
    # Skip the bias and the optimizer's slot variables (names ending with the
    # optimizer name, optionally suffixed "_<digit>"); keep only weights.
    optimizer_regex = r".*/"+self._optimizer.get_name() + r"(_\d)?$"
    for name in self.get_variable_names():
      if (name.startswith("linear/") and
          name != "linear/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = self.get_variable_value(name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    return self.get_variable_value("linear/bias_weight")
  @property
  def config(self):
    return self._estimator.config
  @property
  def model_dir(self):
    return self._estimator.model_dir
class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
  """Linear regressor model.

  Train a linear regression model to predict label value given observation of
  feature values.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  estimator = LinearRegressor(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a KeyError:

  * if `weight_column_name` is not `None`:
    key=weight_column_name, value=a `Tensor`
  * for column in `feature_columns`:
    - if isinstance(column, `SparseColumn`):
        key=column.name, value=a `SparseTensor`
    - if isinstance(column, `WeightedSparseColumn`):
        {key=id column name, value=a `SparseTensor`,
         key=weight column name, value=a `SparseTensor`}
    - if isinstance(column, `RealValuedColumn`):
        key=column.name, value=a `Tensor`
  """
  def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               label_dimension=1,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearRegressor` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: dimension of the label for multilabels.
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.

    Returns:
      A `LinearRegressor` estimator.
    """
    self._feature_columns = feature_columns
    assert self._feature_columns
    # Default optimizer is FTRL with a feature-count-scaled learning rate;
    # an explicit `optimizer` argument below overrides it.
    self._optimizer = _get_default_optimizer(feature_columns)
    if optimizer:
      self._optimizer = _get_optimizer(optimizer)
    chief_hook = None
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib._regression_head(  # pylint: disable=protected-access
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": self._optimizer,
    }
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert label_dimension == 1, "SDCA only applies for label_dimension=1."
      assert not _joint_weights, ("_joint_weights is incompatible with"
                                  " SDCAOptimizer.")
      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "num_ps_replicas": config.num_ps_replicas if config else 0,
          "joint_weights": _joint_weights,
      })
    self._estimator = estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
    # The SDCA weight-update hook must only run on the chief worker.
    self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
                                 else None)
  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    if monitors is None:
      monitors = []
    # Legacy (non-SessionRunHook) monitors need to be attached to this
    # estimator and locked for the duration of the fit call.
    deprecated_monitors = [
        m for m in monitors
        if not isinstance(m, session_run_hook.SessionRunHook)
    ]
    for monitor in deprecated_monitors:
      monitor.set_estimator(self)
      monitor._lock_estimator()  # pylint: disable=protected-access
    if self._additional_run_hook:
      monitors.append(self._additional_run_hook)
    result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                                 batch_size=batch_size, monitors=monitors,
                                 max_steps=max_steps)
    for monitor in deprecated_monitors:
      monitor._unlock_estimator()  # pylint: disable=protected-access
    return result
  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None):
    """See evaluable.Evaluable."""
    return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
                                    feed_fn=feed_fn, batch_size=batch_size,
                                    steps=steps, metrics=metrics, name=name)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Runs inference to determine the predicted scores."""
    key = prediction_key.PredictionKey.SCORES
    preds = self._estimator.predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]
  def get_variable_names(self):
    return self._estimator.get_variable_names()
  def get_variable_value(self, name):
    return self._estimator.get_variable_value(name)
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    def default_input_fn(unused_estimator, examples):
      # Parse serialized tf.Examples using the model's own feature columns.
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or export.regression_signature_fn),
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    values = {}
    # Skip the bias and the optimizer's slot variables (names ending with the
    # optimizer name, optionally suffixed "_<digit>"); keep only weights.
    optimizer_regex = r".*/"+self._optimizer.get_name() + r"(_\d)?$"
    for name in self.get_variable_names():
      if (name.startswith("linear/") and
          name != "linear/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = self.get_variable_value(name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    return self.get_variable_value("linear/bias_weight")
  @property
  def config(self):
    return self._estimator.config
  @property
  def model_dir(self):
    return self._estimator.model_dir
| nanditav/15712-TensorFlow | tensorflow/contrib/learn/python/learn/estimators/linear.py | Python | apache-2.0 | 31,382 |
"""TempDir module."""
import tempfile
import shutil
class TempDir:
    """Class creating and managing the temporary directories."""

    def __init__(self, directory: str):
        # Path of the directory managed by this object.
        self.temp_dir = directory

    @classmethod
    def create(cls) -> "TempDir":
        """Initializes a TempDir object backed by a freshly created directory."""
        return cls(tempfile.mkdtemp(prefix="exonum_test_suite_"))

    def path(self) -> str:
        """Returns the path of the temporary dir."""
        return self.temp_dir

    def remove(self) -> None:
        """Removes created temporary directory."""
        shutil.rmtree(self.temp_dir)
| exonum/exonum | test-suite/exonum-py-tests/suite/temp_dir.py | Python | apache-2.0 | 629 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Selection import *
from Alphas.RsiAlphaModel import RsiAlphaModel
from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel
from Execution.VolumeWeightedAveragePriceExecutionModel import VolumeWeightedAveragePriceExecutionModel
from datetime import timedelta
### <summary>
### Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
### This algorithm shows how the execution model works to split up orders and
### submit them only when the price is on the favorable side of the intraday VWAP.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class VolumeWeightedAveragePriceExecutionModelRegressionAlgorithm(QCAlgorithmFramework):
    '''Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
    This algorithm shows how the execution model works to split up orders and
    submit them only when the price is on the favorable side of the intraday VWAP.'''
    def Initialize(self):
        # Framework wiring: universe selection -> alpha -> portfolio -> execution.
        self.UniverseSettings.Resolution = Resolution.Minute
        self.SetStartDate(2013,10,7)
        self.SetEndDate(2013,10,11)
        self.SetCash(1000000)
        self.SetUniverseSelection(ManualUniverseSelectionModel([
            Symbol.Create('AIG', SecurityType.Equity, Market.USA),
            Symbol.Create('BAC', SecurityType.Equity, Market.USA),
            Symbol.Create('IBM', SecurityType.Equity, Market.USA),
            Symbol.Create('SPY', SecurityType.Equity, Market.USA)
        ]))
        # using hourly rsi to generate more insights
        self.SetAlpha(RsiAlphaModel(14, Resolution.Hour))
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
        self.SetExecution(VolumeWeightedAveragePriceExecutionModel())
        # Subscribe so each emitted insight batch is logged for the regression run.
        self.InsightsGenerated += self.OnInsightsGenerated
    def OnInsightsGenerated(self, algorithm, data):
        # Log every batch of insights emitted by the alpha model.
        self.Log(f"{self.Time}: {', '.join(str(x) for x in data.Insights)}")
    def OnOrderEvent(self, orderEvent):
        self.Log(f"{self.Time}: {orderEvent}") | AnshulYADAV007/Lean | Algorithm.Python/VolumeWeightedAveragePriceExecutionModelRegressionAlgorithm.py | Python | apache-2.0 | 3155 |
# This file is part of Androguard.
#
# Copyright (C) 2012, Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
from builtins import object
import logging
from struct import pack, unpack
import androguard.decompiler.dad.util as util
from androguard.decompiler.dad.instruction import (
ArrayLengthExpression, ArrayLoadExpression, ArrayStoreInstruction,
AssignExpression, BaseClass, BinaryCompExpression, BinaryExpression,
BinaryExpression2Addr, BinaryExpressionLit, CastExpression,
CheckCastExpression, ConditionalExpression, ConditionalZExpression,
Constant, FillArrayExpression, FilledArrayExpression, InstanceExpression,
InstanceInstruction, InvokeInstruction, InvokeDirectInstruction,
InvokeRangeInstruction, InvokeStaticInstruction, MonitorEnterExpression,
MonitorExitExpression, MoveExceptionExpression, MoveExpression,
MoveResultExpression, NewArrayExpression, NewInstance, NopExpression,
ThrowExpression, Variable, ReturnInstruction, StaticExpression,
StaticInstruction, SwitchExpression, ThisParam, UnaryExpression)
# Module-level logger shared by every opcode translation function below.
logger = logging.getLogger('dad.opcode_ins')
class Op(object):
    """Operator tokens used when building decompiled expressions.

    These are plain string constants consumed by the expression classes in
    androguard.decompiler.dad.instruction when rendering source text.
    """
    CMP = 'cmp'
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    MOD = '%'
    AND = '&'
    OR = '|'
    XOR = '^'
    EQUAL = '=='
    NEQUAL = '!='
    GREATER = '>'
    LOWER = '<'
    GEQUAL = '>='
    LEQUAL = '<='
    NEG = '-'
    NOT = '~'
    # Shift operators: int shifts mask the count with 0x1f, long shifts with
    # 0x3f (see the templates in the trailing comments).
    INTSHL = '<<'  # '(%s << ( %s & 0x1f ))'
    INTSHR = '>>'  # '(%s >> ( %s & 0x1f ))'
    LONGSHL = '<<'  # '(%s << ( %s & 0x3f ))'
    LONGSHR = '>>'  # '(%s >> ( %s & 0x3f ))'
def get_variables(vmap, *variables):
    """Return the Variable wrapper(s) for the given register numbers.

    Unknown registers are lazily created in *vmap*.  A single requested
    register yields the Variable itself; several yield a list.
    """
    fetched = [vmap.setdefault(reg, Variable(reg)) for reg in variables]
    return fetched[0] if len(fetched) == 1 else fetched
def assign_const(dest_reg, cst, vmap):
    """Build `vX = <constant>`."""
    return AssignExpression(get_variables(vmap, dest_reg), cst)


def assign_cmp(val_a, val_b, val_c, cmp_type, vmap):
    """Build `vA = cmp(vB, vC)` for the cmp-kind opcodes."""
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    exp = BinaryCompExpression(Op.CMP, reg_b, reg_c, cmp_type)
    return AssignExpression(reg_a, exp)


def load_array_exp(val_a, val_b, val_c, ar_type, vmap):
    """Build `vA = vB[vC]` (array load) with element type *ar_type*."""
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    return AssignExpression(reg_a, ArrayLoadExpression(reg_b, reg_c, ar_type))


def store_array_inst(val_a, val_b, val_c, ar_type, vmap):
    """Build `vB[vC] = vA` (array store) with element type *ar_type*."""
    reg_a, reg_b, reg_c = get_variables(vmap, val_a, val_b, val_c)
    return ArrayStoreInstruction(reg_a, reg_b, reg_c, ar_type)


def assign_cast_exp(val_a, val_b, val_op, op_type, vmap):
    """Build `vA = (op_type) vB` for the *-to-* conversion opcodes."""
    reg_a, reg_b = get_variables(vmap, val_a, val_b)
    return AssignExpression(reg_a, CastExpression(val_op, op_type, reg_b))


def assign_binary_exp(ins, val_op, op_type, vmap):
    """Build `vAA = vBB <op> vCC` for three-register binary opcodes."""
    reg_a, reg_b, reg_c = get_variables(vmap, ins.AA, ins.BB, ins.CC)
    return AssignExpression(reg_a, BinaryExpression(val_op, reg_b, reg_c,
                                                    op_type))


def assign_binary_2addr_exp(ins, val_op, op_type, vmap):
    """Build `vA = vA <op> vB` for the /2addr binary opcodes."""
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return AssignExpression(reg_a, BinaryExpression2Addr(val_op, reg_a, reg_b,
                                                         op_type))


def assign_lit(op_type, val_cst, val_a, val_b, vmap):
    """Build `vA = vB <op> #literal` for the /lit8 and /lit16 opcodes."""
    cst = Constant(val_cst, 'I')
    var_a, var_b = get_variables(vmap, val_a, val_b)
    return AssignExpression(var_a, BinaryExpressionLit(op_type, var_b, cst))
# ---------------------------------------------------------------------------
# nop / register moves.  Every move* variant lowers to the same
# MoveExpression; the variants differ only in operand widths in the bytecode.
# ---------------------------------------------------------------------------

# nop
def nop(ins, vmap):
    return NopExpression()


# move vA, vB ( 4b, 4b )
def move(ins, vmap):
    logger.debug('Move %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move/from16 vAA, vBBBB ( 8b, 16b )
def movefrom16(ins, vmap):
    logger.debug('MoveFrom16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move/16 vAAAA, vBBBB ( 16b, 16b )
def move16(ins, vmap):
    logger.debug('Move16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-wide vA, vB ( 4b, 4b )
def movewide(ins, vmap):
    logger.debug('MoveWide %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move-wide/from16 vAA, vBBBB ( 8b, 16b )
def movewidefrom16(ins, vmap):
    logger.debug('MoveWideFrom16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-wide/16 vAAAA, vBBBB ( 16b, 16b )
def movewide16(ins, vmap):
    logger.debug('MoveWide16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-object vA, vB ( 4b, 4b )
def moveobject(ins, vmap):
    logger.debug('MoveObject %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move-object/from16 vAA, vBBBB ( 8b, 16b )
def moveobjectfrom16(ins, vmap):
    logger.debug('MoveObjectFrom16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-object/16 vAAAA, vBBBB ( 16b, 16b )
def moveobject16(ins, vmap):
    logger.debug('MoveObject16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)
# ---------------------------------------------------------------------------
# move-result* capture the value of the preceding invoke (passed in as *ret*);
# move-exception binds the caught exception; return* end the method.
# ---------------------------------------------------------------------------

# move-result vAA ( 8b )
def moveresult(ins, vmap, ret):
    logger.debug('MoveResult : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-result-wide vAA ( 8b )
def moveresultwide(ins, vmap, ret):
    logger.debug('MoveResultWide : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-result-object vAA ( 8b )
def moveresultobject(ins, vmap, ret):
    logger.debug('MoveResultObject : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-exception vAA ( 8b )
def moveexception(ins, vmap, _type):
    logger.debug('MoveException : %s', ins.get_output())
    return MoveExceptionExpression(get_variables(vmap, ins.AA), _type)


# return-void
def returnvoid(ins, vmap):
    logger.debug('ReturnVoid')
    return ReturnInstruction(None)


# return vAA ( 8b )
def return_reg(ins, vmap):
    logger.debug('Return : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))


# return-wide vAA ( 8b )
def returnwide(ins, vmap):
    logger.debug('ReturnWide : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))


# return-object vAA ( 8b )
def returnobject(ins, vmap):
    logger.debug('ReturnObject : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))
# ---------------------------------------------------------------------------
# Constant loads.  A 32-bit Dalvik constant may encode either an int or a
# float (and a 64-bit one a long or a double), so several handlers below
# reinterpret the raw bit pattern via pack/unpack and keep BOTH the
# reinterpreted value and the raw integer in the Constant; the actual type
# is resolved later by the decompiler.
# ---------------------------------------------------------------------------

# const/4 vA, #+B ( 4b, 4b )
def const4(ins, vmap):
    logger.debug('Const4 : %s', ins.get_output())
    cst = Constant(ins.B, 'I')
    return assign_const(ins.A, cst, vmap)


# const/16 vAA, #+BBBB ( 8b, 16b )
def const16(ins, vmap):
    logger.debug('Const16 : %s', ins.get_output())
    cst = Constant(ins.BBBB, 'I')
    return assign_const(ins.AA, cst, vmap)


# const vAA, #+BBBBBBBB ( 8b, 32b )
def const(ins, vmap):
    logger.debug('Const : %s', ins.get_output())
    # Reinterpret the 32-bit pattern as a float; the raw int is kept too.
    value = unpack("=f", pack("=i", ins.BBBBBBBB))[0]
    cst = Constant(value, 'I', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const/high16 vAA, #+BBBB0000 ( 8b, 16b )
def consthigh16(ins, vmap):
    logger.debug('ConstHigh16 : %s', ins.get_output())
    # The 16-bit immediate fills the high half-word of the 32-bit pattern.
    value = unpack('=f', pack('=i', ins.BBBB << 16))[0]
    cst = Constant(value, 'I', ins.BBBB << 16)
    return assign_const(ins.AA, cst, vmap)


# const-wide/16 vAA, #+BBBB ( 8b, 16b )
def constwide16(ins, vmap):
    logger.debug('ConstWide16 : %s', ins.get_output())
    # Round-trips the int through a double ('=d' both ways), so value is
    # float(BBBB); the raw integer is preserved alongside it.
    value = unpack('=d', pack('=d', ins.BBBB))[0]
    cst = Constant(value, 'J', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide/32 vAA, #+BBBBBBBB ( 8b, 32b )
def constwide32(ins, vmap):
    logger.debug('ConstWide32 : %s', ins.get_output())
    value = unpack('=d', pack('=d', ins.BBBBBBBB))[0]
    cst = Constant(value, 'J', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide vAA, #+BBBBBBBBBBBBBBBB ( 8b, 64b )
def constwide(ins, vmap):
    logger.debug('ConstWide : %s', ins.get_output())
    # Reinterpret the 64-bit pattern as a double ('=q' -> '=d').
    value = unpack('=d', pack('=q', ins.BBBBBBBBBBBBBBBB))[0]
    cst = Constant(value, 'D', ins.BBBBBBBBBBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide/high16 vAA, #+BBBB000000000000 ( 8b, 16b )
def constwidehigh16(ins, vmap):
    logger.debug('ConstWideHigh16 : %s', ins.get_output())
    # Little-endian: six zero bytes then the short, so the immediate lands in
    # the most-significant 16 bits of the double's bit pattern.
    value = unpack('=d', b'\x00\x00\x00\x00\x00\x00' + pack('=h', ins.BBBB))[0]
    cst = Constant(value, 'D', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)


# const-string vAA ( 8b )
def conststring(ins, vmap):
    logger.debug('ConstString : %s', ins.get_output())
    cst = Constant(ins.get_raw_string(), 'Ljava/lang/String;')
    return assign_const(ins.AA, cst, vmap)


# const-string/jumbo vAA ( 8b )
def conststringjumbo(ins, vmap):
    logger.debug('ConstStringJumbo %s', ins.get_output())
    cst = Constant(ins.get_raw_string(), 'Ljava/lang/String;')
    return assign_const(ins.AA, cst, vmap)


# const-class vAA, type@BBBB ( 8b )
def constclass(ins, vmap):
    logger.debug('ConstClass : %s', ins.get_output())
    cst = Constant(util.get_type(ins.get_string()),
                   'Ljava/lang/Class;',
                   descriptor=ins.get_string())
    return assign_const(ins.AA, cst, vmap)
# monitor-enter vAA ( 8b )
def monitorenter(ins, vmap):
    logger.debug('MonitorEnter : %s', ins.get_output())
    return MonitorEnterExpression(get_variables(vmap, ins.AA))


# monitor-exit vAA ( 8b )
def monitorexit(ins, vmap):
    logger.debug('MonitorExit : %s', ins.get_output())
    a = get_variables(vmap, ins.AA)
    return MonitorExitExpression(a)


# check-cast vAA ( 8b )
def checkcast(ins, vmap):
    logger.debug('CheckCast: %s', ins.get_output())
    cast_type = util.get_type(ins.get_translated_kind())
    cast_var = get_variables(vmap, ins.AA)
    # check-cast leaves the value in the same register, hence the
    # self-assignment `vAA = (T) vAA`.
    cast_expr = CheckCastExpression(cast_var,
                                    cast_type,
                                    descriptor=ins.get_translated_kind())
    return AssignExpression(cast_var, cast_expr)


# instance-of vA, vB ( 4b, 4b )
def instanceof(ins, vmap):
    logger.debug('InstanceOf : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    # The tested type is modeled as a BaseClass operand; result type is
    # boolean ('Z').
    reg_c = BaseClass(util.get_type(ins.get_translated_kind()),
                      descriptor=ins.get_translated_kind())
    exp = BinaryExpression('instanceof', reg_b, reg_c, 'Z')
    return AssignExpression(reg_a, exp)


# array-length vA, vB ( 4b, 4b )
def arraylength(ins, vmap):
    logger.debug('ArrayLength: %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return AssignExpression(reg_a, ArrayLengthExpression(reg_b))
# new-instance vAA ( 8b )
def newinstance(ins, vmap):
    logger.debug('NewInstance : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    ins_type = ins.cm.get_type(ins.BBBB)
    return AssignExpression(reg_a, NewInstance(ins_type))


# new-array vA, vB ( 8b, size )
def newarray(ins, vmap):
    logger.debug('NewArray : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = NewArrayExpression(b, ins.cm.get_type(ins.CCCC))
    return AssignExpression(a, exp)


# filled-new-array {vD, vE, vF, vG, vA} ( 4b each )
def fillednewarray(ins, vmap, ret):
    logger.debug('FilledNewArray : %s', ins.get_output())
    c, d, e, f, g = get_variables(vmap, ins.C, ins.D, ins.E, ins.F, ins.G)
    array_type = ins.cm.get_type(ins.BBBB)
    # ins.A is the element count; only the first A registers are used.
    exp = FilledArrayExpression(ins.A, array_type, [c, d, e, f, g][:ins.A])
    return AssignExpression(ret, exp)


# filled-new-array/range {vCCCC..vNNNN} ( 16b )
def fillednewarrayrange(ins, vmap, ret):
    logger.debug('FilledNewArrayRange : %s', ins.get_output())
    a, c, n = get_variables(vmap, ins.AA, ins.CCCC, ins.NNNN)
    array_type = ins.cm.get_type(ins.BBBB)
    # NOTE(review): only the first and last registers of the range are passed
    # here; verify that intermediate registers are handled downstream.
    exp = FilledArrayExpression(a, array_type, [c, n])
    return AssignExpression(ret, exp)


# fill-array-data vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydata(ins, vmap, value):
    logger.debug('FillArrayData : %s', ins.get_output())
    return FillArrayExpression(get_variables(vmap, ins.AA), value)


# fill-array-data-payload vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydatapayload(ins, vmap):
    logger.debug('FillArrayDataPayload : %s', ins.get_output())
    # NOTE(review): called with a single argument here, unlike the two-arg
    # call in fillarraydata above — confirm FillArrayExpression accepts it.
    return FillArrayExpression(None)
# throw vAA ( 8b )
def throw(ins, vmap):
    logger.debug('Throw : %s', ins.get_output())
    return ThrowExpression(get_variables(vmap, ins.AA))


# Unconditional branches lower to no-ops here; the branch target is
# presumably carried by the control-flow graph rather than the expression.

# goto +AA ( 8b )
def goto(ins, vmap):
    return NopExpression()


# goto/16 +AAAA ( 16b )
def goto16(ins, vmap):
    return NopExpression()


# goto/32 +AAAAAAAA ( 32b )
def goto32(ins, vmap):
    return NopExpression()


# packed-switch vAA, +BBBBBBBB ( reg to test, 32b )
def packedswitch(ins, vmap):
    logger.debug('PackedSwitch : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    return SwitchExpression(reg_a, ins.BBBBBBBB)


# sparse-switch vAA, +BBBBBBBB ( reg to test, 32b )
def sparseswitch(ins, vmap):
    logger.debug('SparseSwitch : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    return SwitchExpression(reg_a, ins.BBBBBBBB)
# cmpl-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplfloat(ins, vmap):
    """Translate cmpl-float into `vAA = cmp(vBB, vCC)` over floats.

    Fix: the debug message previously said 'CmpglFloat' for this opcode;
    it now correctly reads 'CmplFloat', matching the cmpg/cmpl naming used
    by the sibling handlers below.
    """
    logger.debug('CmplFloat : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)
# cmpg-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgfloat(ins, vmap):
    logger.debug('CmpgFloat : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)


# cmpl-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpldouble(ins, vmap):
    logger.debug('CmplDouble : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)


# cmpg-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgdouble(ins, vmap):
    logger.debug('CmpgDouble : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)


# cmp-long vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplong(ins, vmap):
    logger.debug('CmpLong : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'J', vmap)
# ---------------------------------------------------------------------------
# Conditional branches: the two-register forms build a ConditionalExpression,
# the *z forms (compare against zero) a ConditionalZExpression.
# ---------------------------------------------------------------------------

# if-eq vA, vB, +CCCC ( 4b, 4b, 16b )
def ifeq(ins, vmap):
    logger.debug('IfEq : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.EQUAL, a, b)


# if-ne vA, vB, +CCCC ( 4b, 4b, 16b )
def ifne(ins, vmap):
    logger.debug('IfNe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.NEQUAL, a, b)


# if-lt vA, vB, +CCCC ( 4b, 4b, 16b )
def iflt(ins, vmap):
    logger.debug('IfLt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.LOWER, a, b)


# if-ge vA, vB, +CCCC ( 4b, 4b, 16b )
def ifge(ins, vmap):
    logger.debug('IfGe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.GEQUAL, a, b)


# if-gt vA, vB, +CCCC ( 4b, 4b, 16b )
def ifgt(ins, vmap):
    logger.debug('IfGt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.GREATER, a, b)


# if-le vA, vB, +CCCC ( 4b, 4b, 16b )
def ifle(ins, vmap):
    logger.debug('IfLe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.LEQUAL, a, b)


# if-eqz vAA, +BBBB ( 8b, 16b )
def ifeqz(ins, vmap):
    logger.debug('IfEqz : %s', ins.get_output())
    return ConditionalZExpression(Op.EQUAL, get_variables(vmap, ins.AA))


# if-nez vAA, +BBBB ( 8b, 16b )
def ifnez(ins, vmap):
    logger.debug('IfNez : %s', ins.get_output())
    return ConditionalZExpression(Op.NEQUAL, get_variables(vmap, ins.AA))


# if-ltz vAA, +BBBB ( 8b, 16b )
def ifltz(ins, vmap):
    logger.debug('IfLtz : %s', ins.get_output())
    return ConditionalZExpression(Op.LOWER, get_variables(vmap, ins.AA))


# if-gez vAA, +BBBB ( 8b, 16b )
def ifgez(ins, vmap):
    logger.debug('IfGez : %s', ins.get_output())
    return ConditionalZExpression(Op.GEQUAL, get_variables(vmap, ins.AA))


# if-gtz vAA, +BBBB ( 8b, 16b )
def ifgtz(ins, vmap):
    logger.debug('IfGtz : %s', ins.get_output())
    return ConditionalZExpression(Op.GREATER, get_variables(vmap, ins.AA))


# if-lez vAA, +BBBB (8b, 16b )
def iflez(ins, vmap):
    logger.debug('IfLez : %s', ins.get_output())
    return ConditionalZExpression(Op.LEQUAL, get_variables(vmap, ins.AA))
# ---------------------------------------------------------------------------
# Array element access.  Type codes: None (int), 'W' wide, 'O' object,
# 'Z' boolean, 'B' byte, 'C' char, 'S' short.
# ---------------------------------------------------------------------------
# TODO: check type for all aget
# aget vAA, vBB, vCC ( 8b, 8b, 8b )
def aget(ins, vmap):
    logger.debug('AGet : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, None, vmap)


# aget-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def agetwide(ins, vmap):
    logger.debug('AGetWide : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'W', vmap)


# aget-object vAA, vBB, vCC ( 8b, 8b, 8b )
def agetobject(ins, vmap):
    logger.debug('AGetObject : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'O', vmap)


# aget-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def agetboolean(ins, vmap):
    logger.debug('AGetBoolean : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'Z', vmap)


# aget-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def agetbyte(ins, vmap):
    logger.debug('AGetByte : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'B', vmap)


# aget-char vAA, vBB, vCC ( 8b, 8b, 8b )
def agetchar(ins, vmap):
    logger.debug('AGetChar : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'C', vmap)


# aget-short vAA, vBB, vCC ( 8b, 8b, 8b )
def agetshort(ins, vmap):
    logger.debug('AGetShort : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'S', vmap)


# aput vAA, vBB, vCC
def aput(ins, vmap):
    logger.debug('APut : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, None, vmap)


# aput-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def aputwide(ins, vmap):
    logger.debug('APutWide : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'W', vmap)


# aput-object vAA, vBB, vCC ( 8b, 8b, 8b )
def aputobject(ins, vmap):
    logger.debug('APutObject : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'O', vmap)


# aput-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def aputboolean(ins, vmap):
    logger.debug('APutBoolean : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'Z', vmap)


# aput-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def aputbyte(ins, vmap):
    logger.debug('APutByte : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'B', vmap)


# aput-char vAA, vBB, vCC ( 8b, 8b, 8b )
def aputchar(ins, vmap):
    logger.debug('APutChar : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'C', vmap)


# aput-short vAA, vBB, vCC ( 8b, 8b, 8b )
def aputshort(ins, vmap):
    logger.debug('APutShort : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'S', vmap)
# ---------------------------------------------------------------------------
# Instance field access.  iget* build `vA = vB.field`; iput* build
# `vB.field = vA`.  The field triple (class, type, name) is resolved from
# the class manager via the CCCC field index.
# ---------------------------------------------------------------------------

# iget vA, vB ( 4b, 4b )
def iget(ins, vmap):
    logger.debug('IGet : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-wide vA, vB ( 4b, 4b )
def igetwide(ins, vmap):
    logger.debug('IGetWide : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-object vA, vB ( 4b, 4b )
def igetobject(ins, vmap):
    logger.debug('IGetObject : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-boolean vA, vB ( 4b, 4b )
def igetboolean(ins, vmap):
    logger.debug('IGetBoolean : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-byte vA, vB ( 4b, 4b )
def igetbyte(ins, vmap):
    logger.debug('IGetByte : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-char vA, vB ( 4b, 4b )
def igetchar(ins, vmap):
    logger.debug('IGetChar : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-short vA, vB ( 4b, 4b )
def igetshort(ins, vmap):
    logger.debug('IGetShort : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iput vA, vB ( 4b, 4b )
def iput(ins, vmap):
    logger.debug('IPut %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-wide vA, vB ( 4b, 4b )
def iputwide(ins, vmap):
    logger.debug('IPutWide %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-object vA, vB ( 4b, 4b )
def iputobject(ins, vmap):
    logger.debug('IPutObject %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-boolean vA, vB ( 4b, 4b )
def iputboolean(ins, vmap):
    logger.debug('IPutBoolean %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-byte vA, vB ( 4b, 4b )
def iputbyte(ins, vmap):
    logger.debug('IPutByte %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-char vA, vB ( 4b, 4b )
def iputchar(ins, vmap):
    logger.debug('IPutChar %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)


# iput-short vA, vB ( 4b, 4b )
def iputshort(ins, vmap):
    logger.debug('IPutShort %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# ---------------------------------------------------------------------------
# Static field access.  sget* build `vAA = Class.field`; sput* build
# `Class.field = vAA`.  The field triple is resolved via the BBBB index.
# ---------------------------------------------------------------------------

# sget vAA ( 8b )
def sget(ins, vmap):
    logger.debug('SGet : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-wide vAA ( 8b )
def sgetwide(ins, vmap):
    logger.debug('SGetWide : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-object vAA ( 8b )
def sgetobject(ins, vmap):
    logger.debug('SGetObject : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-boolean vAA ( 8b )
def sgetboolean(ins, vmap):
    logger.debug('SGetBoolean : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-byte vAA ( 8b )
def sgetbyte(ins, vmap):
    logger.debug('SGetByte : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-char vAA ( 8b )
def sgetchar(ins, vmap):
    logger.debug('SGetChar : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sget-short vAA ( 8b )
def sgetshort(ins, vmap):
    logger.debug('SGetShort : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)


# sput vAA ( 8b )
def sput(ins, vmap):
    logger.debug('SPut : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-wide vAA ( 8b )
def sputwide(ins, vmap):
    logger.debug('SPutWide : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-object vAA ( 8b )
def sputobject(ins, vmap):
    logger.debug('SPutObject : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-boolean vAA ( 8b )
def sputboolean(ins, vmap):
    logger.debug('SPutBoolean : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-byte vAA ( 8b )
def sputbyte(ins, vmap):
    logger.debug('SPutByte : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-char vAA ( 8b )
def sputchar(ins, vmap):
    logger.debug('SPutChar : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)


# sput-short vAA ( 8b )
def sputshort(ins, vmap):
    logger.debug('SPutShort : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
def get_args(vmap, param_type, largs):
    """Collect the argument Variables for an invoke instruction.

    Walks *largs* (the raw register list) with a cursor that advances by the
    size of each parameter type, so wide (two-slot) parameters consume two
    registers.  Returns a list of Variables (possibly empty on mismatch).
    """
    if len(param_type) > len(largs):
        logger.warning('len(param_type) > len(largs) !')
        return []
    regs = []
    cursor = 0
    for ptype in param_type:
        regs.append(largs[cursor])
        cursor += util.get_type_size(ptype)
    # A single parameter must still come back as a one-element list, while
    # get_variables would unwrap it — re-wrap explicitly.
    if len(param_type) == 1:
        return [get_variables(vmap, *regs)]
    return get_variables(vmap, *regs)
# ---------------------------------------------------------------------------
# Method invocation.  Each handler resolves the method reference, builds the
# argument list, and wires the call's result into *ret*: `ret.new()`
# allocates a fresh result holder when the method returns a value, while
# constructors propagate the receiver via `ret.set_to(base)`.  The statement
# order around ret is significant — do not reorder.
# ---------------------------------------------------------------------------

# invoke-virtual {vD, vE, vF, vG, vA} ( 4b each )
def invokevirtual(ins, vmap, ret):
    logger.debug('InvokeVirtual : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # ins.C is the receiver; D..G carry the explicit arguments.
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    c = get_variables(vmap, ins.C)
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, c, ret_type, param_type, args,
                            method.get_triple())
    return AssignExpression(returned, exp)


# invoke-super {vD, vE, vF, vG, vA} ( 4b each )
def invokesuper(ins, vmap, ret):
    logger.debug('InvokeSuper : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    # The receiver is modeled as the literal 'super' pseudo-class.
    superclass = BaseClass('super')
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, superclass, ret_type, param_type,
                            args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-direct {vD, vE, vF, vG, vA} ( 4b each )
def invokedirect(ins, vmap, ret):
    logger.debug('InvokeDirect : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    base = get_variables(vmap, ins.C)
    if ret_type == 'V':
        # Void direct calls are typically constructors: unless the receiver
        # is `this`, the "result" of the call is the initialized receiver.
        if isinstance(base, ThisParam):
            returned = None
        else:
            returned = base
            ret.set_to(base)
    else:
        returned = ret.new()
    exp = InvokeDirectInstruction(cls_name, name, base, ret_type, param_type,
                                  args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-static {vD, vE, vF, vG, vA} ( 4b each )
def invokestatic(ins, vmap, ret):
    logger.debug('InvokeStatic : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # Static calls have no receiver, so ins.C is an ordinary argument.
    largs = [ins.C, ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    base = BaseClass(cls_name, descriptor=method.get_class_name())
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeStaticInstruction(cls_name, name, base, ret_type, param_type,
                                  args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-interface {vD, vE, vF, vG, vA} ( 4b each )
def invokeinterface(ins, vmap, ret):
    logger.debug('InvokeInterface : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    c = get_variables(vmap, ins.C)
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, c, ret_type, param_type, args,
                            method.get_triple())
    return AssignExpression(returned, exp)


# invoke-virtual/range {vCCCC..vNNNN} ( 16b each )
def invokevirtualrange(ins, vmap, ret):
    logger.debug('InvokeVirtualRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # Registers form the contiguous range [CCCC, NNNN]; the first one is the
    # receiver, the rest are arguments.
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    this_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeRangeInstruction(cls_name, name, ret_type, param_type,
                                 [this_arg] + args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-super/range {vCCCC..vNNNN} ( 16b each )
def invokesuperrange(ins, vmap, ret):
    logger.debug('InvokeSuperRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    args = get_args(vmap, param_type, largs[1:])
    base = get_variables(vmap, ins.CCCC)
    if ret_type != 'V':
        returned = ret.new()
    else:
        # Void super call: propagate the receiver as the call's result.
        returned = base
        ret.set_to(base)
    superclass = BaseClass('super')
    exp = InvokeRangeInstruction(cls_name, name, ret_type, param_type,
                                 [superclass] + args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-direct/range {vCCCC..vNNNN} ( 16b each )
def invokedirectrange(ins, vmap, ret):
    logger.debug('InvokeDirectRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    this_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    base = get_variables(vmap, ins.CCCC)
    if ret_type != 'V':
        returned = ret.new()
    else:
        # Void direct call (typically a constructor): the receiver becomes
        # the call's result.
        returned = base
        ret.set_to(base)
    exp = InvokeRangeInstruction(cls_name, name, ret_type, param_type,
                                 [this_arg] + args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-static/range {vCCCC..vNNNN} ( 16b each )
def invokestaticrange(ins, vmap, ret):
    logger.debug('InvokeStaticRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # No receiver: the whole range is arguments.
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    args = get_args(vmap, param_type, largs)
    base = BaseClass(cls_name, descriptor=method.get_class_name())
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeStaticInstruction(cls_name, name, base, ret_type, param_type,
                                  args, method.get_triple())
    return AssignExpression(returned, exp)


# invoke-interface/range {vCCCC..vNNNN} ( 16b each )
def invokeinterfacerange(ins, vmap, ret):
    logger.debug('InvokeInterfaceRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    base_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeRangeInstruction(cls_name, name, ret_type, param_type,
                                 [base_arg] + args, method.get_triple())
    return AssignExpression(returned, exp)
# --- Unary operators --------------------------------------------------------
# Each handler assigns UnaryExpression(op, vB, type) to vA.  The last
# argument is the Dalvik type descriptor of the result:
#   I = int, J = long, F = float, D = double.

# neg-int vA, vB ( 4b, 4b )
def negint(ins, vmap):
    logger.debug('NegInt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'I')
    return AssignExpression(a, exp)

# not-int vA, vB ( 4b, 4b )
def notint(ins, vmap):
    logger.debug('NotInt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NOT, b, 'I')
    return AssignExpression(a, exp)

# neg-long vA, vB ( 4b, 4b )
def neglong(ins, vmap):
    logger.debug('NegLong : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'J')
    return AssignExpression(a, exp)

# not-long vA, vB ( 4b, 4b )
def notlong(ins, vmap):
    logger.debug('NotLong : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NOT, b, 'J')
    return AssignExpression(a, exp)

# neg-float vA, vB ( 4b, 4b )
def negfloat(ins, vmap):
    logger.debug('NegFloat : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'F')
    return AssignExpression(a, exp)

# neg-double vA, vB ( 4b, 4b )
def negdouble(ins, vmap):
    logger.debug('NegDouble : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'D')
    return AssignExpression(a, exp)
# --- Primitive conversions --------------------------------------------------
# Every x-to-y opcode becomes a C/Java-style cast:
#   assign_cast_exp(dst_reg, src_reg, cast_text, result_descriptor, vmap)
# Descriptors: I=int, J=long, F=float, D=double, B=byte, C=char, S=short.

# int-to-long vA, vB ( 4b, 4b )
def inttolong(ins, vmap):
    logger.debug('IntToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)

# int-to-float vA, vB ( 4b, 4b )
def inttofloat(ins, vmap):
    logger.debug('IntToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)

# int-to-double vA, vB ( 4b, 4b )
def inttodouble(ins, vmap):
    logger.debug('IntToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)

# long-to-int vA, vB ( 4b, 4b )
def longtoint(ins, vmap):
    logger.debug('LongToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)

# long-to-float vA, vB ( 4b, 4b )
def longtofloat(ins, vmap):
    logger.debug('LongToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)

# long-to-double vA, vB ( 4b, 4b )
def longtodouble(ins, vmap):
    logger.debug('LongToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)

# float-to-int vA, vB ( 4b, 4b )
def floattoint(ins, vmap):
    logger.debug('FloatToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)

# float-to-long vA, vB ( 4b, 4b )
def floattolong(ins, vmap):
    logger.debug('FloatToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)

# float-to-double vA, vB ( 4b, 4b )
def floattodouble(ins, vmap):
    logger.debug('FloatToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)

# double-to-int vA, vB ( 4b, 4b )
def doubletoint(ins, vmap):
    logger.debug('DoubleToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)

# double-to-long vA, vB ( 4b, 4b )
def doubletolong(ins, vmap):
    logger.debug('DoubleToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)

# double-to-float vA, vB ( 4b, 4b )
def doubletofloat(ins, vmap):
    logger.debug('DoubleToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)

# int-to-byte vA, vB ( 4b, 4b )
def inttobyte(ins, vmap):
    logger.debug('IntToByte : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(byte)', 'B', vmap)

# int-to-char vA, vB ( 4b, 4b )
def inttochar(ins, vmap):
    logger.debug('IntToChar : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(char)', 'C', vmap)

# int-to-short vA, vB ( 4b, 4b )
def inttoshort(ins, vmap):
    logger.debug('IntToShort : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(short)', 'S', vmap)
# --- Three-register binary operators ----------------------------------------
# Pattern: vAA = vBB <op> vCC via assign_binary_exp(ins, op, type, vmap).
# Type descriptors: I=int, J=long, F=float, D=double.
# NOTE(review): the ushr-* handlers reuse Op.INTSHR / Op.LONGSHR, i.e. the
# same operator as the signed shr-* opcodes -- confirm whether Op provides an
# unsigned-shift variant, otherwise '>>>' decompiles as '>>'.

# add-int vAA, vBB, vCC ( 8b, 8b, 8b )
def addint(ins, vmap):
    logger.debug('AddInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'I', vmap)

# sub-int vAA, vBB, vCC ( 8b, 8b, 8b )
def subint(ins, vmap):
    logger.debug('SubInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'I', vmap)

# mul-int vAA, vBB, vCC ( 8b, 8b, 8b )
def mulint(ins, vmap):
    logger.debug('MulInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'I', vmap)

# div-int vAA, vBB, vCC ( 8b, 8b, 8b )
def divint(ins, vmap):
    logger.debug('DivInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'I', vmap)

# rem-int vAA, vBB, vCC ( 8b, 8b, 8b )
def remint(ins, vmap):
    logger.debug('RemInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'I', vmap)

# and-int vAA, vBB, vCC ( 8b, 8b, 8b )
def andint(ins, vmap):
    logger.debug('AndInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.AND, 'I', vmap)

# or-int vAA, vBB, vCC ( 8b, 8b, 8b )
def orint(ins, vmap):
    logger.debug('OrInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.OR, 'I', vmap)

# xor-int vAA, vBB, vCC ( 8b, 8b, 8b )
def xorint(ins, vmap):
    logger.debug('XorInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.XOR, 'I', vmap)

# shl-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shlint(ins, vmap):
    logger.debug('ShlInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.INTSHL, 'I', vmap)

# shr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shrint(ins, vmap):
    logger.debug('ShrInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)

# ushr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrint(ins, vmap):
    logger.debug('UShrInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)

# add-long vAA, vBB, vCC ( 8b, 8b, 8b )
def addlong(ins, vmap):
    logger.debug('AddLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'J', vmap)

# sub-long vAA, vBB, vCC ( 8b, 8b, 8b )
def sublong(ins, vmap):
    logger.debug('SubLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'J', vmap)

# mul-long vAA, vBB, vCC ( 8b, 8b, 8b )
def mullong(ins, vmap):
    logger.debug('MulLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'J', vmap)

# div-long vAA, vBB, vCC ( 8b, 8b, 8b )
def divlong(ins, vmap):
    logger.debug('DivLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'J', vmap)

# rem-long vAA, vBB, vCC ( 8b, 8b, 8b )
def remlong(ins, vmap):
    logger.debug('RemLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'J', vmap)

# and-long vAA, vBB, vCC ( 8b, 8b, 8b )
def andlong(ins, vmap):
    logger.debug('AndLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.AND, 'J', vmap)

# or-long vAA, vBB, vCC ( 8b, 8b, 8b )
def orlong(ins, vmap):
    logger.debug('OrLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.OR, 'J', vmap)

# xor-long vAA, vBB, vCC ( 8b, 8b, 8b )
def xorlong(ins, vmap):
    logger.debug('XorLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.XOR, 'J', vmap)

# shl-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shllong(ins, vmap):
    logger.debug('ShlLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.LONGSHL, 'J', vmap)

# shr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shrlong(ins, vmap):
    logger.debug('ShrLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)

# ushr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrlong(ins, vmap):
    logger.debug('UShrLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)

# add-float vAA, vBB, vCC ( 8b, 8b, 8b )
def addfloat(ins, vmap):
    logger.debug('AddFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'F', vmap)

# sub-float vAA, vBB, vCC ( 8b, 8b, 8b )
def subfloat(ins, vmap):
    logger.debug('SubFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'F', vmap)

# mul-float vAA, vBB, vCC ( 8b, 8b, 8b )
def mulfloat(ins, vmap):
    logger.debug('MulFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'F', vmap)

# div-float vAA, vBB, vCC ( 8b, 8b, 8b )
def divfloat(ins, vmap):
    logger.debug('DivFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'F', vmap)

# rem-float vAA, vBB, vCC ( 8b, 8b, 8b )
def remfloat(ins, vmap):
    logger.debug('RemFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'F', vmap)

# add-double vAA, vBB, vCC ( 8b, 8b, 8b )
def adddouble(ins, vmap):
    logger.debug('AddDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'D', vmap)

# sub-double vAA, vBB, vCC ( 8b, 8b, 8b )
def subdouble(ins, vmap):
    logger.debug('SubDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'D', vmap)

# mul-double vAA, vBB, vCC ( 8b, 8b, 8b )
def muldouble(ins, vmap):
    logger.debug('MulDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'D', vmap)

# div-double vAA, vBB, vCC ( 8b, 8b, 8b )
def divdouble(ins, vmap):
    logger.debug('DivDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'D', vmap)

# rem-double vAA, vBB, vCC ( 8b, 8b, 8b )
def remdouble(ins, vmap):
    logger.debug('RemDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'D', vmap)
# --- Two-address binary operators -------------------------------------------
# /2addr variants operate in place: vA = vA <op> vB, via
# assign_binary_2addr_exp(ins, op, type, vmap).  Descriptors: I=int, J=long,
# F=float, D=double.  As with the 3-register forms, ushr reuses the signed
# shift operator (Op.INTSHR / Op.LONGSHR) -- see note above.

# add-int/2addr vA, vB ( 4b, 4b )
def addint2addr(ins, vmap):
    logger.debug('AddInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'I', vmap)

# sub-int/2addr vA, vB ( 4b, 4b )
def subint2addr(ins, vmap):
    logger.debug('SubInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'I', vmap)

# mul-int/2addr vA, vB ( 4b, 4b )
def mulint2addr(ins, vmap):
    logger.debug('MulInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'I', vmap)

# div-int/2addr vA, vB ( 4b, 4b )
def divint2addr(ins, vmap):
    logger.debug('DivInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'I', vmap)

# rem-int/2addr vA, vB ( 4b, 4b )
def remint2addr(ins, vmap):
    logger.debug('RemInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'I', vmap)

# and-int/2addr vA, vB ( 4b, 4b )
def andint2addr(ins, vmap):
    logger.debug('AndInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.AND, 'I', vmap)

# or-int/2addr vA, vB ( 4b, 4b )
def orint2addr(ins, vmap):
    logger.debug('OrInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.OR, 'I', vmap)

# xor-int/2addr vA, vB ( 4b, 4b )
def xorint2addr(ins, vmap):
    logger.debug('XorInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.XOR, 'I', vmap)

# shl-int/2addr vA, vB ( 4b, 4b )
def shlint2addr(ins, vmap):
    logger.debug('ShlInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.INTSHL, 'I', vmap)

# shr-int/2addr vA, vB ( 4b, 4b )
def shrint2addr(ins, vmap):
    logger.debug('ShrInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)

# ushr-int/2addr vA, vB ( 4b, 4b )
def ushrint2addr(ins, vmap):
    logger.debug('UShrInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)

# add-long/2addr vA, vB ( 4b, 4b )
def addlong2addr(ins, vmap):
    logger.debug('AddLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'J', vmap)

# sub-long/2addr vA, vB ( 4b, 4b )
def sublong2addr(ins, vmap):
    logger.debug('SubLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'J', vmap)

# mul-long/2addr vA, vB ( 4b, 4b )
def mullong2addr(ins, vmap):
    logger.debug('MulLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'J', vmap)

# div-long/2addr vA, vB ( 4b, 4b )
def divlong2addr(ins, vmap):
    logger.debug('DivLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'J', vmap)

# rem-long/2addr vA, vB ( 4b, 4b )
def remlong2addr(ins, vmap):
    logger.debug('RemLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'J', vmap)

# and-long/2addr vA, vB ( 4b, 4b )
def andlong2addr(ins, vmap):
    logger.debug('AndLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.AND, 'J', vmap)

# or-long/2addr vA, vB ( 4b, 4b )
def orlong2addr(ins, vmap):
    logger.debug('OrLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.OR, 'J', vmap)

# xor-long/2addr vA, vB ( 4b, 4b )
def xorlong2addr(ins, vmap):
    logger.debug('XorLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.XOR, 'J', vmap)

# shl-long/2addr vA, vB ( 4b, 4b )
def shllong2addr(ins, vmap):
    logger.debug('ShlLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.LONGSHL, 'J', vmap)

# shr-long/2addr vA, vB ( 4b, 4b )
def shrlong2addr(ins, vmap):
    logger.debug('ShrLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)

# ushr-long/2addr vA, vB ( 4b, 4b )
def ushrlong2addr(ins, vmap):
    logger.debug('UShrLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)

# add-float/2addr vA, vB ( 4b, 4b )
def addfloat2addr(ins, vmap):
    logger.debug('AddFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'F', vmap)

# sub-float/2addr vA, vB ( 4b, 4b )
def subfloat2addr(ins, vmap):
    logger.debug('SubFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'F', vmap)

# mul-float/2addr vA, vB ( 4b, 4b )
def mulfloat2addr(ins, vmap):
    logger.debug('MulFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'F', vmap)

# div-float/2addr vA, vB ( 4b, 4b )
def divfloat2addr(ins, vmap):
    logger.debug('DivFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'F', vmap)

# rem-float/2addr vA, vB ( 4b, 4b )
def remfloat2addr(ins, vmap):
    logger.debug('RemFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'F', vmap)

# add-double/2addr vA, vB ( 4b, 4b )
def adddouble2addr(ins, vmap):
    logger.debug('AddDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'D', vmap)
# sub-double/2addr vA, vB ( 4b, 4b )
def subdouble2addr(ins, vmap):
    """Decompile sub-double/2addr: vA = vA - vB (doubles)."""
    # Fix: capitalise the log label to match every sibling handler
    # ('SubDouble2Addr'; it previously read 'subDouble2Addr').
    logger.debug('SubDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'D', vmap)
# mul-double/2addr vA, vB ( 4b, 4b )
def muldouble2addr(ins, vmap):
    # In-place multiply: vA = vA * vB (doubles).
    logger.debug('MulDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'D', vmap)

# div-double/2addr vA, vB ( 4b, 4b )
def divdouble2addr(ins, vmap):
    # In-place divide: vA = vA / vB (doubles).
    logger.debug('DivDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'D', vmap)

# rem-double/2addr vA, vB ( 4b, 4b )
def remdouble2addr(ins, vmap):
    # In-place remainder: vA = vA % vB (doubles).
    logger.debug('RemDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'D', vmap)
# --- Literal-16 binary operators --------------------------------------------
# vA = vB <op> #+CCCC (signed 16-bit literal), via
# assign_lit(op, literal, dst_reg, src_reg, vmap).

# add-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def addintlit16(ins, vmap):
    logger.debug('AddIntLit16 : %s', ins.get_output())
    return assign_lit(Op.ADD, ins.CCCC, ins.A, ins.B, vmap)

# rsub-int vA, vB, #+CCCC ( 4b, 4b, 16b )
def rsubint(ins, vmap):
    # Reverse subtraction: vA = literal - vB, hence the constant is the
    # LEFT operand of the SUB expression.
    logger.debug('RSubInt : %s', ins.get_output())
    var_a, var_b = get_variables(vmap, ins.A, ins.B)
    cst = Constant(ins.CCCC, 'I')
    return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))

# mul-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def mulintlit16(ins, vmap):
    logger.debug('MulIntLit16 : %s', ins.get_output())
    return assign_lit(Op.MUL, ins.CCCC, ins.A, ins.B, vmap)

# div-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def divintlit16(ins, vmap):
    logger.debug('DivIntLit16 : %s', ins.get_output())
    return assign_lit(Op.DIV, ins.CCCC, ins.A, ins.B, vmap)

# rem-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def remintlit16(ins, vmap):
    logger.debug('RemIntLit16 : %s', ins.get_output())
    return assign_lit(Op.MOD, ins.CCCC, ins.A, ins.B, vmap)

# and-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def andintlit16(ins, vmap):
    logger.debug('AndIntLit16 : %s', ins.get_output())
    return assign_lit(Op.AND, ins.CCCC, ins.A, ins.B, vmap)

# or-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def orintlit16(ins, vmap):
    logger.debug('OrIntLit16 : %s', ins.get_output())
    return assign_lit(Op.OR, ins.CCCC, ins.A, ins.B, vmap)

# xor-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def xorintlit16(ins, vmap):
    logger.debug('XorIntLit16 : %s', ins.get_output())
    return assign_lit(Op.XOR, ins.CCCC, ins.A, ins.B, vmap)
# --- Literal-8 binary operators ---------------------------------------------
# vAA = vBB <op> #+CC (signed 8-bit literal), via
# assign_lit(op, literal, dst_reg, src_reg, vmap).

# add-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def addintlit8(ins, vmap):
    logger.debug('AddIntLit8 : %s', ins.get_output())
    # Cosmetic normalisation: a negative literal is rendered as a
    # subtraction, so "vBB + (-3)" decompiles as "vBB - 3".
    literal, op = [(ins.CC, Op.ADD), (-ins.CC, Op.SUB)][ins.CC < 0]
    return assign_lit(op, literal, ins.AA, ins.BB, vmap)

# rsub-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def rsubintlit8(ins, vmap):
    # Reverse subtraction: vAA = literal - vBB (constant on the left).
    logger.debug('RSubIntLit8 : %s', ins.get_output())
    var_a, var_b = get_variables(vmap, ins.AA, ins.BB)
    cst = Constant(ins.CC, 'I')
    return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))

# mul-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def mulintlit8(ins, vmap):
    logger.debug('MulIntLit8 : %s', ins.get_output())
    return assign_lit(Op.MUL, ins.CC, ins.AA, ins.BB, vmap)

# div-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def divintlit8(ins, vmap):
    logger.debug('DivIntLit8 : %s', ins.get_output())
    return assign_lit(Op.DIV, ins.CC, ins.AA, ins.BB, vmap)

# rem-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def remintlit8(ins, vmap):
    logger.debug('RemIntLit8 : %s', ins.get_output())
    return assign_lit(Op.MOD, ins.CC, ins.AA, ins.BB, vmap)

# and-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def andintlit8(ins, vmap):
    logger.debug('AndIntLit8 : %s', ins.get_output())
    return assign_lit(Op.AND, ins.CC, ins.AA, ins.BB, vmap)

# or-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def orintlit8(ins, vmap):
    logger.debug('OrIntLit8 : %s', ins.get_output())
    return assign_lit(Op.OR, ins.CC, ins.AA, ins.BB, vmap)

# xor-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def xorintlit8(ins, vmap):
    logger.debug('XorIntLit8 : %s', ins.get_output())
    return assign_lit(Op.XOR, ins.CC, ins.AA, ins.BB, vmap)

# shl-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shlintlit8(ins, vmap):
    logger.debug('ShlIntLit8 : %s', ins.get_output())
    return assign_lit(Op.INTSHL, ins.CC, ins.AA, ins.BB, vmap)

# shr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shrintlit8(ins, vmap):
    logger.debug('ShrIntLit8 : %s', ins.get_output())
    return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)

# ushr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def ushrintlit8(ins, vmap):
    # NOTE(review): reuses Op.INTSHR (signed shift) for ushr -- see the note
    # on the three-register shift handlers.
    logger.debug('UShrIntLit8 : %s', ins.get_output())
    return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)
# Dispatch table mapping a Dalvik opcode value to its handler: the list
# index IS the opcode (0x00..0xe2), so entry order must never change.
# Unassigned opcodes are padded with `nop` to keep the indices aligned.
INSTRUCTION_SET = [
    # 0x00
    nop,  # nop
    move,  # move
    movefrom16,  # move/from16
    move16,  # move/16
    movewide,  # move-wide
    movewidefrom16,  # move-wide/from16
    movewide16,  # move-wide/16
    moveobject,  # move-object
    moveobjectfrom16,  # move-object/from16
    moveobject16,  # move-object/16
    moveresult,  # move-result
    moveresultwide,  # move-result-wide
    moveresultobject,  # move-result-object
    moveexception,  # move-exception
    returnvoid,  # return-void
    return_reg,  # return
    # 0x10
    returnwide,  # return-wide
    returnobject,  # return-object
    const4,  # const/4
    const16,  # const/16
    const,  # const
    consthigh16,  # const/high16
    constwide16,  # const-wide/16
    constwide32,  # const-wide/32
    constwide,  # const-wide
    constwidehigh16,  # const-wide/high16
    conststring,  # const-string
    conststringjumbo,  # const-string/jumbo
    constclass,  # const-class
    monitorenter,  # monitor-enter
    monitorexit,  # monitor-exit
    checkcast,  # check-cast
    # 0x20
    instanceof,  # instance-of
    arraylength,  # array-length
    newinstance,  # new-instance
    newarray,  # new-array
    fillednewarray,  # filled-new-array
    fillednewarrayrange,  # filled-new-array/range
    fillarraydata,  # fill-array-data
    throw,  # throw
    goto,  # goto
    goto16,  # goto/16
    goto32,  # goto/32
    packedswitch,  # packed-switch
    sparseswitch,  # sparse-switch
    cmplfloat,  # cmpl-float
    cmpgfloat,  # cmpg-float
    cmpldouble,  # cmpl-double
    # 0x30
    cmpgdouble,  # cmpg-double
    cmplong,  # cmp-long
    ifeq,  # if-eq
    ifne,  # if-ne
    iflt,  # if-lt
    ifge,  # if-ge
    ifgt,  # if-gt
    ifle,  # if-le
    ifeqz,  # if-eqz
    ifnez,  # if-nez
    ifltz,  # if-ltz
    ifgez,  # if-gez
    ifgtz,  # if-gtz
    iflez,  # if-le
    nop,  # unused
    nop,  # unused
    # 0x40
    nop,  # unused
    nop,  # unused
    nop,  # unused
    nop,  # unused
    aget,  # aget
    agetwide,  # aget-wide
    agetobject,  # aget-object
    agetboolean,  # aget-boolean
    agetbyte,  # aget-byte
    agetchar,  # aget-char
    agetshort,  # aget-short
    aput,  # aput
    aputwide,  # aput-wide
    aputobject,  # aput-object
    aputboolean,  # aput-boolean
    aputbyte,  # aput-byte
    # 0x50
    aputchar,  # aput-char
    aputshort,  # aput-short
    iget,  # iget
    igetwide,  # iget-wide
    igetobject,  # iget-object
    igetboolean,  # iget-boolean
    igetbyte,  # iget-byte
    igetchar,  # iget-char
    igetshort,  # iget-short
    iput,  # iput
    iputwide,  # iput-wide
    iputobject,  # iput-object
    iputboolean,  # iput-boolean
    iputbyte,  # iput-byte
    iputchar,  # iput-char
    iputshort,  # iput-short
    # 0x60
    sget,  # sget
    sgetwide,  # sget-wide
    sgetobject,  # sget-object
    sgetboolean,  # sget-boolean
    sgetbyte,  # sget-byte
    sgetchar,  # sget-char
    sgetshort,  # sget-short
    sput,  # sput
    sputwide,  # sput-wide
    sputobject,  # sput-object
    sputboolean,  # sput-boolean
    sputbyte,  # sput-byte
    sputchar,  # sput-char
    sputshort,  # sput-short
    invokevirtual,  # invoke-virtual
    invokesuper,  # invoke-super
    # 0x70
    invokedirect,  # invoke-direct
    invokestatic,  # invoke-static
    invokeinterface,  # invoke-interface
    nop,  # unused
    invokevirtualrange,  # invoke-virtual/range
    invokesuperrange,  # invoke-super/range
    invokedirectrange,  # invoke-direct/range
    invokestaticrange,  # invoke-static/range
    invokeinterfacerange,  # invoke-interface/range
    nop,  # unused
    nop,  # unused
    negint,  # neg-int
    notint,  # not-int
    neglong,  # neg-long
    notlong,  # not-long
    negfloat,  # neg-float
    # 0x80
    negdouble,  # neg-double
    inttolong,  # int-to-long
    inttofloat,  # int-to-float
    inttodouble,  # int-to-double
    longtoint,  # long-to-int
    longtofloat,  # long-to-float
    longtodouble,  # long-to-double
    floattoint,  # float-to-int
    floattolong,  # float-to-long
    floattodouble,  # float-to-double
    doubletoint,  # double-to-int
    doubletolong,  # double-to-long
    doubletofloat,  # double-to-float
    inttobyte,  # int-to-byte
    inttochar,  # int-to-char
    inttoshort,  # int-to-short
    # 0x90
    addint,  # add-int
    subint,  # sub-int
    mulint,  # mul-int
    divint,  # div-int
    remint,  # rem-int
    andint,  # and-int
    orint,  # or-int
    xorint,  # xor-int
    shlint,  # shl-int
    shrint,  # shr-int
    ushrint,  # ushr-int
    addlong,  # add-long
    sublong,  # sub-long
    mullong,  # mul-long
    divlong,  # div-long
    remlong,  # rem-long
    # 0xa0
    andlong,  # and-long
    orlong,  # or-long
    xorlong,  # xor-long
    shllong,  # shl-long
    shrlong,  # shr-long
    ushrlong,  # ushr-long
    addfloat,  # add-float
    subfloat,  # sub-float
    mulfloat,  # mul-float
    divfloat,  # div-float
    remfloat,  # rem-float
    adddouble,  # add-double
    subdouble,  # sub-double
    muldouble,  # mul-double
    divdouble,  # div-double
    remdouble,  # rem-double
    # 0xb0
    addint2addr,  # add-int/2addr
    subint2addr,  # sub-int/2addr
    mulint2addr,  # mul-int/2addr
    divint2addr,  # div-int/2addr
    remint2addr,  # rem-int/2addr
    andint2addr,  # and-int/2addr
    orint2addr,  # or-int/2addr
    xorint2addr,  # xor-int/2addr
    shlint2addr,  # shl-int/2addr
    shrint2addr,  # shr-int/2addr
    ushrint2addr,  # ushr-int/2addr
    addlong2addr,  # add-long/2addr
    sublong2addr,  # sub-long/2addr
    mullong2addr,  # mul-long/2addr
    divlong2addr,  # div-long/2addr
    remlong2addr,  # rem-long/2addr
    # 0xc0
    andlong2addr,  # and-long/2addr
    orlong2addr,  # or-long/2addr
    xorlong2addr,  # xor-long/2addr
    shllong2addr,  # shl-long/2addr
    shrlong2addr,  # shr-long/2addr
    ushrlong2addr,  # ushr-long/2addr
    addfloat2addr,  # add-float/2addr
    subfloat2addr,  # sub-float/2addr
    mulfloat2addr,  # mul-float/2addr
    divfloat2addr,  # div-float/2addr
    remfloat2addr,  # rem-float/2addr
    adddouble2addr,  # add-double/2addr
    subdouble2addr,  # sub-double/2addr
    muldouble2addr,  # mul-double/2addr
    divdouble2addr,  # div-double/2addr
    remdouble2addr,  # rem-double/2addr
    # 0xd0
    addintlit16,  # add-int/lit16
    rsubint,  # rsub-int
    mulintlit16,  # mul-int/lit16
    divintlit16,  # div-int/lit16
    remintlit16,  # rem-int/lit16
    andintlit16,  # and-int/lit16
    orintlit16,  # or-int/lit16
    xorintlit16,  # xor-int/lit16
    addintlit8,  # add-int/lit8
    rsubintlit8,  # rsub-int/lit8
    mulintlit8,  # mul-int/lit8
    divintlit8,  # div-int/lit8
    remintlit8,  # rem-int/lit8
    andintlit8,  # and-int/lit8
    orintlit8,  # or-int/lit8
    xorintlit8,  # xor-int/lit8
    # 0xe0
    shlintlit8,  # shl-int/lit8
    shrintlit8,  # shr-int/lit8
    ushrintlit8,  # ushr-int/lit8
]
| shuxin/androguard | androguard/decompiler/dad/opcode_ins.py | Python | apache-2.0 | 61,738 |
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
"""
This module contains classes specific to Cassandra data stores.
It contains the following import shortcuts:
```python
from pygw.store.cassandra import CassandraOptions
```
"""
from .options import CassandraOptions
| locationtech/geowave | python/src/main/python/pygw/store/cassandra/__init__.py | Python | apache-2.0 | 739 |
import sqlite3

# Maximum width a printed column may occupy.
FIELD_MAX_WIDTH = 20
TABLE_NAME = 'people'
SELECT = 'select * from %s order by age, name_last' % TABLE_NAME

con = sqlite3.connect("mydb")
try:
    # Fix: close the connection even when the query raises (previously the
    # connection leaked on any error before con.close()).
    cur = con.cursor()
    cur.execute(SELECT)

    # Print a header built from the column names of the result set.
    for fieldDesc in cur.description:
        print(fieldDesc[0].ljust(FIELD_MAX_WIDTH), end=' ')
    print()  # Finish the header with a newline.
    print('-' * 78)

    # For each row, print the value of each field left-justified within
    # the maximum possible width of that field.
    fieldIndices = range(len(cur.description))
    for row in cur:
        for fieldIndex in fieldIndices:
            fieldValue = str(row[fieldIndex])
            print(fieldValue.ljust(FIELD_MAX_WIDTH), end=' ')
        print()  # Finish the row with a newline.
finally:
    con.close()
| batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Doc/includes/sqlite3/simple_tableprinter.py | Python | apache-2.0 | 731 |
##############################################################################
#
# Copyright Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import time
class Wait(object):
    """Repeatedly poll a condition until it is true, or fail after a timeout.

    An instance is callable: ``wait(func)`` polls ``func`` every ``wait``
    seconds for up to ``timeout`` seconds and raises ``TimeOutWaitingFor``
    if it never returns a truthy value.  Called without a function
    (``wait(timeout=...)``) it returns a decorator-style wrapper.
    The ``getnow``/``getsleep`` factories exist so tests can inject fake
    clocks.
    """

    class TimeOutWaitingFor(Exception):
        "A test condition timed out"

    # Class-level defaults, overridable per instance and per call.
    timeout = 9
    wait = .01

    def __init__(self, timeout=None, wait=None, exception=None,
                 getnow=(lambda: time.time), getsleep=(lambda: time.sleep)):
        self.getnow = getnow
        self.getsleep = getsleep
        # Only shadow the class-level defaults when explicitly supplied.
        if timeout is not None:
            self.timeout = timeout
        if wait is not None:
            self.wait = wait
        if exception is not None:
            self.TimeOutWaitingFor = exception

    def __call__(self, func=None, timeout=None, wait=None, message=None):
        # No function yet: behave as a configured decorator/factory.
        if func is None:
            return lambda func: self(func, timeout, wait, message)

        # Cheap fast path: condition already holds.
        if func():
            return

        now = self.getnow()
        sleep = self.getsleep()
        effective_timeout = self.timeout if timeout is None else timeout
        pause = float(self.wait if wait is None else wait)
        deadline = now() + effective_timeout
        while True:
            sleep(pause)
            if func():
                return
            if now() > deadline:
                description = (message or
                               getattr(func, '__doc__') or
                               getattr(func, '__name__'))
                raise self.TimeOutWaitingFor(description)


wait = Wait()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
from cairis.core.ARM import DatabaseProxyException
from cairis.mio.ModelImport import importModelFile
__author__ = 'Shamal Faily'
class UseCaseContributionTest(unittest.TestCase):
    """Round-trip tests for use-case contributions via the CAIRIS db proxy.

    Requires CAIRIS_SRC to point at a source checkout and a reachable
    database; setUpClass loads the sample webinos model the fixtures
    reference.
    """

    @classmethod
    def setUpClass(cls):
        # One-time fixture: initialise the Borg-backed database connection
        # and import the model that the JSON fixtures refer to.
        cairis.core.BorgFactory.initialise()
        importModelFile(os.environ['CAIRIS_SRC'] + '/test/webinos.xml',1)

    def setUp(self):
        # Load one characteristic synopsis and one use-case contribution
        # from the JSON fixture; each test uses the first entry only.
        f = open(os.environ['CAIRIS_SRC'] + '/test/usecase_contributions.json')
        d = json.load(f)
        f.close()
        self.csData = d['characteristic_synopses'][0]
        self.rcData = d['usecase_contributions'][0]

    def tearDown(self):
        pass

    def testAddContribution(self):
        # The synopsis must exist before a contribution can reference it;
        # -1 asks the proxy to assign a fresh id.
        ics = ReferenceSynopsis(-1,self.csData['theReference'],self.csData['theSynopsis'],self.csData['theDimension'],self.csData['theActorType'],self.csData['theActor'])
        b = Borg()
        b.dbProxy.addCharacteristicSynopsis(ics)
        irc = ReferenceContribution(self.rcData['theSource'],self.rcData['theDestination'],self.rcData['theMeansEnd'],self.rcData['theContribution'])
        b.dbProxy.addUseCaseContribution(irc)
        # Contributions are keyed by destination; each value is a
        # (contribution, relationship-type) pair.
        orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
        orc,rType = orcs[self.rcData['theDestination']]
        self.assertEqual(orc.source(), irc.source())
        self.assertEqual(orc.destination(), irc.destination())
        self.assertEqual(orc.meansEnd(), irc.meansEnd())
        self.assertEqual(orc.contribution(), irc.contribution())

    def testUpdateContribution(self):
        # NOTE(review): relies on testAddContribution having populated the
        # database earlier in the same run (alphabetical test ordering).
        b = Borg()
        orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
        orc,rType = orcs[self.rcData['theDestination']]
        orc.theContribution = 'Break'
        b.dbProxy.updateUseCaseContribution(orc)
        # Re-read and verify the mutated contribution round-trips intact.
        urcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
        urc,rType = urcs[self.rcData['theDestination']]
        self.assertEqual(orc.source(), urc.source())
        self.assertEqual(orc.destination(), urc.destination())
        self.assertEqual(orc.meansEnd(), urc.meansEnd())
        self.assertEqual(orc.contribution(), urc.contribution())


if __name__ == '__main__':
    unittest.main()
| nathanbjenx/cairis | cairis/test/test_UseCaseContribution.py | Python | apache-2.0 | 3,094 |
"""Auto-generated file, do not edit by hand. LY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for Libya (LY): three-digit numbers matching 1\d{2},
# with 19[013] registered as emergency/short codes (example: 193).
PHONE_METADATA_LY = PhoneMetadata(id='LY', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_length=(3,)),
    emergency=PhoneNumberDesc(national_number_pattern='19[013]', example_number='193', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='19[013]', example_number='193', possible_length=(3,)),
    short_data=True)
| gencer/python-phonenumbers | python/phonenumbers/shortdata/region_LY.py | Python | apache-2.0 | 556 |
"""Support for Meteo-France raining forecast sensor."""
import logging
from typing import Optional

from meteofrance_api.helpers import (
    get_warning_text_status_from_indice_color,
    readeable_phenomenoms_dict,
)

from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
    CoordinatorEntity,
    DataUpdateCoordinator,
)
from homeassistant.util import dt as dt_util

from .const import (
    ATTR_NEXT_RAIN_1_HOUR_FORECAST,
    ATTR_NEXT_RAIN_DT_REF,
    ATTRIBUTION,
    COORDINATOR_ALERT,
    COORDINATOR_FORECAST,
    COORDINATOR_RAIN,
    DOMAIN,
    ENTITY_API_DATA_PATH,
    ENTITY_DEVICE_CLASS,
    ENTITY_ENABLE,
    ENTITY_ICON,
    ENTITY_NAME,
    ENTITY_UNIT,
    SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the Meteo-France sensor platform."""
    entry_data = hass.data[DOMAIN][entry.entry_id]
    coordinator_forecast = entry_data[COORDINATOR_FORECAST]
    coordinator_rain = entry_data[COORDINATOR_RAIN]
    coordinator_alert = entry_data[COORDINATOR_ALERT]

    entities = []
    for sensor_type in SENSOR_TYPES:
        # Rain and alert sensors use dedicated coordinators; they are only
        # created when the corresponding coordinator exists.
        if sensor_type == "next_rain":
            if coordinator_rain:
                entities.append(MeteoFranceRainSensor(sensor_type, coordinator_rain))
            continue
        if sensor_type == "weather_alert":
            if coordinator_alert:
                entities.append(MeteoFranceAlertSensor(sensor_type, coordinator_alert))
            continue
        # Probability sensors need probability data from the forecast API.
        if sensor_type in ["rain_chance", "freeze_chance", "snow_chance"]:
            if not coordinator_forecast.data.probability_forecast:
                _LOGGER.warning(
                    "Sensor %s skipped for %s as data is missing in the API",
                    sensor_type,
                    coordinator_forecast.data.position["name"],
                )
                continue
        entities.append(MeteoFranceSensor(sensor_type, coordinator_forecast))

    async_add_entities(entities, False)
class MeteoFranceSensor(CoordinatorEntity):
    """Representation of a Meteo-France sensor."""

    def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator):
        """Initialize the Meteo-France sensor."""
        super().__init__(coordinator)
        self._type = sensor_type
        # Forecast coordinators expose a position; name/unique_id derive from it.
        if hasattr(self.coordinator.data, "position"):
            place = self.coordinator.data.position["name"]
            self._name = f"{place} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
            self._unique_id = f"{self.coordinator.data.position['lat']},{self.coordinator.data.position['lon']}_{self._type}"

    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def state(self):
        """Return the state."""
        # The data path is a ':'-separated attribute/key chain into the
        # coordinator data, e.g. "current_forecast:T:value".
        segments = SENSOR_TYPES[self._type][ENTITY_API_DATA_PATH].split(":")
        data = getattr(self.coordinator.data, segments[0])

        if segments[0] == "probability_forecast":
            # The first probability entry is always null in the API result,
            # so deep paths look for the first populated entry instead.
            if len(segments) == 3:
                value = _find_first_probability_forecast_not_null(data, segments)
            else:
                value = data[0][segments[1]]
        elif len(segments) == 3:
            value = data[segments[1]][segments[2]]
        else:
            value = data[segments[1]]

        if self._type in ["wind_speed", "wind_gust"]:
            # convert API wind speed from m/s to km/h
            value = round(value * 3.6)
        return value

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return SENSOR_TYPES[self._type][ENTITY_UNIT]

    @property
    def icon(self):
        """Return the icon."""
        return SENSOR_TYPES[self._type][ENTITY_ICON]

    @property
    def device_class(self):
        """Return the device class."""
        return SENSOR_TYPES[self._type][ENTITY_DEVICE_CLASS]

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return SENSOR_TYPES[self._type][ENTITY_ENABLE]

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
class MeteoFranceRainSensor(MeteoFranceSensor):
    """Representation of a Meteo-France rain sensor."""

    @property
    def state(self):
        """Return the state."""
        # search first cadran with rain
        for cadran in self.coordinator.data.forecast:
            if cadran["rain"] > 1:
                return dt_util.utc_from_timestamp(cadran["dt"]).isoformat()
        return None

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        reference_dt = self.coordinator.data.forecast[0]["dt"]
        # Map "<minutes from reference> min" -> rain description for each cadran.
        timeline = {
            f"{int((entry['dt'] - reference_dt) / 60)} min": entry["desc"]
            for entry in self.coordinator.data.forecast
        }
        return {
            ATTR_NEXT_RAIN_DT_REF: dt_util.utc_from_timestamp(reference_dt).isoformat(),
            ATTR_NEXT_RAIN_1_HOUR_FORECAST: timeline,
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }
class MeteoFranceAlertSensor(MeteoFranceSensor):
    """Representation of a Meteo-France alert sensor."""

    # pylint: disable=super-init-not-called
    # NOTE(review): super().__init__ *is* called below, so this pragma looks
    # stale — confirm against pylint before removing it.
    def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator):
        """Initialize the Meteo-France sensor."""
        super().__init__(sensor_type, coordinator)
        # Alert sensors are keyed by department, not by coordinates.
        dept_code = self.coordinator.data.domain_id
        self._name = f"{dept_code} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
        self._unique_id = self._name

    @property
    def state(self):
        """Return the state."""
        # Worst (max) alert color for the department, mapped to warning text.
        return get_warning_text_status_from_indice_color(
            self.coordinator.data.get_domain_max_color()
        )

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Per-phenomenon max colors, plus the standard attribution.
        return {
            **readeable_phenomenoms_dict(self.coordinator.data.phenomenons_max_colors),
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }
def _find_first_probability_forecast_not_null(
probability_forecast: list, path: list
) -> int:
"""Search the first not None value in the first forecast elements."""
for forecast in probability_forecast[0:3]:
if forecast[path[1]][path[2]] is not None:
return forecast[path[1]][path[2]]
# Default return value if no value founded
return None
| tboyce021/home-assistant | homeassistant/components/meteo_france/sensor.py | Python | apache-2.0 | 7,140 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 João Pedro Rodrigues
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit Tests for `pdb_chain`.
"""
import os
import sys
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
    """
    Generic class for testing tools.

    Each test sets sys.argv to simulate a command line, runs the tool's
    main() via exec_module(), and asserts on retcode/stdout/stderr.
    """

    def setUp(self):
        # Dynamically import the module
        name = 'pdbtools.pdb_chain'
        self.module = __import__(name, fromlist=[''])

    def exec_module(self):
        """
        Execs module.

        Captures the SystemExit code and the captured output streams into
        self.retcode / self.stdout / self.stderr for the assertions below.
        """

        with OutputCapture() as output:
            try:
                self.module.main()
            except SystemExit as e:
                self.retcode = e.code

        self.stdout = output.stdout
        self.stderr = output.stderr

        return

    def test_default(self):
        """$ pdb_chain data/dummy.pdb"""

        # Simulate input
        sys.argv = ['', os.path.join(data_dir, 'dummy.pdb')]

        # Execute the script
        self.exec_module()

        # Validate results
        self.assertEqual(self.retcode, 0)  # ensure the program exited OK.
        # 204 is the line count produced for the dummy.pdb fixture.
        self.assertEqual(len(self.stdout), 204)  # no lines deleted
        self.assertEqual(len(self.stderr), 0)  # no errors

        # With no chain option, all coordinate records get a blank chain id.
        records = (('ATOM', 'HETATM'))
        chain_ids = [l[21] for l in self.stdout if l.startswith(records)]
        unique_chain_ids = list(set(chain_ids))
        self.assertEqual(unique_chain_ids, [' '])

    def test_two_options(self):
        """$ pdb_chain -X data/dummy.pdb"""

        sys.argv = ['', '-X', os.path.join(data_dir, 'dummy.pdb')]

        self.exec_module()

        self.assertEqual(self.retcode, 0)
        self.assertEqual(len(self.stdout), 204)
        self.assertEqual(len(self.stderr), 0)

        # All coordinate records now carry the requested chain id 'X'.
        records = (('ATOM', 'HETATM'))
        chain_ids = [l[21] for l in self.stdout if l.startswith(records)]
        unique_chain_ids = list(set(chain_ids))
        self.assertEqual(unique_chain_ids, ['X'])

    def test_file_not_found(self):
        """$ pdb_chain -A not_existing.pdb"""

        afile = os.path.join(data_dir, 'not_existing.pdb')
        sys.argv = ['', '-A', afile]

        self.exec_module()

        self.assertEqual(self.retcode, 1)  # exit code is 1 (error)
        self.assertEqual(len(self.stdout), 0)  # nothing written to stdout
        self.assertEqual(self.stderr[0][:22],
                         "ERROR!! File not found")  # proper error message

    def test_file_missing(self):
        """$ pdb_chain -A"""

        sys.argv = ['', '-A']

        self.exec_module()

        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)  # no output
        self.assertEqual(self.stderr[0],
                         "ERROR!! No data to process!")

    def test_helptext(self):
        """$ pdb_chain"""

        sys.argv = ['']

        self.exec_module()

        self.assertEqual(self.retcode, 1)  # ensure the program exited gracefully.
        self.assertEqual(len(self.stdout), 0)  # no output
        # Usage text is the module docstring, minus the trailing newline split.
        self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])

    def test_invalid_option(self):
        """$ pdb_chain -AH data/dummy.pdb"""

        sys.argv = ['', '-AH', os.path.join(data_dir, 'dummy.pdb')]

        self.exec_module()

        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)
        self.assertEqual(self.stderr[0][:47],
                         "ERROR!! Chain identifiers must be a single char")

    def test_not_an_option(self):
        """$ pdb_chain A data/dummy.pdb"""

        sys.argv = ['', 'A', os.path.join(data_dir, 'dummy.pdb')]

        self.exec_module()

        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)
        self.assertEqual(self.stderr[0],
                         "ERROR! First argument is not an option: 'A'")
# Allow running this test module directly.
if __name__ == '__main__':
    from config import test_dir

    mpath = os.path.abspath(os.path.join(test_dir, '..'))
    sys.path.insert(0, mpath)  # so we load dev files before any installation
    unittest.main()
| JoaoRodrigues/pdb-tools | tests/test_pdb_chain.py | Python | apache-2.0 | 4,623 |
# Pre-Django-3.2 style hook telling Django which AppConfig class to use
# for this app by default.
default_app_config = 'workflow.apps.WorkflowAppConfig'
| toladata/TolaActivity | workflow/__init__.py | Python | apache-2.0 | 55 |
"""Implement the auth feature from Hass.io for Add-ons."""
from ipaddress import ip_address
import logging
import os
from aiohttp import web
from aiohttp.web_exceptions import (
HTTPInternalServerError,
HTTPNotFound,
HTTPUnauthorized,
)
import voluptuous as vol
from homeassistant.auth.models import User
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_HASS_USER, KEY_REAL_IP
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_USERNAME
_LOGGER = logging.getLogger(__name__)

# Expected payload for add-on login checks: credentials plus the calling
# add-on identifier. Extra keys are tolerated (ALLOW_EXTRA).
SCHEMA_API_AUTH = vol.Schema(
    {
        vol.Required(ATTR_USERNAME): cv.string,
        vol.Required(ATTR_PASSWORD): cv.string,
        vol.Required(ATTR_ADDON): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)

# Expected payload for password resets: the user and the new password.
SCHEMA_API_PASSWORD_RESET = vol.Schema(
    {vol.Required(ATTR_USERNAME): cv.string, vol.Required(ATTR_PASSWORD): cv.string},
    extra=vol.ALLOW_EXTRA,
)
@callback
def async_setup_auth_view(hass: HomeAssistantType, user: User):
    """Register the Hass.io auth and password-reset HTTP views."""
    for view in (HassIOAuth(hass, user), HassIOPasswordReset(hass, user)):
        hass.http.register_view(view)
class HassIOBaseAuth(HomeAssistantView):
    """Hass.io view to handle auth requests."""

    def __init__(self, hass: HomeAssistantType, user: User):
        """Initialize WebView."""
        self.hass = hass
        self.user = user

    def _check_access(self, request: web.Request):
        """Check if this call is from Supervisor."""
        # Only the Supervisor host (taken from the HASSIO env var) may call us.
        supervisor_ip = ip_address(os.environ["HASSIO"].split(":")[0])
        if request[KEY_REAL_IP] != supervisor_ip:
            _LOGGER.error("Invalid auth request from %s", request[KEY_REAL_IP])
            raise HTTPUnauthorized()

        # The request must also carry the Supervisor user's token.
        if request[KEY_HASS_USER].id != self.user.id:
            _LOGGER.error("Invalid auth request from %s", request[KEY_HASS_USER].name)
            raise HTTPUnauthorized()

    def _get_provider(self):
        """Return Homeassistant auth provider."""
        provider = self.hass.auth.get_auth_provider("homeassistant", None)
        if provider is None:
            _LOGGER.error("Can't find Home Assistant auth.")
            raise HTTPNotFound()
        return provider
class HassIOAuth(HassIOBaseAuth):
    """Hass.io view to handle auth requests."""

    name = "api:hassio:auth"
    url = "/api/hassio_auth"

    @RequestDataValidator(SCHEMA_API_AUTH)
    async def post(self, request, data):
        """Handle auth requests."""
        self._check_access(request)
        username, password = data[ATTR_USERNAME], data[ATTR_PASSWORD]
        await self._check_login(username, password)
        return web.Response(status=200)

    async def _check_login(self, username, password):
        """Check User credentials."""
        provider = self._get_provider()
        try:
            await provider.async_validate_login(username, password)
        except HomeAssistantError:
            # Don't leak provider internals into the HTTP response.
            raise HTTPUnauthorized() from None
class HassIOPasswordReset(HassIOBaseAuth):
    """Hass.io view to handle password reset requests."""

    name = "api:hassio:auth:password:reset"
    url = "/api/hassio_auth/password_reset"

    @RequestDataValidator(SCHEMA_API_PASSWORD_RESET)
    async def post(self, request, data):
        """Handle password reset requests."""
        self._check_access(request)
        await self._change_password(data[ATTR_USERNAME], data[ATTR_PASSWORD])
        return web.Response(status=200)

    async def _change_password(self, username, password):
        """Change the given user's password and persist the auth store.

        (The previous docstring — "Check User credentials." — was a
        copy-paste error from HassIOAuth._check_login.)
        """
        provider = self._get_provider()

        try:
            # change_password is blocking store work, so run it in the executor.
            await self.hass.async_add_executor_job(
                provider.data.change_password, username, password
            )
            await provider.data.async_save()
        except HomeAssistantError:
            # Mirror HassIOAuth: suppress the internal exception context.
            raise HTTPInternalServerError() from None
| Teagan42/home-assistant | homeassistant/components/hassio/auth.py | Python | apache-2.0 | 4,204 |
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
import unittest
from etch.python.Types import *
from etch.binding.support.Validator_float import *
from etch.binding.transport.fmt.TypeCode import *
class Test_EtchSupportValidator_float(unittest.TestCase):
    """Tests for Validator_float dimension handling and value checking."""

    # Leading underscore keeps unittest from auto-running this case.
    def _test_validator(self):
        _test = self._test
        # TODO - resolve casting issues in validator tests
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MAX_VALUE), "1")
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MIN_VALUE), "2" )
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MAX_VALUE), "3" )
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MIN_VALUE), "4" )
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MAX_VALUE), "5" )
        _test(0, "float[0]", TypeCode.FLOAT4, Float, Float(Float.MIN_VALUE), "abc" )
        _test(1, "float[1]", TypeCode.ARRAY, [Float], [], 1)
        _test(2, "float[2]", TypeCode.ARRAY, [[Float]], [[]], [] )
        _test(3, "float[3]", TypeCode.ARRAY, [[[Float]]], [[[]]], [[]] )
        _test(4, "float[4]", TypeCode.ARRAY, [[[[Float]]]], [[[[]]]], [[[]]] )
        _test(5, "float[5]", TypeCode.ARRAY, [[[[[Float]]]]], [[[[[]]]]], [[[[]]]] )

    def test_getNeg1(self):
        # A negative dimension count is rejected.
        self.assertRaises(IllegalArgumentException, Validator_float.get, -1)

    def test_getNeg2(self):
        # A scalar (0-dim) validator has no element validator.
        self.assertRaises(IllegalArgumentException, Validator_float.get(0).elementValidator)

    def test_getMaxPlusOne(self):
        # Dimension counts above MAX_NDIMS are rejected.
        self.assertRaises(IllegalArgumentException, Validator_float.get, Validator.MAX_NDIMS + 1)

    def _test(self, n, s, tc, clss, good, bad):
        """Shared assertions for an n-dimensional float validator."""
        v = Validator_float.get(n)
        self.assertEqual(n, v.getNDims())
        self.assertEqual(clss, v.getExpectedClass())
        self.assertEqual(s, repr(v))
        self.assertEqual(True, v.validate(good))
        self.assertEqual(False, v.validate(bad))
        self.assertEqual(tc, v.checkValue(good))
        self.assertEqual(None, v.checkValue(bad))
        if n > 0:
            self.assertEqual(n-1, v.elementValidator().getNDims())
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
| OBIGOGIT/etch | binding-python/runtime/src/test/python/tests/binding/support/TestValidator_float.py | Python | apache-2.0 | 3,261 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path
import shutil
import tempfile
import unittest
from StringIO import StringIO
from datetime import datetime
import trac.ticket.api
import trac.ticket.report
import trac.ticket.roadmap
import trac.wiki.api
from trac import resource
from trac.attachment import Attachment
from trac.ticket.model import Ticket
from trac.util.datefmt import utc
from trac.wiki.model import WikiPage
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceTestCase(MultiproductTestCase):
    """Shared fixture: a global environment plus two product environments
    (the default product and u'xü'), each loaded with default data."""
    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self._load_product_from_data(self.global_env, u'xü')
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self.env1 = ProductEnvironment(self.global_env, u'xü')

        self._load_default_data(self.global_env)
        self._load_default_data(self.env1)

        # Enable product system component in product context
        self.env.enable_component(MultiProductSystem)

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self.env = None
class ProductAttachmentResourceTestCase(ProductResourceTestCase):
    """Resource descriptions/URLs for ticket attachments in the global
    and product neighborhoods."""
    def setUp(self):
        ProductResourceTestCase.setUp(self)
        # Attachments live on disk, so point the env at a scratch directory.
        self.global_env.path = os.path.join(tempfile.gettempdir(),
                                            'trac-tempenv')
        if os.path.exists(self.global_env.path):
            shutil.rmtree(self.global_env.path)
        os.mkdir(self.global_env.path)
        # Same file name in both scopes, but different descriptions so the
        # tests can tell which one was resolved.
        attachment = Attachment(self.global_env, 'ticket', 1)
        attachment.description = 'Global Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        attachment = Attachment(self.env1, 'ticket', 1)
        attachment.description = 'Product Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        self.resource = resource.Resource('ticket',
                                          1).child('attachment', 'foo.txt')

    def tearDown(self):
        shutil.rmtree(self.global_env.path)
        ProductResourceTestCase.tearDown(self)

    def test_global_neighborhood_attachments(self):
        target = resource.Neighborhood('global', None).child(self.resource)

        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] foo.txt ([global:] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Global Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_attachments(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)

        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] foo.txt ([product:xü] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Product Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductMilestoneResourceTestCase(ProductResourceTestCase):
    """Resource descriptions/URLs for milestones in both neighborhoods."""
    # NOTE: this class attribute shadows the imported `resource` module only
    # inside the class body; method bodies still see the module.
    resource = resource.Resource('milestone', 'milestone1')

    def test_global_neighborhood_milestone(self):
        target = resource.Neighborhood('global', None).child(self.resource)

        self.assertEquals("[global:] Milestone milestone1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Milestone milestone1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("milestone1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals("[global:] Milestone milestone1",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/milestone/milestone1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_milestone(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)

        self.assertEquals(u"[product:xü] Milestone milestone1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Milestone milestone1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"milestone1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"[product:xü] Milestone milestone1",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/milestone/milestone1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
# FIXME: No resource manager for reports in core ?
class ProductReportResourceTestCase(ProductResourceTestCase):
    """Resource descriptions/URLs for reports in both neighborhoods."""
    resource = resource.Resource('report', 1)

    def test_global_neighborhood_report(self):
        target = resource.Neighborhood('global', None).child(self.resource)

        self.assertEquals("[global:] report:1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] report:1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] report:1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('[global:] report:1 at version None',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/report/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_report(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)

        self.assertEquals(u"[product:xü] report:1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] report:1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] report:1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"[product:xü] report:1 at version None",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/report/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductTicketResourceTestCase(ProductResourceTestCase):
    """Resource descriptions/URLs for tickets in both neighborhoods.

    NOTE: a former setUp() override that only delegated to the parent with
    no extra work was removed; the inherited fixture is identical.
    """
    def _new_ticket(self, env, ticket_dict):
        """Insert a ticket populated from ticket_dict and return its id."""
        ticket = Ticket(env)
        ticket.populate(ticket_dict)
        return ticket.insert()

    def test_global_neighborhood_ticket(self):
        nbh = resource.Neighborhood('global', None)
        data = dict(summary='Ticket summary', description='Ticket description',
                    type='enhancement', status='new')
        target = nbh.child('ticket', self._new_ticket(self.global_env, data))

        self.assertEquals("[global:] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] #1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('enhancement: Ticket summary (new)',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/ticket/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_ticket(self):
        nbh = resource.Neighborhood('product', u'xü')
        data = dict(summary='Ticket summary', description='Ticket description',
                    type='task', status='accepted')
        target = nbh.child('ticket', self._new_ticket(self.env1, data))

        self.assertEquals(u"[product:xü] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] #1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"task: Ticket summary (accepted)",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/ticket/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductWikiResourceTestCase(ProductResourceTestCase):
    """Resource descriptions/URLs for wiki pages in both neighborhoods."""
    resource = resource.Resource('wiki', 'TestPage', version=2)

    def setUp(self):
        ProductResourceTestCase.setUp(self)
        # Create the same page (two versions each) in global and product
        # scope, so version=2 resolves in either neighborhood.
        page = WikiPage(self.global_env)
        page.name = 'TestPage'
        page.text = 'Bla bla'
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        page.save('joe', 'Testing global', '::1', t)
        page.text = 'Bla bla bla'
        t = datetime(2002, 2, 2, 2, 2, 2, 0, utc)
        page.save('joe', 'Testing global 2', '::1', t)

        page = WikiPage(self.env1)
        page.name = 'TestPage'
        page.text = 'alb alB'
        t = datetime(2011, 1, 1, 1, 1, 1, 0, utc)
        page.save('mary', 'Testing product', '::1', t)
        page.text = 'Bla bla bla'
        t = datetime(2012, 2, 2, 2, 2, 2, 0, utc)
        page.save('mary', 'Testing product 2', '::1', t)

    def test_global_neighborhood_wiki(self):
        target = resource.Neighborhood('global', None).child(self.resource)

        self.assertEquals("TestPage",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/wiki/TestPage?version=2',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_wiki(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)

        self.assertEquals(u"TestPage",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/wiki/TestPage?version=2',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class NeighborhoodTestCase(MultiproductTestCase):
    """Check that both resource neighborhoods are registered."""

    def setUp(self):
        self._mp_setup()

    def test_get_known_neighborhoods(self):
        system = resource.ResourceSystem(self.env)
        known = sorted(system.get_known_neighborhoods())
        self.assertEquals(['global', 'product'], known)
def test_suite():
    """Aggregate every resource test case into a single suite."""
    suite = unittest.TestSuite()
    for case in (ProductAttachmentResourceTestCase,
                 ProductMilestoneResourceTestCase,
                 ProductReportResourceTestCase,
                 ProductTicketResourceTestCase,
                 ProductWikiResourceTestCase,
                 NeighborhoodTestCase):
        suite.addTest(unittest.makeSuite(case, 'test'))
    return suite
# Allow running this module directly with its own aggregate suite.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| apache/bloodhound | bloodhound_multiproduct/tests/resource.py | Python | apache-2.0 | 14,341 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cloudfiles.errors import ContainerNotEmpty
from django import http
from django import template
from django.contrib import messages
from django.core.urlresolvers import reverse
from mox import IgnoreArg, IsA
from horizon import api
from horizon import test
from .tables import ContainersTable, ObjectsTable
# Resolved once at import time; container tests GET/POST against this URL.
CONTAINER_INDEX_URL = reverse('horizon:nova:containers:index')
class ContainerViewTests(test.BaseViewTests):
    """View tests for swift containers: list, delete and create.

    Backend calls are stubbed with mox; record expectations, ReplayAll,
    then drive the view/table under test.
    """
    def setUp(self):
        super(ContainerViewTests, self).setUp()
        # Single fake container shared by the table-action tests below.
        self.container = api.Container(None)
        self.container.name = 'containerName'
        self.container.size_used = 128
        self.containers = (self.container,)

    def test_index(self):
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest), marker=None).AndReturn(
                ([self.container], False))

        self.mox.ReplayAll()

        res = self.client.get(CONTAINER_INDEX_URL)

        self.assertTemplateUsed(res, 'nova/containers/index.html')
        self.assertIn('table', res.context)
        containers = res.context['table'].data
        self.assertEqual(len(containers), 1)
        self.assertEqual(containers[0].name, 'containerName')

    def test_delete_container(self):
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        api.swift_delete_container(IsA(http.HttpRequest),
                                   'containerName')
        self.mox.ReplayAll()

        # Deletion is driven through the table's action handler.
        action_string = u"containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_delete_container_nonempty(self):
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        # Simulate the backend refusing to delete a non-empty container;
        # the handler should still redirect back to the index.
        exception = ContainerNotEmpty('containerNotEmpty')
        api.swift_delete_container(
            IsA(http.HttpRequest),
            'containerName').AndRaise(exception)
        self.mox.ReplayAll()

        action_string = u"containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_create_container_get(self):
        res = self.client.get(reverse('horizon:nova:containers:create'))
        self.assertTemplateUsed(res, 'nova/containers/create.html')

    def test_create_container_post(self):
        formData = {'name': 'containerName',
                    'method': 'CreateContainer'}
        self.mox.StubOutWithMock(api, 'swift_create_container')
        api.swift_create_container(
            IsA(http.HttpRequest), u'containerName')
        self.mox.ReplayAll()

        res = self.client.post(reverse('horizon:nova:containers:create'),
                               formData)
        self.assertRedirectsNoFollow(res, CONTAINER_INDEX_URL)
class ObjectViewTests(test.BaseViewTests):
CONTAINER_NAME = 'containerName'
    def setUp(self):
        # Minimal stand-in for a cloudfiles object: just enough surface
        # (a metadata dict and sync_metadata) for api.swift.SwiftObject.
        class FakeCloudFile(object):
            def __init__(self):
                self.metadata = {}

            def sync_metadata(self):
                pass

        super(ObjectViewTests, self).setUp()
        swift_object = api.swift.SwiftObject(FakeCloudFile())
        swift_object.name = "test_object"
        swift_object.size = '128'
        swift_object.container = api.swift.Container(None)
        swift_object.container.name = 'container_name'
        self.swift_objects = [swift_object]
    def test_index(self):
        # Listing objects renders the objects index with the stubbed data.
        self.mox.StubOutWithMock(api, 'swift_get_objects')
        api.swift_get_objects(
            IsA(http.HttpRequest),
            self.CONTAINER_NAME,
            marker=None).AndReturn((self.swift_objects, False))

        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:nova:containers:object_index',
                              args=[self.CONTAINER_NAME]))
        self.assertTemplateUsed(res, 'nova/objects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.swift_objects)
    def test_upload_index(self):
        # The upload form page renders without backend interaction.
        res = self.client.get(reverse('horizon:nova:containers:object_upload',
                              args=[self.CONTAINER_NAME]))
        self.assertTemplateUsed(res, 'nova/objects/upload.html')
def test_upload(self):
OBJECT_DATA = 'objectData'
OBJECT_FILE = tempfile.TemporaryFile()
OBJECT_FILE.write(OBJECT_DATA)
OBJECT_FILE.flush()
OBJECT_FILE.seek(0)
OBJECT_NAME = 'objectName'
formData = {'method': 'UploadObject',
'container_name': self.CONTAINER_NAME,
'name': OBJECT_NAME,
'object_file': OBJECT_FILE}
self.mox.StubOutWithMock(api, 'swift_upload_object')
api.swift_upload_object(IsA(http.HttpRequest),
unicode(self.CONTAINER_NAME),
unicode(OBJECT_NAME),
OBJECT_DATA).AndReturn(self.swift_objects[0])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:containers:object_upload',
args=[self.CONTAINER_NAME]))
self.assertContains(res, 'enctype="multipart/form-data"')
res = self.client.post(reverse('horizon:nova:containers:object_upload',
args=[self.CONTAINER_NAME]),
formData)
self.assertRedirectsNoFollow(res,
reverse('horizon:nova:containers:object_index',
args=[self.CONTAINER_NAME]))
def test_delete(self):
self.mox.StubOutWithMock(api, 'swift_delete_object')
api.swift_delete_object(
IsA(http.HttpRequest),
self.CONTAINER_NAME, self.swift_objects[0].name)
self.mox.ReplayAll()
OBJECT_INDEX_URL = reverse('horizon:nova:containers:object_index',
args=[self.CONTAINER_NAME])
action_string = "objects__delete__%s" % self.swift_objects[0].name
form_data = {"action": action_string}
req = self.factory.post(OBJECT_INDEX_URL, form_data)
kwargs = {"container_name": self.CONTAINER_NAME}
table = ObjectsTable(req, self.swift_objects, **kwargs)
handled = table.maybe_handle()
self.assertEqual(handled['location'], OBJECT_INDEX_URL)
def test_download(self):
OBJECT_DATA = 'objectData'
OBJECT_NAME = 'objectName'
self.mox.StubOutWithMock(api, 'swift_get_object_data')
self.mox.StubOutWithMock(api.swift, 'swift_get_object')
api.swift.swift_get_object(IsA(http.HttpRequest),
unicode(self.CONTAINER_NAME),
unicode(OBJECT_NAME)) \
.AndReturn(self.swift_objects[0])
api.swift_get_object_data(IsA(http.HttpRequest),
unicode(self.CONTAINER_NAME),
unicode(OBJECT_NAME)).AndReturn(OBJECT_DATA)
self.mox.ReplayAll()
res = self.client.get(reverse(
'horizon:nova:containers:object_download',
args=[self.CONTAINER_NAME, OBJECT_NAME]))
self.assertEqual(res.content, OBJECT_DATA)
self.assertTrue(res.has_header('Content-Disposition'))
def test_copy_index(self):
OBJECT_NAME = 'objectName'
container = self.mox.CreateMock(api.Container)
container.name = self.CONTAINER_NAME
self.mox.StubOutWithMock(api, 'swift_get_containers')
api.swift_get_containers(
IsA(http.HttpRequest)).AndReturn(([container], False))
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:containers:object_copy',
args=[self.CONTAINER_NAME,
OBJECT_NAME]))
self.assertTemplateUsed(res, 'nova/objects/copy.html')
def test_copy(self):
NEW_CONTAINER_NAME = self.CONTAINER_NAME
NEW_OBJECT_NAME = 'newObjectName'
ORIG_CONTAINER_NAME = 'origContainerName'
ORIG_OBJECT_NAME = 'origObjectName'
formData = {'method': 'CopyObject',
'new_container_name': NEW_CONTAINER_NAME,
'new_object_name': NEW_OBJECT_NAME,
'orig_container_name': ORIG_CONTAINER_NAME,
'orig_object_name': ORIG_OBJECT_NAME}
container = self.mox.CreateMock(api.Container)
container.name = self.CONTAINER_NAME
self.mox.StubOutWithMock(api, 'swift_get_containers')
api.swift_get_containers(
IsA(http.HttpRequest)).AndReturn(([container], False))
self.mox.StubOutWithMock(api, 'swift_copy_object')
api.swift_copy_object(IsA(http.HttpRequest),
ORIG_CONTAINER_NAME,
ORIG_OBJECT_NAME,
NEW_CONTAINER_NAME,
NEW_OBJECT_NAME)
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:nova:containers:object_copy',
args=[ORIG_CONTAINER_NAME,
ORIG_OBJECT_NAME]),
formData)
self.assertRedirectsNoFollow(res,
reverse('horizon:nova:containers:object_index',
args=[NEW_CONTAINER_NAME]))
| citrix-openstack/horizon | horizon/horizon/dashboards/nova/containers/tests.py | Python | apache-2.0 | 10,798 |
#! /usr/bin/env python2
# encoding:utf-8
# Tested under Python 2.7.
# For Python 3, swap in the equivalent libraries (e.g. httplib -> http.client); a port is not provided here.
import httplib
import json
import hashlib
import random
import time
import argparse
class SmsSingleSender:
    """Single-recipient SMS sender for the Tencent Cloud SMS HTTP API."""
    # Application credentials; must be filled in before use (0/"" are placeholders).
    appid = 0
    appkey = ""
    # Tencent Cloud single-send endpoint.
    url = "https://yun.tim.qq.com/v5/tlssmssvr/sendsms"
    # Prefix prepended to every alarm message (runtime string, kept verbatim).
    template = "短信报警:"
    def __init__(self, appid, appkey):
        self.appid = appid
        self.appkey = appkey
        self.util = SmsSenderUtil()
    def send(self, sms_type, nation_code, phone_number, msg, extend, ext):
        """Send a single SMS.

        The message body must match the approved template; if several
        signatures exist, embed the desired one in the content, otherwise
        the default signature is used.

        Args:
            sms_type: message type, 0 for normal SMS, 1 for marketing SMS
            nation_code: country calling code, e.g. 86 for China
            phone_number: phone number without the country code
            msg: message content; must match the approved template
            extend: extension code, may be an empty string
            ext: opaque value echoed back by the server, may be empty

        Returns:
            (ok, result) where ok is True when the API reports success and
            result is the raw JSON response string, e.g.
            { "result": 0, "errmsg": "OK", "ext": "", "sid": "...", "fee": 1 }

        Request body example:
            {
                "tel": {"nationcode": "86", "mobile": "13788888888"},
                "type": 0,
                "msg": "your verification code is 1234",
                "sig": "fdba654e05bc0d15796713a1a1a2318c",
                "time": 1479888540,
                "extend": "",
                "ext": ""
            }
        """
        rnd = self.util.get_random()
        cur_time = self.util.get_cur_time()
        data = {}
        tel = {"nationcode": nation_code, "mobile": phone_number}
        data["tel"] = tel
        data["type"] = sms_type
        data["msg"] = msg
        # Request signature per API spec: sha256 over appkey, random, time, mobile.
        data["sig"] = hashlib.sha256("appkey=" + self.appkey + "&random=" + str(rnd)
            + "&time=" + str(cur_time) + "&mobile=" + phone_number).hexdigest()
        data["time"] = cur_time
        data["extend"] = extend
        data["ext"] = ext
        whole_url = self.url + "?sdkappid=" + str(self.appid) + "&random=" + str(rnd)
        result = self.util.send_post_request("yun.tim.qq.com", whole_url, data)
        obj = json.loads(result)
        if obj["result"] == 0 and obj["errmsg"] == "OK":
            return True, result
        else:
            return False, result
class SmsSenderUtil:
    """Helper utilities for the SMS senders (Python 2 only: uses httplib,
    long() and the `except Exception, e` syntax)."""
    def get_random(self):
        # 6-digit nonce required by the API.
        return random.randint(100000, 999999)
    def get_cur_time(self):
        # Unix timestamp as a Python 2 long.
        return long(time.time())
    def calculate_sig(self, appkey, rnd, cur_time, phone_numbers):
        """Compute the request signature for a list of phone numbers."""
        phone_numbers_string = phone_numbers[0]
        for i in range(1, len(phone_numbers)):
            phone_numbers_string += "," + phone_numbers[i]
        return hashlib.sha256("appkey=" + appkey + "&random=" + str(rnd) + "&time=" + str(cur_time)
            + "&mobile=" + phone_numbers_string).hexdigest()
    # def calculate_sig_for_templ_phone_numbers(self, appkey, rnd, cur_time, phone_numbers):
    #     """Compute the sig for a template send with a phone-number list."""
    #     phone_numbers_string = phone_numbers[0]
    #     for i in range(1, len(phone_numbers)):
    #         phone_numbers_string += "," + phone_numbers[i]
    #     return hashlib.sha256("appkey=" + appkey + "&random=" + str(rnd) + "&time="
    #         + str(cur_time) + "&mobile=" + phone_numbers_string).hexdigest()
    # def calculate_sig_for_templ(self, appkey, rnd, cur_time, phone_number):
    #     phone_numbers = [phone_number]
    #     return self.calculate_sig_for_templ_phone_numbers(appkey, rnd, cur_time, phone_numbers)
    # def phone_numbers_to_list(self, nation_code, phone_numbers):
    #     tel = []
    #     for phone_number in phone_numbers:
    #         tel.append({"nationcode": nation_code, "mobile":phone_number})
    #     return tel
    def send_post_request(self, host, url, data):
        """POST *data* as JSON to https://host/url.

        Returns the raw response body on HTTP 200; otherwise (or on any
        connection error) returns a JSON string with a negative "result"
        and an "errmsg" describing the failure, so callers always get JSON.
        """
        con = None
        try:
            con = httplib.HTTPSConnection(host)
            con.request('POST', url, json.dumps(data))
            response = con.getresponse()
            if '200' != str(response.status):
                obj = {}
                obj["result"] = -1
                obj["errmsg"] = "connect failed:\t"+str(response.status) + " " + response.reason
                result = json.dumps(obj)
            else:
                result = response.read()
        except Exception,e:
            obj = {}
            obj["result"] = -2
            obj["errmsg"] = "connect failed:\t" + str(e)
            result = json.dumps(obj)
        finally:
            # Always release the connection, even on failure.
            if con:
                con.close()
        return result
if __name__ == "__main__":
    # CLI entry point: send an alarm SMS. NOTE: SmsSingleSender.appid/appkey
    # are placeholders (0/"") and must be configured before this works.
    parser = argparse.ArgumentParser(description="script for sending alarm sms_type")
    parser.add_argument("subject", help="the subject of the alarm sms")
    parser.add_argument("content", help="the content of the alarm sms")
    parser.add_argument("receiver", help="the phone number who receive the sms")
    args = parser.parse_args()
    ss = SmsSingleSender(SmsSingleSender.appid, SmsSingleSender.appkey)
    # receiver is a JSON document; presumably {"phone": "..."} — confirm with caller.
    receiver = json.loads(args.receiver)
    status, response = ss.send(0, 86, receiver["phone"], "{0}{1}".format(SmsSingleSender.template, args.content), "", "")
    # Python 2 print statement; this script is Python 2 only.
    print status, response
| TalkingData/owl | src/controller/scripts/actions/send_tencent_sms.py | Python | apache-2.0 | 5,749 |
from ilogue.fexpect.api import expect, controlchar, expecting, run, sudo, local
| ilogue/fexpect | ilogue/fexpect/__init__.py | Python | bsd-2-clause | 80 |
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from geotrek.api.v2 import serializers as api_serializers, viewsets as api_viewsets, filters as api_filters
from geotrek.authent import models as authent_models
class StructureViewSet(api_viewsets.GeotrekViewSet):
    """Read-only API endpoints for ``Structure`` objects."""
    serializer_class = api_serializers.StructureSerializer
    queryset = authent_models.Structure.objects.all()
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.RelatedPortalStructureOrReservationSystemFilter,)

    def retrieve(self, request, pk=None, format=None):
        """Return a single structure, even one filtered out of the list view."""
        structure = get_object_or_404(authent_models.Structure, pk=pk)
        serialized = api_serializers.StructureSerializer(
            structure, many=False, context={'request': request})
        return Response(serialized.data)
| GeotrekCE/Geotrek-admin | geotrek/api/v2/views/authent.py | Python | bsd-2-clause | 891 |
from webtest import TestApp
import helloworld
import os
def test_index():
    """Smoke-test the hello-world handler under the GAE testbed."""
    # test_index.testbed is an attribute attached to this function —
    # presumably injected by the NoseGAE plugin before the test runs; confirm.
    test_index.testbed.init_taskqueue_stub(task_retry_seconds=42, root_path=os.path.dirname(__file__))
    app = TestApp(helloworld.app)
    # fires off a task queue and should pass without exceptions
    response = app.get('/')
    assert 'Hello world!' in str(response)
| Trii/NoseGAE | examples/function_manual_config/test.py | Python | bsd-2-clause | 348 |
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino_xl.lib.tickets.models import *
from lino.api import _
# Hide the 'closed' element on the Ticket model for this application.
Ticket.hide_elements('closed')
# class Ticket(Ticket):
#     class Meta(Ticket.Meta):
#         app_label = 'tickets'
#         verbose_name = _("Plea")
#         verbose_name_plural = _("Pleas")
#         abstract = dd.is_abstract_model(__name__, 'Ticket')
# ActiveTickets._label = _("Active pleas")
# UnassignedTickets._label = _("Unassigned pleas")
# PublicTickets._label = _("Public pleas")
# TicketsToTriage._label = _("Pleas to triage")
# TicketsToTalk._label = _("Pleas to talk")
# # TicketsToDo._label = _("Pleas to to")
# AllTickets._label = _("All pleas")
# Relabel the upgrade_notes field: in this site it holds the "Solution".
dd.update_field(
    'tickets.Ticket', 'upgrade_notes', verbose_name=_("Solution"))
# dd.update_field(
#     'tickets.Ticket', 'state', default=TicketStates.todo.as_callable)
class TicketDetail(TicketDetail):
    """Customized detail layout for tickets (panel strings are Lino layout
    specifications and must be left verbatim)."""
    main = "general history_tab more"
    # First tab: summary fields, votes, uploads, description and comments.
    general = dd.Panel("""
    general1:60 votes.VotesByVotable:20 uploads.UploadsByController
    description:30 comments.CommentsByRFC:30 skills.DemandsByDemander #working.SessionsByTicket:20
    """, label=_("General"))
    general1 = """
    summary:40 id:6 deadline
    user:12 end_user:12 #faculty #topic
    site workflow_buttons
    """
    # Change history; visible to triagers only.
    history_tab = dd.Panel("""
    changes.ChangesByMaster:50 #stars.StarsByController:20
    """, label=_("History"), required_roles=dd.login_required(Triager))
    # Additional metadata; visible to triagers only.
    more = dd.Panel("""
    more1:60 #skills.AssignableWorkersByTicket:20
    upgrade_notes LinksByTicket skills.OffersByDemander
    """, label=_("More"), required_roles=dd.login_required(Triager))
    more1 = """
    created modified ticket_type:10
    state priority project
    # standby feedback closed
    """
Tickets.detail_layout = TicketDetail()
| lino-framework/book | lino_book/projects/anna/lib/tickets/models.py | Python | bsd-2-clause | 1,862 |
# TODO inspect for Cython (see sagenb.misc.sageinspect)
from __future__ import print_function
from nose.plugins.skip import SkipTest
from nose.tools import assert_true
from os import path as op
import sys
import inspect
import warnings
import imp
from pkgutil import walk_packages
from inspect import getsource
import mne
from mne.utils import run_tests_if_main
from mne.fixes import _get_args
public_modules = [
    # the list of modules users need to access for all functionality
    'mne',
    'mne.beamformer',
    'mne.connectivity',
    'mne.datasets',
    'mne.datasets.megsim',
    'mne.datasets.sample',
    'mne.datasets.spm_face',
    'mne.decoding',
    'mne.filter',
    'mne.gui',
    'mne.inverse_sparse',
    'mne.io',
    'mne.io.kit',
    'mne.minimum_norm',
    'mne.preprocessing',
    'mne.realtime',
    'mne.report',
    'mne.simulation',
    'mne.source_estimate',
    'mne.source_space',
    'mne.stats',
    'mne.time_frequency',
    'mne.viz',
]
# docscrape lives in the doc/ tree, not the installed package, so load it by
# path; docscrape is None when running outside a source checkout (tests that
# need it are skipped in that case).
docscrape_path = op.join(op.dirname(__file__), '..', '..', 'doc', 'sphinxext',
                         'numpy_ext', 'docscrape.py')
if op.isfile(docscrape_path):
    docscrape = imp.load_source('docscrape', docscrape_path)
else:
    docscrape = None
def get_name(func):
    """Return the fully qualified dotted name of *func*.

    The name is ``module.[class.]funcname``; the module part is omitted
    when the module cannot be determined, and the class part is present
    only for Python 2 bound/unbound methods (``im_class``).
    """
    module = inspect.getmodule(func)
    pieces = [] if module is None else [module.__name__]
    if hasattr(func, 'im_class'):
        pieces.append(func.im_class.__name__)
    pieces.append(func.__name__)
    return '.'.join(pieces)
# functions to ignore args / docstring of
_docstring_ignores = [
    'mne.io.write',  # always ignore these
    'mne.fixes._in1d',  # fix function
    'mne.epochs.average_movements',  # deprecated pos param
]
# modules whose source legitimately contains tab characters (test data).
_tab_ignores = [
    'mne.channels.tests.test_montage',  # demo data has a tab
]
def check_parameters_match(func, doc=None):
    """Helper to check docstring, returns list of incorrect results.

    Parameters
    ----------
    func : callable
        The function/method whose signature is compared against its
        numpydoc "Parameters" section.
    doc : docscrape.FunctionDoc | None
        Pre-parsed docstring; parsed from ``func`` when None.

    Returns
    -------
    incorrect : list of str
        One message per mismatch (count or order); empty when consistent.
    """
    incorrect = []
    name_ = get_name(func)
    # Only check public mne functions; externals are vendored code.
    if not name_.startswith('mne.') or name_.startswith('mne.externals'):
        return incorrect
    if inspect.isdatadescriptor(func):
        return incorrect
    args = _get_args(func)
    # drop self
    if len(args) > 0 and args[0] == 'self':
        args = args[1:]
    if doc is None:
        # docscrape emits warnings for malformed docstrings; surface them
        # as hard errors so they get fixed.
        with warnings.catch_warnings(record=True) as w:
            doc = docscrape.FunctionDoc(func)
        if len(w):
            raise RuntimeError('Error for %s:\n%s' % (name_, w[0]))
    # check set
    param_names = [name for name, _, _ in doc['Parameters']]
    # clean up some docscrape output:
    param_names = [name.split(':')[0].strip('` ') for name in param_names]
    param_names = [name for name in param_names if '*' not in name]
    if len(param_names) != len(args):
        # Report the symmetric difference between documented and actual args.
        bad = str(sorted(list(set(param_names) - set(args)) +
                         list(set(args) - set(param_names))))
        if not any(d in name_ for d in _docstring_ignores) and \
                'deprecation_wrapped' not in func.__code__.co_name:
            incorrect += [name_ + ' arg mismatch: ' + bad]
    else:
        # Same count: verify the documented order matches the signature.
        for n1, n2 in zip(param_names, args):
            if n1 != n2:
                incorrect += [name_ + ' ' + n1 + ' != ' + n2]
    return incorrect
def test_docstring_parameters():
    """Test module docsting formatting.

    Walks every public module, checks each public class (constructor,
    methods, __call__) and each public function with
    ``check_parameters_match``, and fails with the full list of mismatches.
    """
    if docscrape is None:
        raise SkipTest('This must be run from the mne-python source directory')
    incorrect = []
    for name in public_modules:
        # Import the dotted module and walk down to the leaf submodule.
        module = __import__(name, globals())
        for submod in name.split('.')[1:]:
            module = getattr(module, submod)
        classes = inspect.getmembers(module, inspect.isclass)
        for cname, cls in classes:
            if cname.startswith('_'):
                continue
            with warnings.catch_warnings(record=True) as w:
                cdoc = docscrape.ClassDoc(cls)
            if len(w):
                raise RuntimeError('Error for __init__ of %s in %s:\n%s'
                                   % (cls, name, w[0]))
            if hasattr(cls, '__init__'):
                incorrect += check_parameters_match(cls.__init__, cdoc)
            for method_name in cdoc.methods:
                method = getattr(cls, method_name)
                incorrect += check_parameters_match(method)
            if hasattr(cls, '__call__'):
                incorrect += check_parameters_match(cls.__call__)
        functions = inspect.getmembers(module, inspect.isfunction)
        for fname, func in functions:
            if fname.startswith('_'):
                continue
            incorrect += check_parameters_match(func)
    msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
    if len(incorrect) > 0:
        raise AssertionError(msg)
def test_tabs():
    """Test that there are no tabs in our source files"""
    for importer, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
        if not ispkg and modname not in _tab_ignores:
            # mod = importlib.import_module(modname) # not py26 compatible!
            __import__(modname)  # because we don't import e.g. mne.tests w/mne
            mod = sys.modules[modname]
            source = getsource(mod)
            assert_true('\t' not in source,
                        '"%s" has tabs, please remove them or add it to the'
                        'ignore list' % modname)
run_tests_if_main()
| wronk/mne-python | mne/tests/test_docstring_parameters.py | Python | bsd-3-clause | 5,347 |
#!/usr/bin/env python
import argparse
import logging
import os
import tractconverter.info as info
import tractconverter
from tractconverter import FORMATS
from tractconverter import EXT_ANAT
# Script description shown by argparse --help; filled with the installed
# tractconverter version and the list of supported formats.
DESCRIPTION = """
TractInfo {0}.
Print info about a streamlines file.
Supported formats are {1}
""".format(info.__version__,
           ",".join(FORMATS.keys()))
#####
# Script part
###
def buildArgsParser():
    """Create and return the command-line parser for this script."""
    supported = ",".join(FORMATS.keys())
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('-i', action='store', dest='input',
                        metavar='FILE', required=True,
                        help='input track file ({0})'.format(supported))
    return parser
def main():
    """Parse the command line, validate the input file and print its info.

    Exits via ``parser.error`` when the file does not exist or its format
    is not one of the supported streamline formats.
    """
    parser = buildArgsParser()
    args = parser.parse_args()
    in_filename = args.input
    if not os.path.isfile(in_filename):
        parser.error('"{0}" must be an existing file!'.format(in_filename))
    if not tractconverter.is_supported(in_filename):
        parser.error('Input file must be one of {0}!'.format(",".join(FORMATS.keys())))
    inFormat = tractconverter.detect_format(in_filename)
    # Print info about the input file. The original used the Python 2
    # `print` statement, which is a SyntaxError under Python 3; the
    # parenthesized form below behaves identically on both 2 and 3.
    print(inFormat(in_filename, None))
# Script entry point.
if __name__ == "__main__":
    main()
| MarcCote/tractconverter | scripts/TractInfo.py | Python | bsd-3-clause | 1,223 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import importlib
from django.apps import apps
def get_seo_views(metadata_class):
    """Return the view names configured on *metadata_class*'s seo_views."""
    return get_view_names(metadata_class._meta.seo_views)
def get_view_names(seo_views):
    """Resolve ``seo_views`` entries into a flat list of view names.

    Each entry is either an installed-app label — in which case the names
    of all named URL patterns of that app's ``urls`` module are collected —
    or a literal view name, which is kept as-is. Entries that cannot be
    resolved to an app with usable URLs fall back to the literal name.
    """
    output = []
    for name in seo_views:
        try:
            app = apps.get_app_config(name).models_module
        except LookupError:
            # Not an installed app label (was a bare `except:`, which also
            # hid unrelated errors): treat it as a literal view name.
            output.append(name)
            continue
        if app is None:
            # App has no models module, so we cannot derive its package
            # path; fall back to the literal name instead of crashing on
            # `None.__name__` as the original did.
            output.append(name)
            continue
        app_name = app.__name__.split(".")[:-1]
        app_name.append("urls")
        try:
            urls = importlib.import_module(".".join(app_name)).urlpatterns
        except (ImportError, AttributeError):
            output.append(name)
        else:
            for url in urls:
                if getattr(url, 'name', None):
                    output.append(url.name)
    return output
| tangochin/django-seo | djangoseo/systemviews.py | Python | bsd-3-clause | 835 |
import ui_plot
import sys
import numpy
from PyQt4 import QtCore, QtGui
import PyQt4.Qwt5 as Qwt
from recorder import *
def plotSomething():
    """Refresh the FFT plot if new audio data is available.

    Uses the module-level globals created in the ``__main__`` block:
    ``SR`` (the recorder), ``c`` (the plot curve) and ``uiplot`` (the UI).
    """
    # Skip the redraw when no new audio has arrived since the last call.
    if SR.newAudio==False:
        return
    xs,ys=SR.fft()
    c.setData(xs,ys)
    uiplot.qwtPlot.replot()
    # Mark the buffer as consumed so the next timer tick waits for fresh data.
    SR.newAudio=False
if __name__ == "__main__":
    # Build the Qt application and the generated UI.
    app = QtGui.QApplication(sys.argv)
    win_plot = ui_plot.QtGui.QMainWindow()
    uiplot = ui_plot.Ui_win_plot()
    uiplot.setupUi(win_plot)
    uiplot.btnA.clicked.connect(plotSomething)
    #uiplot.btnB.clicked.connect(lambda: uiplot.timer.setInterval(100.0))
    #uiplot.btnC.clicked.connect(lambda: uiplot.timer.setInterval(10.0))
    #uiplot.btnD.clicked.connect(lambda: uiplot.timer.setInterval(1.0))
    # Curve that plotSomething() updates with the FFT of the latest audio.
    c=Qwt.QwtPlotCurve()
    c.attach(uiplot.qwtPlot)
    uiplot.qwtPlot.setAxisScale(uiplot.qwtPlot.yLeft, 0, 1000)
    # Poll for new audio roughly every millisecond via the Qt event loop.
    uiplot.timer = QtCore.QTimer()
    uiplot.timer.start(1.0)
    win_plot.connect(uiplot.timer, QtCore.SIGNAL('timeout()'), plotSomething)
    # Start continuous audio capture in the background.
    SR=SwhRecorder()
    SR.setup()
    SR.continuousStart()
    ### DISPLAY WINDOWS
    win_plot.show()
    code=app.exec_()
    # Stop the recorder before exiting.
    SR.close()
    sys.exit(code)
| LucidBlue/mykeepon-storyteller | src/realTimeAudio.py | Python | bsd-3-clause | 1,209 |
"""
Our version of sphinx-apidoc
@author : Spencer Lyon
@date : 2014-07-16
This file should be called from the command line. It accepts one
additional command line parameter. If we pass the parameter `single`
when running the file, this file will create a single directory named
modules where each module in quantecon will be documented. The index.rst
file will then contain a single list of all modules.
If no argument is passed or if the argument is anything other than
`single`, two directories will be created: models and tools. The models
directory will contain documentation instructions for the different
models in quantecon, whereas the tools directory will contain docs for
the tools in the package. The generated index.rst will then contain
two toctrees, one for models and one for tools.
Examples
--------
$ python qe_apidoc.py # generates the two separate directories
$ python qe_apidoc.py foo_bar # generates the two separate directories
$ python qe_apidoc.py single # generates the single directory
Notes
-----
1. This file can also be run from within ipython using the %%run magic.
To do this, use one of the commands above and replace `python` with
`%%run`
2. Models has been removed. But leaving infrastructure here for qe_apidoc
in the event we need it in the future
"""
import os
import sys
from glob import glob
######################
## String Templates ##
######################
module_template = """{mod_name}
{equals}
.. automodule:: quantecon.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
game_theory_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.game_theory.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
game_generators_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.game_theory.game_generators.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
markov_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.markov.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
optimize_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.optimize.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
random_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.random.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
util_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.util.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
all_index_template = """=======================
QuantEcon documentation
=======================
Auto-generated documentation by module:
.. toctree::
:maxdepth: 2
{generated}
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
split_index_template = """=======================
QuantEcon documentation
=======================
The `quantecon` python library consists of a number of modules which
includes game theory (game_theory), markov chains (markov), random
generation utilities (random), a collection of tools (tools),
and other utilities (util) which are
mainly used by developers internal to the package.
.. toctree::
:maxdepth: 2
game_theory
markov
optimize
random
tools
util
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
split_file_template = """{name}
{equals}
.. toctree::
:maxdepth: 2
{files}
"""
######################
## Helper functions ##
######################
def source_join(f_name):
    """Return the path of *f_name* inside the ``source`` directory."""
    source_dir = "source"
    return os.path.join(source_dir, f_name)
####################
## Main functions ##
####################
def all_auto():
    """Generate one rst stub per quantecon module plus a flat index.rst.

    NOTE(review): this calls ``gen_module(name, f)``, which is not defined
    anywhere in this file — running all_auto() would raise NameError; confirm
    whether this code path is still used.
    """
    # Get list of module names
    mod_names = glob("../quantecon/[a-z0-9]*.py")
    # NOTE(review): splitting on '/' assumes POSIX path separators.
    mod_names = list(map(lambda x: x.split('/')[-1], mod_names))
    # Ensure source/modules directory exists
    if not os.path.exists(source_join("modules")):
        os.makedirs(source_join("modules"))
    # Write file for each module
    for mod in mod_names:
        name = mod.split(".")[0] # drop .py ending
        new_path = os.path.join("source", "modules", name + ".rst")
        with open(new_path, "w") as f:
            gen_module(name, f)
    # write index.rst file to include these autogenerated files
    with open(source_join("index.rst"), "w") as index:
        generated = "\n ".join(list(map(lambda x: "modules/" + x.split(".")[0],
                                   mod_names)))
        temp = all_index_template.format(generated=generated)
        index.write(temp)
def model_tool():
    """Generate per-subpackage rst stubs and a split index.rst.

    For each quantecon subpackage (game_theory, markov, optimize, random,
    tools, util) this writes one rst file per module plus a subpackage page
    with a toctree, then writes the top-level index.rst.
    """
    # list file names with game_theory
    game_theory_files = glob("../quantecon/game_theory/[a-z0-9]*.py")
    game_theory = list(map(lambda x: x.split('/')[-1][:-3], game_theory_files))
    # Alphabetize
    game_theory.sort()
    # list file names with game_theory/game_generators
    game_generators_files = glob("../quantecon/game_theory/game_generators/[a-z0-9]*.py")
    game_generators = list(
        map(lambda x: x.split('/')[-1][:-3], game_generators_files))
    # Alphabetize
    game_generators.sort()
    # list file names with markov
    markov_files = glob("../quantecon/markov/[a-z0-9]*.py")
    markov = list(map(lambda x: x.split('/')[-1][:-3], markov_files))
    # Alphabetize
    markov.sort()
    # list file names with optimize
    optimize_files = glob("../quantecon/optimize/[a-z0-9]*.py")
    optimize = list(map(lambda x: x.split('/')[-1][:-3], optimize_files))
    # Alphabetize
    optimize.sort()
    # list file names with random
    random_files = glob("../quantecon/random/[a-z0-9]*.py")
    random = list(map(lambda x: x.split('/')[-1][:-3], random_files))
    # Alphabetize
    random.sort()
    # list file names of tools (base level modules)
    tool_files = glob("../quantecon/[a-z0-9]*.py")
    tools = list(map(lambda x: x.split('/')[-1][:-3], tool_files))
    # Alphabetize; version.py is excluded from the docs.
    tools.remove("version")
    tools.sort()
    # list file names of utilities
    util_files = glob("../quantecon/util/[a-z0-9]*.py")
    util = list(map(lambda x: x.split('/')[-1][:-3], util_files))
    # Alphabetize
    util.sort()
    # Make sure each output directory exists before writing stubs into it.
    for folder in ["game_theory", "markov", "optimize", "random", "tools", "util"]:
        if not os.path.exists(source_join(folder)):
            os.makedirs(source_join(folder))
    # Write file for each game_theory file
    for mod in game_theory:
        new_path = os.path.join("source", "game_theory", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(game_theory_module_template.format(mod_name=mod, equals=equals))
    for mod in game_generators:
        new_path = os.path.join("source", "game_theory", "game_generators", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(game_generators_module_template.format(
                mod_name=mod, equals=equals))
        #Add sudirectory to flat game_theory list for index file
        game_theory.append("game_generators/{}".format(mod))
    # Write file for each markov file
    for mod in markov:
        new_path = os.path.join("source", "markov", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(markov_module_template.format(mod_name=mod, equals=equals))
    # Write file for each optimize file
    for mod in optimize:
        new_path = os.path.join("source", "optimize", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(optimize_module_template.format(mod_name=mod, equals=equals))
    # Write file for each random file
    for mod in random:
        new_path = os.path.join("source", "random", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(random_module_template.format(mod_name=mod, equals=equals))
    # Write file for each tool (base level modules)
    for mod in tools:
        new_path = os.path.join("source", "tools", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(module_template.format(mod_name=mod, equals=equals))
    # Write file for each utility
    for mod in util:
        new_path = os.path.join("source", "util", mod + ".rst")
        with open(new_path, "w") as f:
            equals = "=" * len(mod)
            f.write(util_module_template.format(mod_name=mod, equals=equals))
    # write (index|models|tools).rst file to include autogenerated files
    with open(source_join("index.rst"), "w") as index:
        index.write(split_index_template)
    gt = "game_theory/" + "\n game_theory/".join(game_theory)
    mark = "markov/" + "\n markov/".join(markov)
    opti = "optimize/" + "\n optimize/".join(optimize)
    rand = "random/" + "\n random/".join(random)
    tlz = "tools/" + "\n tools/".join(tools)
    utls = "util/" + "\n util/".join(util)
    #-TocTree-#
    toc_tree_list = {"game_theory": gt,
                     "markov": mark,
                     "optimize" : opti,
                     "tools": tlz,
                     "random": rand,
                     "util": utls,
                     }
    for f_name in ("game_theory", "markov", "optimize", "random", "tools", "util"):
        with open(source_join(f_name + ".rst"), "w") as f:
            m_name = f_name
            if f_name == "game_theory":
                f_name = "Game Theory" #Produce Nicer Title for Game Theory Module
            if f_name == "util":
                f_name = "Utilities" #Produce Nicer Title for Utilities Module
            if f_name == "optimize":
                f_name = "Optimize"
            # NOTE(review): .capitalize() lowercases later words, so
            # "Game Theory" renders as "Game theory" — confirm intended.
            temp = split_file_template.format(name=f_name.capitalize(),
                                              equals="="*len(f_name),
                                              files=toc_tree_list[m_name])
            f.write(temp)
# Entry point: `python qe_apidoc.py single` builds the flat layout,
# anything else builds the split (per-subpackage) layout.
if __name__ == '__main__':
    if "single" in sys.argv[1:]:
        all_auto()
    else:
        model_tool()
| QuantEcon/QuantEcon.py | docs/qe_apidoc.py | Python | bsd-3-clause | 10,122 |
from __future__ import division, print_function
import numpy as np
import nose.tools as nt
import regreg.api as rr
from ..group_lasso import (group_lasso,
selected_targets,
full_targets,
debiased_targets)
from ...tests.instance import gaussian_instance
from ...tests.flags import SET_SEED
from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue
from ...algorithms.sqrt_lasso import choose_lambda, solve_sqrt_lasso
from ..randomization import randomization
from ...tests.decorators import rpy_test_safe
@set_seed_iftrue(SET_SEED)
def test_group_lasso(n=400,
                     p=100,
                     signal_fac=3,
                     s=5,
                     sigma=3,
                     target='full',
                     rho=0.4,
                     randomizer_scale=.75,
                     ndraw=100000):
    """
    Test group lasso.

    Fits a randomized group lasso (groups of size 2) on a Gaussian
    instance, forms the requested target ('full', 'selected' or
    'debiased') and returns the p-values split by null/non-null
    coordinates of the selected groups.
    """
    inst, const = gaussian_instance, group_lasso.gaussian
    signal = np.sqrt(signal_fac * np.log(p))
    X, Y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]
    orthogonal = True
    if orthogonal:
        # Replace X by an orthonormal basis and resample Y accordingly.
        X = np.linalg.svd(X, full_matrices=False)[0]
        Y = X.dot(beta) + sigma * np.random.standard_normal(n)
    n, p = X.shape
    sigma_ = np.std(Y)
    # Pairs of consecutive coordinates form the groups. Use the builtin
    # `int`: the `np.int` alias was removed in NumPy 1.24.
    groups = np.floor(np.arange(p)/2).astype(int)
    weights = {i: sigma_ * 2 * np.sqrt(2) for i in np.unique(groups)}
    conv = const(X,
                 Y,
                 groups,
                 weights,
                 randomizer_scale=randomizer_scale * sigma_)
    conv.fit()  # return value (signs) was unused
    nonzero = conv.selection_variable['directions'].keys()
    if target == 'full':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero,
                                      conv.penalty)
    elif target == 'selected':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    elif target == 'debiased':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = debiased_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    _, pval, intervals = conv.summary(observed_target,
                                      group_assignments,
                                      cov_target,
                                      cov_target_score,
                                      alternatives,
                                      ndraw=ndraw,
                                      compute_intervals=False)
    # Boolean mask of all coordinates belonging to a selected group.
    # Builtin `bool` replaces the removed `np.bool` alias.
    which = np.zeros(p, bool)
    for group in conv.selection_variable['directions'].keys():
        which_group = conv.penalty.groups == group
        which += which_group
    return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_lasso(n=400,
               p=200,
               signal_fac=1.5,
               s=5,
               sigma=3,
               target='full',
               rho=0.4,
               ndraw=10000):
    """
    Test group lasso with groups of size 1, ie lasso.

    Same structure as test_group_lasso but every coordinate is its own
    group, so the group lasso reduces to the ordinary lasso.
    """
    inst, const = gaussian_instance, group_lasso.gaussian
    signal = np.sqrt(signal_fac * np.log(p))
    X, Y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]
    n, p = X.shape
    sigma_ = np.std(Y)
    # One group per coordinate: the group lasso penalty becomes the lasso.
    groups = np.arange(p)
    weights = {i: sigma_ * 2 * np.sqrt(2) for i in np.unique(groups)}
    conv = const(X,
                 Y,
                 groups,
                 weights)
    conv.fit()  # return value (signs) was unused
    nonzero = conv.selection_variable['directions'].keys()
    if target == 'full':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero,
                                      conv.penalty)
    elif target == 'selected':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    elif target == 'debiased':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = debiased_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    _, pval, intervals = conv.summary(observed_target,
                                      group_assignments,
                                      cov_target,
                                      cov_target_score,
                                      alternatives,
                                      ndraw=ndraw,
                                      compute_intervals=False)
    # Builtin `bool` replaces the `np.bool` alias removed in NumPy 1.24.
    which = np.zeros(p, bool)
    for group in conv.selection_variable['directions'].keys():
        which_group = conv.penalty.groups == group
        which += which_group
    return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_mixed(n=400,
               p=200,
               signal_fac=1.5,
               s=5,
               sigma=3,
               target='full',
               rho=0.4,
               ndraw=10000):
    """
    Test group lasso with a mix of groups of size 1, and larger.

    The last 8 coordinates form two groups (sizes 5 and 3) and receive a
    strong added signal so that both groups are selected.
    """
    inst, const = gaussian_instance, group_lasso.gaussian
    signal = np.sqrt(signal_fac * np.log(p))
    X, Y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]
    n, p = X.shape
    sigma_ = np.std(Y)
    # Singleton groups everywhere except the two trailing multi-coordinate
    # groups labelled -1 (size 5) and -2 (size 3).
    groups = np.arange(p)
    groups[-5:] = -1
    groups[-8:-5] = -2
    Y += X[:,-8:].dot(np.ones(8)) * 5 # so we select the last two groups
    weights = {i: sigma_ * 2 * np.sqrt(2) for i in np.unique(groups)}
    conv = const(X,
                 Y,
                 groups,
                 weights)
    conv.fit()  # return value (signs) was unused
    nonzero = conv.selection_variable['directions'].keys()
    if target == 'full':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero,
                                      conv.penalty)
    elif target == 'selected':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    elif target == 'debiased':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = debiased_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    _, pval, intervals = conv.summary(observed_target,
                                      group_assignments,
                                      cov_target,
                                      cov_target_score,
                                      alternatives,
                                      ndraw=ndraw,
                                      compute_intervals=False)
    # Builtin `bool` replaces the `np.bool` alias removed in NumPy 1.24.
    which = np.zeros(p, bool)
    for group in conv.selection_variable['directions'].keys():
        which_group = conv.penalty.groups == group
        which += which_group
    return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_all_targets(n=100, p=20, signal_fac=1.5, s=5, sigma=3, rho=0.4):
    """Run test_group_lasso once for each supported target type."""
    for tgt in ('full', 'selected', 'debiased'):
        test_group_lasso(n=n, p=p, signal_fac=signal_fac, s=s,
                         sigma=sigma, rho=rho, target=tgt)
def main(nsim=500, n=200, p=50, target='full', sigma=3):
    """Simulate repeatedly, collecting null/alternative pvalues, and plot
    their empirical CDFs every few iterations.

    A failing simulation is skipped; previously the bare ``except: pass``
    fell through and either raised NameError (first iteration) or silently
    re-used the previous iteration's (p0, pA).
    """
    import matplotlib.pyplot as plt
    P0, PA = [], []
    from statsmodels.distributions import ECDF
    for i in range(nsim):
        try:
            p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma)
        except Exception:
            # best-effort simulation loop: skip failed draws entirely
            continue
        print(len(p0), len(pA))
        P0.extend(p0)
        PA.extend(pA)
        # drop numerically-zero null pvalues before summarising
        P0_clean = np.array(P0)
        P0_clean = P0_clean[P0_clean > 1.e-5] #
        print(np.mean(P0_clean), np.std(P0_clean), np.mean(np.array(PA) < 0.05), np.sum(np.array(PA) < 0.05) / (i+1), np.mean(np.array(P0) < 0.05), np.mean(P0_clean < 0.05), np.mean(np.array(P0) < 1e-5), 'null pvalue + power + failure')
        if i % 3 == 0 and i > 0:
            U = np.linspace(0, 1, 101)
            plt.clf()
            if len(P0_clean) > 0:
                plt.plot(U, ECDF(P0_clean)(U))
            if len(PA) > 0:
                plt.plot(U, ECDF(PA)(U), 'r')
            plt.plot([0, 1], [0, 1], 'k--')
            plt.savefig("plot.pdf")
    plt.show()
| selective-inference/selective-inference | selectinf/randomized/tests/test_group_lasso.py | Python | bsd-3-clause | 10,569 |
from __future__ import print_function
import os
import sys
import math
import collections
import numpy as np
import flopy
import matplotlib.pyplot as plt
# --modify default matplotlib settings
updates = {'font.family': ['Univers 57 Condensed', 'Arial'],
           'mathtext.default': 'regular',
           'pdf.compression': 0,
           'pdf.fonttype': 42,
           'legend.fontsize': 7,
           'axes.labelsize': 8,
           'xtick.labelsize': 7,
           'ytick.labelsize': 7}
plt.rcParams.update(updates)
# base working directory; created next to the current directory if missing
workspace = 'swiex5'
if not os.path.isdir(workspace):
    os.mkdir(workspace)
# command-line flags:
#   --clean     remove generated files and exit
#   --skipruns  reuse existing model output instead of re-running the models
#   --pdf       save the final figure as PDF instead of PNG
cleanFiles = False
skipRuns = False
fext = 'png'
narg = len(sys.argv)
iarg = 0
if narg > 1:
    while iarg < narg - 1:
        iarg += 1
        basearg = sys.argv[iarg].lower()
        if basearg == '--clean':
            cleanFiles = True
        elif basearg == '--skipruns':
            skipRuns = True
        elif basearg == '--pdf':
            fext = 'pdf'
# output directories for the two model variants
dirs = [os.path.join(workspace, 'SWI2'), os.path.join(workspace, 'SEAWAT')]
if cleanFiles:
    print('cleaning all files')
    print('excluding *.py files')
    # keys 0/1 -> the model subdirectories, -1 -> the workspace itself
    file_dict = collections.OrderedDict()
    file_dict[0] = os.listdir(dirs[0])
    file_dict[1] = os.listdir(dirs[1])
    file_dict[-1] = os.listdir(workspace)
    for key, files in list(file_dict.items()):
        # bug fix: key -1 entries are listed from `workspace` but were
        # previously joined with '.', so workspace files were never removed
        pth = workspace
        if key >= 0:
            pth = dirs[key]
        for f in files:
            fpth = os.path.join(pth, f)
            if os.path.isdir(fpth):
                continue
            if '.py' != os.path.splitext(f)[1].lower():
                print('  removing...{}'.format(os.path.basename(f)))
                try:
                    os.remove(fpth)
                except OSError:
                    # best-effort cleanup: skip locked/missing files
                    pass
    for d in dirs:
        if os.path.exists(d):
            os.rmdir(d)
    # NOTE(review): exits with status 1 even on successful cleaning;
    # kept for backward compatibility with existing callers
    sys.exit(1)
# --make working directories
for d in dirs:
    if not os.path.exists(d):
        os.mkdir(d)
# --problem data
# NOTE: np.float / np.int were removed in NumPy 1.24; the builtin float/int
# are the documented drop-in replacements and are used throughout below.
nlay = 6
nrow = 1
ncol = 113
delr = np.zeros((ncol), float)
delc = 1.
r = np.zeros((ncol), float)       # radial distance to cell center
x = np.zeros((ncol), float)       # horizontal distance to cell center
edge = np.zeros((ncol), float)    # cumulative distance to right cell edge
dx = 25.0
for i in range(0, ncol):
    delr[i] = dx
r[0] = delr[0] / 2.0
for i in range(1, ncol):
    r[i] = r[i - 1] + (delr[i - 1] + delr[i]) / 2.0
x[0] = delr[0] / 2.0
for i in range(1, ncol):
    x[i] = x[i - 1] + (delr[i - 1] + delr[i]) / 2.0
edge[0] = delr[0]
for i in range(1, ncol):
    edge[i] = edge[i - 1] + delr[i]
# constant data for all simulations
nper = 2
perlen = [1460, 1460]
nstp = [1460, 1460]
steady = True
nsave_zeta = 8
ndecay = 4
# constant-head boundary in the last column of every layer
ibound = np.ones((nlay, nrow, ncol), int)
for k in range(0, nlay):
    ibound[k, 0, ncol - 1] = -1
bot = np.zeros((nlay, nrow, ncol), float)
dz = 100. / float(nlay - 1)
zall = -np.arange(0, 100 + dz, dz)
zall = np.append(zall, -120.)
tb = -np.arange(dz, 100 + dz, dz)
tb = np.append(tb, -120.)
for k in range(0, nlay):
    for i in range(0, ncol):
        bot[k, 0, i] = tb[k]
# SWI2 source/sink types: 1 in the boundary column, 2 in the bottom layer
isource = np.zeros((nlay, nrow, ncol), int)
isource[:, 0, ncol - 1] = 1
isource[nlay - 1, 0, ncol - 1] = 2
# hydraulic properties converted from intrinsic permeability to m/day
khb = (0.0000000000256 * 1000. * 9.81 / 0.001) * 60 * 60 * 24
kvb = (0.0000000000100 * 1000. * 9.81 / 0.001) * 60 * 60 * 24
ssb = 1e-5
sszb = 0.2
kh = np.zeros((nlay, nrow, ncol), float)
kv = np.zeros((nlay, nrow, ncol), float)
ss = np.zeros((nlay, nrow, ncol), float)
ssz = np.zeros((nlay, nrow, ncol), float)
for k in range(0, nlay):
    for i in range(0, ncol):
        # scale by the circumference to mimic radial flow on a row model
        f = r[i] * 2.0 * math.pi
        kh[k, 0, i] = khb * f
        kv[k, 0, i] = kvb * f
        ss[k, 0, i] = ssb * f
        ssz[k, 0, i] = sszb * f
# initial interface elevation
z = np.ones((nlay), float)
z = -100. * z
# number of layers penetrated by the well (down to wellbtm)
nwell = 1
for k in range(0, nlay):
    if zall[k] > -20. and zall[k + 1] <= -20:
        nwell = k + 1
print('nlay={} dz={} nwell={}'.format(nlay, dz, nwell))
wellQ = -2400.
wellbtm = -20.0
wellQpm = wellQ / abs(wellbtm)  # pumping rate per meter of screen
well_data = {}
for ip in range(0, nper):
    welllist = np.zeros((nwell, 4), float)
    for iw in range(0, nwell):
        if ip == 0:
            # distribute the withdrawal over the screened thickness
            b = zall[iw] - zall[iw + 1]
            if zall[iw + 1] < wellbtm:
                b = zall[iw] - wellbtm
            q = wellQpm * b
        else:
            # second stress period: recovery (no pumping)
            q = 0.0
        welllist[iw, 0] = iw
        welllist[iw, 1] = 0
        welllist[iw, 2] = 0
        welllist[iw, 3] = q
    well_data[ip] = welllist.copy()
ihead = np.zeros((nlay), float)
# save heads once every 365 time steps
ocspd = {}
for i in range(0, nper):
    icnt = 0
    for j in range(0, nstp[i]):
        icnt += 1
        if icnt == 365:
            ocspd[(i, j)] = ['save head']
            icnt = 0
        else:
            ocspd[(i, j)] = []
solver2params = {'mxiter': 100, 'iter1': 20, 'npcond': 1, 'zclose': 1.0e-6,
                 'rclose': 3e-3, 'relax': 1.0,
                 'nbpol': 2, 'damp': 1.0, 'dampt': 1.0}
# --create model file and run model
modelname = 'swi2ex5'
mf_name = 'mf2005'
if not skipRuns:
    # assemble the MODFLOW-2005 + SWI2 model from the data built above
    ml = flopy.modflow.Modflow(modelname, version='mf2005', exe_name=mf_name,
                               model_ws=dirs[0])
    discret = flopy.modflow.ModflowDis(ml, nrow=nrow, ncol=ncol, nlay=nlay,
                                       delr=delr, delc=delc, top=0, botm=bot,
                                       laycbd=0, nper=nper, perlen=perlen,
                                       nstp=nstp, steady=steady)
    bas = flopy.modflow.ModflowBas(ml, ibound=ibound, strt=ihead)
    lpf = flopy.modflow.ModflowLpf(ml, hk=kh, vka=kv, ss=ss, sy=ssz, vkcb=0,
                                   laytyp=0, layavg=1)
    wel = flopy.modflow.ModflowWel(ml, stress_period_data=well_data)
    swi = flopy.modflow.ModflowSwi2(ml, iswizt=55, npln=1, istrat=1,
                                    toeslope=0.025, tipslope=0.025,
                                    nu=[0, 0.025], zeta=z, ssz=ssz,
                                    isource=isource, nsolver=2,
                                    solver2params=solver2params)
    oc = flopy.modflow.ModflowOc(ml, stress_period_data=ocspd)
    pcg = flopy.modflow.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3,
                                   mxiter=100, iter1=50)
    # --write the modflow files
    ml.write_input()
    m = ml.run_model(silent=False)
# --read model zeta
# yearly snapshots: 4 during withdrawal (period 0), 4 during recovery (period 1)
get_stp = [364, 729, 1094, 1459, 364, 729, 1094, 1459]
get_per = [0, 0, 0, 0, 1, 1, 1, 1]
nswi_times = len(get_per)
zetafile = os.path.join(dirs[0], '{}.zta'.format(modelname))
zobj = flopy.utils.CellBudgetFile(zetafile)
zeta = []
for kk in zip(get_stp, get_per):
    zeta.append(zobj.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta = np.array(zeta)
# --seawat input - redefine input data that differ from SWI2
# NOTE: np.float / np.int were removed in NumPy 1.24; builtins used instead.
nlay_swt = 120
# --mt3d print times
timprs = (np.arange(8) + 1) * 365.
nprs = len(timprs)
# --
ndecay = 4
ibound = np.ones((nlay_swt, nrow, ncol), 'int')
for k in range(0, nlay_swt):
    ibound[k, 0, ncol - 1] = -1
bot = np.zeros((nlay_swt, nrow, ncol), float)
zall = [0, -20., -40., -60., -80., -100., -120.]
dz = 120. / nlay_swt
tb = np.arange(nlay_swt) * -dz - dz
sconc = np.zeros((nlay_swt, nrow, ncol), float)
icbund = np.ones((nlay_swt, nrow, ncol), int)
strt = np.zeros((nlay_swt, nrow, ncol), float)
pressure = 0.
g = 9.81
z = - dz / 2. # cell center
for k in range(0, nlay_swt):
    for i in range(0, ncol):
        bot[k, 0, i] = tb[k]
    # fresh water above -100 m, seawater-equivalent concentration below
    if bot[k, 0, 0] >= -100.:
        sconc[k, 0, :] = 0. / 3. * .025 * 1000. / .7143
    else:
        sconc[k, 0, :] = 3. / 3. * .025 * 1000. / .7143
    icbund[k, 0, -1] = -1
    # accumulate hydrostatic pressure to derive equivalent freshwater heads
    dense = 1000. + 0.7143 * sconc[k, 0, 0]
    pressure += 0.5 * dz * dense * g
    if k > 0:
        z = z - dz
        denseup = 1000. + 0.7143 * sconc[k - 1, 0, 0]
        pressure += 0.5 * dz * denseup * g
    strt[k, 0, :] = z + pressure / dense / g
    # print z, pressure, strt[k, 0, 0], sconc[k, 0, 0]
# hydraulic properties (same base values as the SWI2 model)
khb = (0.0000000000256 * 1000. * 9.81 / 0.001) * 60 * 60 * 24
kvb = (0.0000000000100 * 1000. * 9.81 / 0.001) * 60 * 60 * 24
ssb = 1e-5
sszb = 0.2
kh = np.zeros((nlay_swt, nrow, ncol), float)
kv = np.zeros((nlay_swt, nrow, ncol), float)
ss = np.zeros((nlay_swt, nrow, ncol), float)
ssz = np.zeros((nlay_swt, nrow, ncol), float)
for k in range(0, nlay_swt):
    for i in range(0, ncol):
        # scale by the circumference to mimic radial flow on a row model
        f = r[i] * 2.0 * math.pi
        kh[k, 0, i] = khb * f
        kv[k, 0, i] = kvb * f
        ss[k, 0, i] = ssb * f
        ssz[k, 0, i] = sszb * f
# wells and ssm data
itype = flopy.mt3d.Mt3dSsm.itype_dict()
nwell = 1
for k in range(0, nlay_swt):
    if bot[k, 0, 0] >= -20.:
        nwell = k + 1
print('nlay_swt={} dz={} nwell={}'.format(nlay_swt, dz, nwell))
well_data = {}
ssm_data = {}
wellQ = -2400.
wellbtm = -20.0
wellQpm = wellQ / abs(wellbtm)  # pumping rate per meter of screen
for ip in range(0, nper):
    welllist = np.zeros((nwell, 4), float)
    ssmlist = []
    for iw in range(0, nwell):
        # pump during period 0, recover (q=0) during period 1
        if ip == 0:
            q = wellQpm * dz
        else:
            q = 0.0
        welllist[iw, 0] = iw
        welllist[iw, 1] = 0
        welllist[iw, 2] = 0
        welllist[iw, 3] = q
        ssmlist.append([iw, 0, 0, 0., itype['WEL']])
    well_data[ip] = welllist.copy()
    ssm_data[ip] = ssmlist
# Define model name for SEAWAT model
modelname = 'swi2ex5_swt'
swtexe_name = 'swt_v4'
# Create the MODFLOW model data
if not skipRuns:
    m = flopy.seawat.Seawat(modelname, exe_name=swtexe_name, model_ws=dirs[1])
    discret = flopy.modflow.ModflowDis(m, nrow=nrow, ncol=ncol, nlay=nlay_swt,
                                       delr=delr, delc=delc, top=0, botm=bot,
                                       laycbd=0, nper=nper, perlen=perlen,
                                       nstp=nstp, steady=True)
    bas = flopy.modflow.ModflowBas(m, ibound=ibound, strt=strt)
    lpf = flopy.modflow.ModflowLpf(m, hk=kh, vka=kv, ss=ss, sy=ssz, vkcb=0,
                                   laytyp=0, layavg=1)
    wel = flopy.modflow.ModflowWel(m, stress_period_data=well_data)
    oc = flopy.modflow.ModflowOc(m, save_every=365, save_types=['save head'])
    pcg = flopy.modflow.ModflowPcg(m, hclose=1.0e-5, rclose=3.0e-3, mxiter=100,
                                   iter1=50)
    # Create the basic MT3DMS model data
    adv = flopy.mt3d.Mt3dAdv(m, mixelm=-1,
                             percel=0.5,
                             nadvfd=0,
                             # 0 or 1 is upstream; 2 is central in space
                             # particle based methods
                             nplane=4,
                             mxpart=1e7,
                             itrack=2,
                             dceps=1e-4,
                             npl=16,
                             nph=16,
                             npmin=8,
                             npmax=256)
    btn = flopy.mt3d.Mt3dBtn(m, icbund=icbund, prsity=ssz, ncomp=1,
                             sconc=sconc,
                             ifmtcn=-1,
                             chkmas=False, nprobs=10, nprmas=10, dt0=1.0,
                             ttsmult=1.0,
                             nprs=nprs, timprs=timprs, mxstrn=1e8)
    dsp = flopy.mt3d.Mt3dDsp(m, al=0., trpt=1., trpv=1., dmcoef=0.)
    gcg = flopy.mt3d.Mt3dGcg(m, mxiter=1, iter1=50, isolve=1, cclose=1e-7)
    ssm = flopy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data)
    # Create the SEAWAT model data
    vdf = flopy.seawat.SeawatVdf(m, iwtable=0, densemin=0, densemax=0,
                                 denseref=1000., denseslp=0.7143, firstdt=1e-3)
    # write seawat files
    m.write_input()
    # Run SEAWAT
    m = m.run_model(silent=False)
# plot the results
# read seawat model data
ucnfile = os.path.join(dirs[1], 'MT3D001.UCN')
uobj = flopy.utils.UcnFile(ucnfile)
times = uobj.get_times()
print(times)
# np.float was removed in NumPy 1.24; builtin float is the replacement
conc = np.zeros((len(times), nlay_swt, ncol), float)
for idx, tt in enumerate(times):
    c = uobj.get_data(totim=tt)
    for ilay in range(0, nlay_swt):
        for jcol in range(0, ncol):
            conc[idx, ilay, jcol] = c[ilay, 0, jcol]
# spatial data
# swi2
bot = np.zeros((1, ncol, nlay), float)
dz = 100. / float(nlay - 1)
zall = -np.arange(0, 100 + dz, dz)
zall = np.append(zall, -120.)
tb = -np.arange(dz, 100 + dz, dz)
tb = np.append(tb, -120.)
for k in range(0, nlay):
    for i in range(0, ncol):
        bot[0, i, k] = tb[k]
# seawat: cell-center elevations for contouring
swt_dz = 120. / nlay_swt
swt_tb = np.zeros((nlay_swt), float)
zc = -swt_dz / 2.0
for klay in range(0, nlay_swt):
    swt_tb[klay] = zc
    zc -= swt_dz
X, Z = np.meshgrid(x, swt_tb)
# Make figure
fwid, fhgt = 6.5, 6.5
flft, frgt, fbot, ftop = 0.125, 0.95, 0.125, 0.925
eps = 1.0e-3
lc = ['r', 'c', 'g', 'b', 'k']
cfig = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
inc = 1.0e-3
xsf = plt.figure(figsize=(fwid, fhgt), facecolor='w')
xsf.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt,
                    bottom=fbot, top=ftop)
# withdrawal and recovery titles (left column = withdrawal, right = recovery)
ax = xsf.add_subplot(4, 2, 1)
ax.text(0.0, 1.03, 'Withdrawal', transform=ax.transAxes, va='bottom',
        ha='left', size='8')
ax = xsf.add_subplot(4, 2, 2)
ax.text(0.0, 1.03, 'Recovery', transform=ax.transAxes, va='bottom', ha='left',
        size='8')
# dummy items for legend
ax = xsf.add_subplot(4, 2, 1)
ax.plot([-1, -1], [-1, -1], 'bo', markersize=3, markeredgecolor='blue',
        markerfacecolor='None', label='SWI2 interface')
ax.plot([-1, -1], [-1, -1], color='k', linewidth=0.75, linestyle='solid',
        label='SEAWAT 50% seawater')
ax.plot([-1, -1], [-1, -1], marker='s', color='k', linewidth=0,
        linestyle='none', markeredgecolor='w',
        markerfacecolor='0.75', label='SEAWAT 5-95% seawater')
leg = ax.legend(loc='upper left', numpoints=1, ncol=1, labelspacing=0.5,
                borderaxespad=1, handlelength=3)
# use the public API instead of the removed private _drawFrame attribute
leg.set_frame_on(False)
# data items
for itime in range(0, nswi_times):
    # np.float was removed in NumPy 1.24; builtin float is the replacement
    zb = np.zeros((ncol), float)
    zs = np.zeros((ncol), float)
    for icol in range(0, ncol):
        for klay in range(0, nlay):
            # top and bottom of layer
            ztop = float('{0:10.3e}'.format(zall[klay]))
            zbot = float('{0:10.3e}'.format(zall[klay + 1]))
            # fresh-salt zeta surface
            zt = zeta[itime, klay, 0, icol]
            if (ztop - zt) > eps:
                zs[icol] = zt
    # first 4 snapshots go in the left column, last 4 in the right column
    if itime < ndecay:
        ic = itime
        isp = ic * 2 + 1
        ax = xsf.add_subplot(4, 2, isp)
    else:
        ic = itime - ndecay
        isp = (ic * 2) + 2
        ax = xsf.add_subplot(4, 2, isp)
    # figure title
    ax.text(-0.15, 1.025, cfig[itime], transform=ax.transAxes, va='center',
            ha='center', size='8')
    # swi2
    ax.plot(x, zs, 'bo', markersize=3, markeredgecolor='blue',
            markerfacecolor='None', label='_None')
    # seawat
    sc = ax.contour(X, Z, conc[itime, :, :], levels=[17.5], colors='k',
                    linestyles='solid', linewidths=0.75, zorder=30)
    cc = ax.contourf(X, Z, conc[itime, :, :], levels=[0.0, 1.75, 33.250],
                     colors=['w', '0.75', 'w'])
    # set graph limits
    ax.set_xlim(0, 500)
    ax.set_ylim(-100, -65)
    if itime < ndecay:
        ax.set_ylabel('Elevation, in meters')
# x labels
ax = xsf.add_subplot(4, 2, 7)
ax.set_xlabel('Horizontal distance, in meters')
ax = xsf.add_subplot(4, 2, 8)
ax.set_xlabel('Horizontal distance, in meters')
# simulation time titles
for itime in range(0, nswi_times):
    if itime < ndecay:
        ic = itime
        isp = ic * 2 + 1
        ax = xsf.add_subplot(4, 2, isp)
    else:
        ic = itime - ndecay
        isp = (ic * 2) + 2
        ax = xsf.add_subplot(4, 2, isp)
    iyr = itime + 1
    if iyr > 1:
        ctxt = '{} years'.format(iyr)
    else:
        ctxt = '{} year'.format(iyr)
    ax.text(0.95, 0.925, ctxt, transform=ax.transAxes, va='top', ha='right',
            size='8')
outfig = os.path.join(workspace, 'Figure11_swi2ex5.{0}'.format(fext))
xsf.savefig(outfig, dpi=300)
print('created...', outfig)
| bdestombe/flopy-1 | examples/scripts/flopy_swi2_ex5.py | Python | bsd-3-clause | 15,804 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import roslib; roslib.load_manifest('zaber_stage')
import rospy
import actionlib
from geometry_msgs.msg import Pose
from zaber_stage.msg import PoseAndDebugInfo
from zaber_stage.srv import GetPoseAndDebugInfo,GetPoseAndDebugInfoResponse
def pose_publisher():
    """Poll the zaber_stage pose/debug service at 4 Hz and republish.

    Publishes the plain Pose and the full PoseAndDebugInfo message, but
    only when the stage did not report a response error.
    """
    rospy.init_node('zaber_stage_pose_and_debug_publisher')
    rospy.loginfo('zaber_stage pose_and_debug_publisher...')
    rate = rospy.Rate(4)  # poll/publish at 4 Hz
    pub_pose = rospy.Publisher('/zaber_stage_node/pose',Pose,queue_size=10)
    pub_pose_and_debug = rospy.Publisher('/zaber_stage_node/pose_and_debug_info',PoseAndDebugInfo,queue_size=10)
    rospy.wait_for_service('/zaber_stage_node/get_pose_and_debug_info')
    # persistent connection avoids re-resolving the service on every call
    get_pose_and_debug_info = rospy.ServiceProxy('/zaber_stage_node/get_pose_and_debug_info',GetPoseAndDebugInfo,persistent=True)
    while not rospy.is_shutdown():
        try:
            response = get_pose_and_debug_info()
            if not response.pose_and_debug_info.zaber_response_error:
                pub_pose.publish(response.pose_and_debug_info.pose)
                pub_pose_and_debug.publish(response.pose_and_debug_info)
        # 'except E as e' replaces the Python-2-only 'except E, e' syntax
        # (SyntaxError under Python 3; 'as' works on 2.6+ as well)
        except rospy.ServiceException as e:
            rospy.logwarn('zaber_stage pose_and_debug_publisher service call failed! %s'%e)
            print("Service call failed: %s"%e)
        rate.sleep()
if __name__ == '__main__':
    try:
        pose_publisher()
    except rospy.ROSInterruptException:
        # normal shutdown path: rospy raises this when sleep/wait is
        # interrupted by node shutdown
        pass
| janelia-ros/zaber_stage_ros | nodes/pose_and_debug_publisher.py | Python | bsd-3-clause | 1,472 |
from django.http import QueryDict
from django_facebook import settings as facebook_settings
def generate_oauth_url(scope=facebook_settings.FACEBOOK_DEFAULT_SCOPE,
                       next=None, extra_data=None):
    """Build the Facebook OAuth dialog URL.

    ``scope`` is joined into a comma-separated permission list; ``next``
    overrides the default canvas page as the redirect URI; ``extra_data``
    adds arbitrary extra query parameters.
    """
    if next is not None:
        redirect_uri = next
    else:
        redirect_uri = facebook_settings.FACEBOOK_CANVAS_PAGE
    params = QueryDict('', True)
    params.update(dict(client_id=facebook_settings.FACEBOOK_APP_ID,
                       redirect_uri=redirect_uri,
                       scope=','.join(scope)))
    if extra_data:
        params.update(extra_data)
    return 'https://www.facebook.com/dialog/oauth?%s' % (params.urlencode(),)
| fogcitymarathoner/djfb | facebook_example/django_facebook/canvas.py | Python | bsd-3-clause | 728 |
# -*- coding: utf-8 *-*
from pyglet.window import key
from pyglet import clock
from . import util, physicalobject
from . import resources
class Ship(physicalobject.PhysicalObject):
    """A class for the player"""

    def __init__(self, thrust_image=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set some easy-to-tweak constants
        # play values
        self.rotate_speed = 170.0       # turn rate while a key is held
        self.bullet_speed = 500.0       # bullet speed added to ship velocity
        self.thrust_acc = 500           # acceleration while thrusting
        self.friction = 0.95            # per-second velocity damping factor
        self.bullet_duration = 0.6      # bullet lifespan in seconds
        self.thrust = False
        # sprite swapped in while the engine fires; None keeps normal image
        self.thrust_image = thrust_image
        self.normal_image = self.image
        self.bullets = set()  # FIXME: bullet by OOT

    def on_key_press(self, symbol, modifiers):
        # pyglet keyboard handler: map keys to ship actions
        if symbol == key.SPACE:
            self.shoot()
        elif symbol == key.LEFT:
            self.turn(-1)
        elif symbol == key.RIGHT:
            self.turn(1)
        elif symbol == key.UP:
            self.set_thrust(True)

    def on_key_release(self, symbol, modifiers):
        # releasing a turn key stops rotation; releasing UP cuts thrust
        if symbol in (key.LEFT, key.RIGHT):
            self.turn(0)
        elif symbol == key.UP:
            self.set_thrust(False)

    def update(self, dt):
        super().update(dt)
        # show the engine flame while thrusting (if an image was supplied)
        if self.thrust and self.thrust_image:
            self.image = self.thrust_image
        else:
            self.image = self.normal_image
        # update velocity
        if self.thrust:
            acc = util.angle_to_vector(self.rotation)
            for i in (0,1):
                self.vel[i] += acc[i] * self.thrust_acc * dt
        # add friction
        for i in (0,1):
            self.vel[i] *= (1 - self.friction * dt)
        # iterate over a copy so expired bullets can be removed safely
        for bullet in set(self.bullets):
            if bullet.update(dt):
                self.bullets.remove(bullet)
                # NOTE(review): returns after removing the first expired
                # bullet, so remaining bullets are not updated this frame
                # -- confirm the early return is intended
                return False

    def set_thrust(self, on):
        # toggle engine state and the looping thrust sound
        self.thrust = on
        if on:
            resources.thrust_sound.seek(0)
            resources.thrust_sound.play()
        else:
            resources.thrust_sound.pause()

    def turn(self, clockwise):
        # clockwise is -1, 0 or 1; scaled to the configured turn rate
        self.rotation_speed = clockwise * self.rotate_speed

    def shoot(self):
        """Spawn a bullet at the ship's nose, inheriting ship velocity."""
        resources.bullet_sound.play()
        forward = util.angle_to_vector(self.rotation)
        bullet_pos = [self.x + self.radius * forward[0], self.y + self.radius * forward[1]]
        bullet_vel = [self.vel[0] + self.bullet_speed * forward[0], self.vel[1] + self.bullet_speed * forward[1]]
        bullet = physicalobject.PhysicalObject(lifespan=self.bullet_duration, vel=bullet_vel, x=bullet_pos[0], y=bullet_pos[1],
            img=resources.shot_image, batch=self.batch, group=self.group, screensize=self.screensize)
        self.bullets.add(bullet)

    def destroy(self):
        # check invulnerability
        # (reduced opacity marks the ship as temporarily invulnerable)
        if self.opacity != 255:
            return
        explosion = super().destroy()
        # respawn in the center of the screen, facing up, at rest
        self.rotation = -90
        self.x = self.screensize[0] / 2
        self.y = self.screensize[1] / 2
        self.vel = [0, 0]
        self.set_thrust(False)
        self.visible = True
        return explosion

    def normal_mode(self, dt):
        # scheduled callback: restore full opacity / vulnerability
        self.opacity = 255

    def invulnerable(self, time):
        # be invulnerable for a brief time
        self.opacity = 128
        clock.schedule_once(self.normal_mode, time)
| mammadori/asteroids | game/ship.py | Python | bsd-3-clause | 3,293 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from acl.models import Role
from communities.models import Community, CommunityGroup, Committee, CommunityGroupRole
from django.db import models, migrations
def create_default_groups(apps, schema_editor):
    """Data migration: give every existing community the pre-2015 default
    groups (chairman/board/member/administrator), matching roles, and
    committee group-role bindings.

    NOTE(review): this uses the *current* model classes (Community, Role,
    CommunityGroup, ...) imported at module level instead of
    apps.get_model(); if those models change in later app versions this
    historical migration may stop working -- confirm this is acceptable.
    """
    # NOTE(review): Membership is fetched but never used below
    Membership = apps.get_model("users", "Membership")
    communities = Community.objects.all()
    committees = Committee.objects.all()
    for c in communities:
        # Creating groups to existing communities, similar to what they have before.
        CommunityGroup.objects.bulk_create([
            CommunityGroup(community=c, title='chairman', _order=0),
            CommunityGroup(community=c, title='board', _order=1),
            CommunityGroup(community=c, title='member', _order=2),
            CommunityGroup(community=c, title='administrator', _order=3)
        ])
        # Creating roles for existing communities, similar to what they have before.
        Role.objects.bulk_create([
            Role(community=c, title='chairman', based_on='manager'),
            Role(community=c, title='board', based_on='participant'),
            Role(community=c, title='member', based_on='observer')
        ])
    for c in committees:
        # Creating community group roles.
        CommunityGroupRole.objects.bulk_create([
            CommunityGroupRole(committee=c, role=c.community.roles.get(title="chairman"),
                               group=c.community.groups.get(title="chairman")),
            CommunityGroupRole(committee=c, role=c.community.roles.get(title="board"),
                               group=c.community.groups.get(title="board")),
            CommunityGroupRole(committee=c, role=c.community.roles.get(title="member"),
                               group=c.community.groups.get(title="member"))
        ])
class Migration(migrations.Migration):
    """Run the create_default_groups data migration."""

    dependencies = [
        ('users', '0002_auto_20150607_1350'),
    ]

    operations = [
        migrations.RunPython(create_default_groups),
    ]
| nonZero/OpenCommunity | src/users/migrations/0003_auto_20150607_1606.py | Python | bsd-3-clause | 2,009 |
# -*- coding: utf-8 -*-
"""
wakatime.offlinequeue
~~~~~~~~~~~~~~~~~~~~~
Queue for saving heartbeats while offline.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from time import sleep
from .compat import json
from .constants import DEFAULT_SYNC_OFFLINE_ACTIVITY, HEARTBEATS_PER_REQUEST
from .heartbeat import Heartbeat
try:
    import sqlite3
    HAS_SQL = True
except ImportError:  # pragma: nocover
    # some embedded/stripped Python builds ship without sqlite3; the queue
    # then degrades to a no-op (see Queue.push/pop)
    HAS_SQL = False

log = logging.getLogger('WakaTime')
class Queue(object):
    """SQLite-backed queue of heartbeats captured while offline.

    Heartbeats are stored as JSON strings keyed by their id and replayed
    later one at a time (or in batches via pop_many).
    """

    # NOTE(review): db_file appears unused; _get_db_file() rebuilds the
    # filename from a literal -- confirm before relying on this attribute
    db_file = '.wakatime.db'
    table_name = 'heartbeat_2'
    args = None
    configs = None

    def __init__(self, args, configs):
        self.args = args
        self.configs = configs

    def connect(self):
        """Open the queue database, creating the table if needed.

        Returns a (connection, cursor) pair; isolation_level=None puts
        sqlite3 in autocommit mode except where BEGIN is issued explicitly.
        """
        conn = sqlite3.connect(self._get_db_file(), isolation_level=None)
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS {0} (
            id text,
            heartbeat text)
        '''.format(self.table_name))
        return (conn, c)

    def push(self, heartbeat):
        """Persist one heartbeat; silently a no-op without sqlite3 support."""
        if not HAS_SQL:
            return
        try:
            conn, c = self.connect()
            data = {
                'id': heartbeat.get_id(),
                'heartbeat': heartbeat.json(),
            }
            c.execute('INSERT INTO {0} VALUES (:id,:heartbeat)'.format(self.table_name), data)
            conn.commit()
            conn.close()
        except sqlite3.Error:
            # NOTE(review): log.traceback is not a stdlib Logger method; it
            # comes from WakaTime's custom logger setup -- confirm logging
            # is configured before this is reached
            log.traceback()

    def pop(self):
        """Remove and return one queued Heartbeat, or None.

        Retries up to 4 attempts (tries counts 3..0) with a short sleep
        when the database is locked by another process.
        """
        if not HAS_SQL:
            return None
        tries = 3
        wait = 0.1
        try:
            conn, c = self.connect()
        except sqlite3.Error:
            log.traceback(logging.DEBUG)
            return None
        heartbeat = None
        loop = True
        while loop and tries > -1:
            try:
                # take an immediate write lock so the SELECT+DELETE is atomic
                c.execute('BEGIN IMMEDIATE')
                c.execute('SELECT * FROM {0} LIMIT 1'.format(self.table_name))
                row = c.fetchone()
                if row is not None:
                    id = row[0]
                    heartbeat = Heartbeat(json.loads(row[1]), self.args, self.configs, _clone=True)
                    c.execute('DELETE FROM {0} WHERE id=?'.format(self.table_name), [id])
                conn.commit()
                loop = False
            except sqlite3.Error:
                log.traceback(logging.DEBUG)
                sleep(wait)
                tries -= 1
        try:
            conn.close()
        except sqlite3.Error:
            log.traceback(logging.DEBUG)
        return heartbeat

    def push_many(self, heartbeats):
        """Persist an iterable of heartbeats one at a time."""
        for heartbeat in heartbeats:
            self.push(heartbeat)

    def pop_many(self, limit=None):
        """Yield queued heartbeats in batches of HEARTBEATS_PER_REQUEST,
        up to `limit` total (DEFAULT_SYNC_OFFLINE_ACTIVITY by default)."""
        if limit is None:
            limit = DEFAULT_SYNC_OFFLINE_ACTIVITY

        heartbeats = []

        count = 0
        while count < limit:
            heartbeat = self.pop()
            if not heartbeat:
                break
            heartbeats.append(heartbeat)
            count += 1
            if count % HEARTBEATS_PER_REQUEST == 0:
                yield heartbeats
                heartbeats = []

        if heartbeats:
            yield heartbeats

    def _get_db_file(self):
        # honor WAKATIME_HOME when set, otherwise use the user's home dir
        home = '~'
        if os.environ.get('WAKATIME_HOME'):
            home = os.environ.get('WAKATIME_HOME')
        return os.path.join(os.path.expanduser(home), '.wakatime.db')
| wakatime/komodo-wakatime | components/wakatime/offlinequeue.py | Python | bsd-3-clause | 3,427 |
import chainer
import chainer.functions as F
import chainer.links as L
class Alex(chainer.Chain):

    """Single-GPU AlexNet without partition toward the channel axis.

    The final fully-connected layer emits two logits per class; a softmax
    over that axis yields a per-class probability stored in ``h_prob``.
    The first half of the classes corresponds to single-arm labels, the
    second half to dual-arm labels (a sample with t[half_n] == -1 is
    treated as single-arm).
    """

    def __init__(self, n_class=1000, threshold=0.5, pt_func=None):
        self.threshold = threshold  # probability cutoff used for accuracy
        self.pt_func = pt_func
        self.n_class = n_class
        super(Alex, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(3, 96, 11, stride=4, pad=4)
            self.bn1 = L.BatchNormalization(96)
            self.conv2 = L.Convolution2D(96, 256, 5, stride=1, pad=1)
            self.bn2 = L.BatchNormalization(256)
            self.conv3 = L.Convolution2D(256, 384, 3, stride=1, pad=1)
            self.conv4 = L.Convolution2D(384, 384, 3, stride=1, pad=1)
            self.conv5 = L.Convolution2D(384, 256, 3, stride=1, pad=1)
            self.bn5 = L.BatchNormalization(256)
            self.fc6 = L.Linear(33280, 4096)
            self.fc7 = L.Linear(4096, 4096)
            self.fc8 = L.Linear(4096, 2*n_class)

    def __call__(self, x, t=None):
        """Forward pass; returns the loss when training with labels.

        ``t`` may be None at inference time, in which case only
        ``self.h_prob`` is populated and nothing is returned.
        """
        n_batch = len(x)
        # bug fix: len(t) raised TypeError when t is None, which broke the
        # documented inference path below
        assert t is None or n_batch == len(t)

        h = F.relu(self.bn1(self.conv1(x)))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.bn5(self.conv5(h)))
        h = F.max_pooling_2d(h, 3, stride=3)
        # NOTE(review): train_conv is not set in __init__; it must be
        # assigned externally before calling the model -- confirm
        if not self.train_conv:
            h.unchain_backward()
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
        h = self.fc8(h)
        # (batch, 2, n_class): axis 1 holds the off/on logits per class
        h = h.reshape((-1, 2, self.n_class))
        h_prob = F.softmax(h, axis=1)[:, 1, :]
        self.h_prob = h_prob

        if t is None:
            assert not chainer.config.train
            return

        # integer division: half_n is used as an index/slice bound
        # (self.n_class / 2 is a float under Python 3 and raises TypeError)
        half_n = self.n_class // 2
        is_singlearm_mask = t[:, half_n] == -1

        # loss for single arm
        h_single = h[is_singlearm_mask][:, :, :half_n]
        t_single = t[is_singlearm_mask][:, :half_n]
        # Requires: https://github.com/chainer/chainer/pull/3310
        if h_single.data.shape[0] > 0:
            loss_single = F.softmax_cross_entropy(
                h_single, t_single, normalize=False)
        else:
            loss_single = None
        # loss for dual arm
        h_dual = h[~is_singlearm_mask][:, :, half_n:]
        t_dual = t[~is_singlearm_mask][:, half_n:]
        # Requires: https://github.com/chainer/chainer/pull/3310
        if h_dual.data.shape[0] > 0:
            loss_dual = F.softmax_cross_entropy(
                h_dual, t_dual, normalize=False)
        else:
            loss_dual = None

        if loss_single is None:
            self.loss = loss_dual
        elif loss_dual is None:
            self.loss = loss_single
        else:
            self.loss = loss_single + loss_dual

        # calculate acc on CPU
        h_prob_single = h_prob[is_singlearm_mask][:, :half_n]
        h_prob_single = chainer.cuda.to_cpu(h_prob_single.data)
        t_single = chainer.cuda.to_cpu(t_single)
        h_prob_dual = h_prob[~is_singlearm_mask][:, half_n:]
        h_prob_dual = chainer.cuda.to_cpu(h_prob_dual.data)
        t_dual = chainer.cuda.to_cpu(t_dual)

        # a sample counts as correct only if every class label matches
        label_single = (h_prob_single > self.threshold).astype(self.xp.int32)
        label_dual = (h_prob_dual > self.threshold).astype(self.xp.int32)
        acc_single = (t_single == label_single).all(axis=1)
        acc_single = acc_single.astype(self.xp.int32).flatten()
        acc_dual = (t_dual == label_dual).all(axis=1)
        acc_dual = acc_dual.astype(self.xp.int32).flatten()

        self.acc = self.xp.sum(acc_single) + self.xp.sum(acc_dual)
        self.acc = self.acc / float(len(acc_single) + len(acc_dual))

        chainer.reporter.report({
            'loss': self.loss,
            'acc': self.acc,
        }, self)

        if chainer.config.train:
            return self.loss
| start-jsk/jsk_apc | demos/selective_dualarm_stowing/python/selective_dualarm_stowing/models/alex.py | Python | bsd-3-clause | 4,007 |
"""This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import threading
import time
import unittest
from test.fork_wait import ForkWait
from test.support import reap_children, get_attribute, verbose
# Skip test if fork does not exist (e.g. on Windows): get_attribute raises
# unittest.SkipTest when the attribute is missing, skipping the module.
get_attribute(os, 'fork')
class ForkTest(ForkWait):
    """fork() tests, including interactions with the import lock."""

    def wait_impl(self, cpid):
        """Poll waitpid(WNOHANG) for up to 10s and assert a clean exit."""
        deadline = time.monotonic() + 10.0
        while time.monotonic() <= deadline:
            # waitpid() shouldn't hang, but some of the buildbots seem to hang
            # in the forking tests.  This is an attempt to fix the problem.
            spid, status = os.waitpid(cpid, os.WNOHANG)
            if spid == cpid:
                break
            time.sleep(0.1)

        self.assertEqual(spid, cpid)
        self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))

    def test_threaded_import_lock_fork(self):
        """Check fork() in main thread works while a subthread is doing an import"""
        import_started = threading.Event()
        fake_module_name = "fake test module"
        partial_module = "partial"
        complete_module = "complete"
        def importer():
            # hold the import lock and publish a half-initialized module,
            # then finish initialization before releasing the lock
            imp.acquire_lock()
            sys.modules[fake_module_name] = partial_module
            import_started.set()
            time.sleep(0.01) # Give the other thread time to try and acquire.
            sys.modules[fake_module_name] = complete_module
            imp.release_lock()
        t = threading.Thread(target=importer)
        t.start()
        import_started.wait()
        pid = os.fork()
        try:
            # PyOS_BeforeFork should have waited for the import to complete
            # before forking, so the child can recreate the import lock
            # correctly, but also won't see a partially initialised module
            if not pid:
                m = __import__(fake_module_name)
                if m == complete_module:
                    os._exit(0)
                else:
                    if verbose > 1:
                        print("Child encountered partial module")
                    os._exit(1)
            else:
                t.join()
                # Exitcode 1 means the child got a partial module (bad.) No
                # exitcode (but a hang, which manifests as 'got pid 0')
                # means the child deadlocked (also bad.)
                self.wait_impl(pid)
        finally:
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError:
                pass

    def test_nested_import_lock_fork(self):
        """Check fork() in main thread works while the main thread is doing an import"""
        # Issue 9573: this used to trigger RuntimeError in the child process
        def fork_with_import_lock(level):
            release = 0
            in_child = False
            try:
                try:
                    # acquire the import lock `level` times before forking
                    for i in range(level):
                        imp.acquire_lock()
                        release += 1
                    pid = os.fork()
                    in_child = not pid
                finally:
                    for i in range(release):
                        imp.release_lock()
            except RuntimeError:
                if in_child:
                    if verbose > 1:
                        print("RuntimeError in child")
                    os._exit(1)
                raise
            if in_child:
                os._exit(0)
            self.wait_impl(pid)

        # Check this works with various levels of nested
        # import in the main thread
        for level in range(5):
            fork_with_import_lock(level)
def tearDownModule():
    """Reap any child processes left over by the fork tests."""
    reap_children()

if __name__ == "__main__":
    unittest.main()
| kikocorreoso/brython | www/src/Lib/test/test_fork1.py | Python | bsd-3-clause | 3,761 |
#!/usr/bin/env python3
# Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
from contextlib import ExitStack
import subprocess
import sys
import utils
def Main():
    """Run the wrapped command with core-dump archiving enabled and
    return its exit code."""
    command = sys.argv[1:]
    with ExitStack() as managers:
        # Activate every context manager yielded by CoreDumpArchiver
        # before launching the child process.
        for archiver_ctx in utils.CoreDumpArchiver(command):
            managers.enter_context(archiver_ctx)
        exit_code = subprocess.call(command)
        utils.DiagnoseExitCode(exit_code, command)
    return exit_code
if __name__ == '__main__':
    # propagate the wrapped command's exit code to our caller
    sys.exit(Main())
| dart-lang/sdk | tools/run_with_coredumps_enabled.py | Python | bsd-3-clause | 627 |
from django.conf import settings
from django.views.generic.dates import ArchiveIndexView, DateDetailView, DayArchiveView, MonthArchiveView, YearArchiveView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Photo, Gallery
# Number of galleries to display per page.
GALLERY_PAGINATE_BY = getattr(settings, 'PHOTOLOGUE_GALLERY_PAGINATE_BY', 20)
if GALLERY_PAGINATE_BY != 20:
    # A non-default value means the deployment still overrides the page
    # size through settings; warn that the setting is going away.
    import warnings
    warnings.warn(
        DeprecationWarning('PHOTOLOGUE_GALLERY_PAGINATE_BY setting will be removed in Photologue 3.0'))
# Number of photos to display per page.
PHOTO_PAGINATE_BY = getattr(settings, 'PHOTOLOGUE_PHOTO_PAGINATE_BY', 20)
if PHOTO_PAGINATE_BY != 20:
    # Same deprecation treatment for the photo page-size override.
    import warnings
    warnings.warn(
        DeprecationWarning('PHOTOLOGUE_PHOTO_PAGINATE_BY setting will be removed in Photologue 3.0'))
# Gallery views.
class GalleryListView(ListView):
    """Paginated list of all public galleries."""
    queryset = Gallery.objects.filter(is_public=True)
    paginate_by = GALLERY_PAGINATE_BY
class GalleryDetailView(DetailView):
    """Detail page for a single public gallery."""
    queryset = Gallery.objects.filter(is_public=True)
class GalleryDateView(object):
    """Mixin providing the shared queryset and date configuration for the
    date-based gallery views below."""
    queryset = Gallery.objects.filter(is_public=True)
    date_field = 'date_added'
    allow_empty = True
class GalleryDateDetailView(GalleryDateView, DateDetailView):
    """Detail page for a public gallery addressed by its date_added."""
    pass
class GalleryArchiveIndexView(GalleryDateView, ArchiveIndexView):
    """Top-level date archive of public galleries."""
    pass
class GalleryDayArchiveView(GalleryDateView, DayArchiveView):
    """Public galleries added on a given day."""
    pass
class GalleryMonthArchiveView(GalleryDateView, MonthArchiveView):
    """Public galleries added in a given month."""
    pass
class GalleryYearArchiveView(GalleryDateView, YearArchiveView):
    """Public galleries added in a given year."""
    pass
# Photo views.
class PhotoListView(ListView):
    """Paginated list of all public photos."""
    queryset = Photo.objects.filter(is_public=True)
    paginate_by = PHOTO_PAGINATE_BY
class PhotoDetailView(DetailView):
    """Detail page for a single public photo."""
    queryset = Photo.objects.filter(is_public=True)
class PhotoDateView(object):
    """Mixin providing the shared queryset and date configuration for the
    date-based photo views below."""
    queryset = Photo.objects.filter(is_public=True)
    date_field = 'date_added'
    allow_empty = True
class PhotoDateDetailView(PhotoDateView, DateDetailView):
    """Detail page for a public photo addressed by its date_added."""
    pass
class PhotoArchiveIndexView(PhotoDateView, ArchiveIndexView):
    """Top-level date archive of public photos."""
    pass
class PhotoDayArchiveView(PhotoDateView, DayArchiveView):
    """Public photos added on a given day."""
    pass
class PhotoMonthArchiveView(PhotoDateView, MonthArchiveView):
    """Public photos added in a given month."""
    pass
class PhotoYearArchiveView(PhotoDateView, YearArchiveView):
    """Public photos added in a given year."""
    pass
| MathieuDuponchelle/my_patched_photologue | photologue/views.py | Python | bsd-3-clause | 2,340 |
from weld.grizzly.core.frame import GrizzlyDataFrame
from weld.grizzly.core.series import GrizzlySeries
| weld-project/weld | weld-python/weld/grizzly/__init__.py | Python | bsd-3-clause | 105 |
# Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Use of this source code is goverened by a BSD licence that can be
# found in the LICENCE file.
import os.path
import sys
import time

from mysql.replicant.server import Server
from mysql.replicant.common import User
from mysql.replicant.machine import Linux
from mysql.replicant.roles import Master, Final
class MultiLinux(Linux):
    """Machine class for a box that runs several MySQL servers, all of
    them managed through the mysqld_multi utility.

    :param number: mysqld_multi group number identifying the managed
        server instance on this machine.
    """
    def __init__(self, number):
        self.__number = number

    def stop_server(self, server):
        """Stop the managed instance and block until its PID file is gone."""
        server.ssh(["mysqld_multi", "stop", str(self.__number)])
        # Bug fix: str.join() accepts a single iterable, so the original
        # ''.join(a, b, c) call raised TypeError before the wait loop ran.
        pidfile = ''.join(["/var/run/mysqld", server.name, ".pid"])
        # mysqld removes its PID file on shutdown; poll until it disappears.
        while os.path.exists(pidfile):
            time.sleep(1)

    def start_server(self, server):
        """Start the managed instance, giving it a moment to come up."""
        # sys.stdout.write keeps the "done" on the same line and works on
        # both Python 2 and 3 (the original used a py2-only print statement);
        # the redundant function-local 'import time' was also dropped.
        sys.stdout.write("Starting server... ")
        sys.stdout.flush()
        server.ssh(["mysqld_multi", "start", str(self.__number)])
        time.sleep(1)  # Need some time for server to start
        sys.stdout.write("done\n")
# Account used by the replicant library for SQL-level administration.
_replicant_user = User("mysql_replicant")
# Replication account (user/password) the slaves connect with.
_repl_user = User("repl_user", "xyzzy")
def _cnf(name):
test_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(test_dir, '..', name + ".cnf")
# Replication topology used by the tests: one master (mysqld1) and three
# final (leaf) slaves, all local instances distinguished by port/socket
# and their mysqld_multi config section.
master = Server(server_id=1, name="mysqld1",
                sql_user=_replicant_user,
                ssh_user=User("mysql"),
                machine=Linux(), role=Master(_repl_user),
                port=3307,
                socket='/var/run/mysqld/mysqld1.sock',
                defaults_file=_cnf("mysqld1"),
                config_section="mysqld1")
slaves = [Server(server_id=2, name="mysqld2",
                 sql_user=_replicant_user,
                 ssh_user=User("mysql"),
                 machine=Linux(), role=Final(master),
                 port=3308,
                 socket='/var/run/mysqld/mysqld2.sock',
                 defaults_file=_cnf("mysqld2"),
                 config_section="mysqld2"),
          Server(server_id=3, name="mysqld3",
                 sql_user=_replicant_user,
                 ssh_user=User("mysql"),
                 machine=Linux(), role=Final(master),
                 port=3309,
                 socket='/var/run/mysqld/mysqld3.sock',
                 defaults_file=_cnf("mysqld3"),
                 config_section="mysqld3"),
          Server(server_id=4, name="mysqld4",
                 sql_user=_replicant_user,
                 ssh_user=User("mysql"),
                 machine=Linux(), role=Final(master),
                 port=3310,
                 socket='/var/run/mysqld/mysqld4.sock',
                 defaults_file=_cnf("mysqld4"),
                 config_section="mysqld4")]
# Convenience list of every server in the topology, master first.
servers = [master] + slaves
| mrramazani/mysql-replicant-python-1 | lib/tests/deployment/simple.py | Python | bsd-3-clause | 2,759 |
"""
Plugin: Slideshow
*****************
This plugin allows you to put a slideshow on a page, automatically
displaying the selected image files with customizable transitions and
intervals.
Installation
============
To use this plugin, put ``media_tree.contrib.cms_plugins.media_tree_slideshow``
in your installed apps, and run ``manage.py syncdb``.
Template
========
Override the template ``cms/plugins/media_tree_slideshow.html`` if you want to
customize the output. Please take a look at the default template for more
information.
By default, images are rendered to the output using the template
``media_tree/filenode/includes/figure.html``, which includes captions.
.. Note::
The default template requires you to include `jQuery <http://jquery.com/>`_
in your pages, since it uses the `jQuery Cycle Plugin
<http://jquery.malsup.com/cycle/>`_ (bundled) for image transitions.
""" | bittner/django-media-tree | media_tree/contrib/cms_plugins/media_tree_slideshow/__init__.py | Python | bsd-3-clause | 903 |
__author__ = 'frank'
import json
from BaseObject import BaseObject
from Requestor import process_request
class WalletName(BaseObject):
    """
    Wallet Name object
    :param domain_name: Domain name that Wallet Name should be associated with.
    :param name: Unique name for this Wallet Name prefixed to your domain_name. e.g. name.domain_name
    :param external_id: Unique identifier of your choice to identify your user's Wallet Name.
    :param id: Unique Netki identifier for this Wallet Name.
    """
    def __init__(self, domain_name, name, external_id, id=None):
        super(WalletName, self).__init__()
        self.domain_name = domain_name
        self.name = name
        self.external_id = external_id
        self.id = id
        # Mapping of currency code -> wallet address for this Wallet Name.
        self.wallets = {}

    def get_used_currencies(self):
        """
        Returns wallets dictionary containing currencies and wallet addresses.
        ``wallets['currency']: 'wallet_address'``
        """
        return self.wallets

    def get_wallet_address(self, currency):
        """Return the wallet address for a provided currency.

        Raises KeyError if no address is set for ``currency``.
        """
        return self.wallets[currency]

    def set_currency_address(self, currency, wallet_address):
        """
        Create or update a currency and wallet address.
        :param currency: Three or Four letter currency identifier per Netki API documentation. ``btc, ltc, oap``
        :param wallet_address: wallet address for provided currency
        """
        self.wallets[currency] = wallet_address

    def remove_currency_address(self, currency):
        """Remove a currency and its associated wallet address (no-op if absent).

        Bug fix: the previous implementation indexed ``self.wallets[currency]``
        first, which raised KeyError for unknown currencies and skipped the
        deletion when the stored address was falsy (e.g. an empty string).
        """
        self.wallets.pop(currency, None)

    def save(self):
        """
        Commit changes to a WalletName object by submitting them to the API. For new Wallet Names, an id will
        automatically be generated by the server. Run Netki.create_wallet_name() to create a new WalletName object,
        then run save() on your WalletName object to submit it to the API. To update a Wallet Name, run
        Netki.get_wallet_names() to retrieve the Wallet Name object, make your updates, then run save() on the
        WalletName object to commit changes to the API.
        """
        # Serialize the wallets mapping into the list-of-dicts layout the
        # API expects.
        wallet_data = [
            {'currency': currency, 'wallet_address': address}
            for currency, address in self.wallets.items()
        ]
        wallet_name_data = {
            'domain_name': self.domain_name,
            'name': self.name,
            'wallets': wallet_data,
            'external_id': self.external_id
        }
        wn_api_data = {'wallet_names': [wallet_name_data]}
        # If an ID is present it exists in Netki's systems, therefore submit an update
        if self.id:
            wallet_name_data['id'] = self.id
            response = process_request(
                self.netki_client,
                '/v1/partner/walletname',
                'PUT',
                wn_api_data
            )
        else:
            response = process_request(
                self.netki_client,
                '/v1/partner/walletname',
                'POST',
                wn_api_data
            )
        # Adopt the (possibly server-assigned) id echoed back for this
        # Wallet Name.
        for wn in response.wallet_names:
            if wn.domain_name == self.domain_name and wn.name == self.name:
                self.id = wn.id

    def delete(self):
        """
        To delete a WalletName object, first run Netki.get_wallet_names() to retrieve the Wallet Name from the API,
        then run delete() on the WalletName object to delete it from Netki systems.
        """
        if not self.id:
            raise Exception('Unable to Delete Object that Does Not Exist Remotely')
        wn_api_data = {
            'wallet_names': [
                {
                    'domain_name': self.domain_name,
                    'id': self.id
                }
            ]
        }
        process_request(
            self.netki_client,
            '/v1/partner/walletname',
            'DELETE',
            wn_api_data
        )
'''
Hom family of models based on: [Drukker2013]_
Following: [Anselin2011]_
'''
__author__ = "Luc Anselin luc.anselin@asu.edu, Daniel Arribas-Bel darribas@asu.edu"
from scipy import sparse as SP
import numpy as np
from numpy import linalg as la
import ols as OLS
from pysal import lag_spatial
from utils import power_expansion, set_endog, iter_msg, sp_att
from utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments
from utils import get_spFilter, get_lags, _moments2eqs
from utils import spdot, RegressionPropsY, set_warn
import twosls as TSLS
import user_output as USER
import summary_output as SUMMARY
__all__ = ["GM_Error_Hom", "GM_Endog_Error_Hom", "GM_Combo_Hom"]
class BaseGM_Error_Hom(RegressionPropsY):
    '''
    GMM method for a spatial error model with homoskedasticity (note: no
    consistency checks, diagnostics or constant added); based on
    Drukker et al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.
    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    w : Sparse matrix
        Spatial weights sparse matrix
    max_iter : int
        Maximum number of iterations of steps 2a and 2b from Arraiz
        et al. Note: epsilon provides an additional stop condition.
    epsilon : float
        Minimum change in lambda required to stop iterations of
        steps 2a and 2b from Arraiz et al. Note: max_iter provides
        an additional stop condition.
    A1 : string
        If A1='het', then the matrix A1 is defined as in Arraiz et
        al. If A1='hom', then as in Anselin (2011) (default). If
        A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
        and Drukker, Prucha and Raciborski (2010).
    Raises
    ------
    ValueError
        If A1 is not one of 'hom', 'hom_sc' or 'het'.
    Attributes
    ----------
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    iter_stop : string
        Stop criterion reached during iteration of steps 2a and 2b
        from Arraiz et al.
    iteration : integer
        Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    sig2 : float
        Sigma squared used in computations
    xtx : float
        X'X
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'
    Model commands
    >>> reg = BaseGM_Error_Hom(y, X, w=w.sparse, A1='hom_sc')
    >>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
    [[ 47.9479 12.3021]
    [ 0.7063 0.4967]
    [ -0.556 0.179 ]
    [ 0.4129 0.1835]]
    >>> print np.around(reg.vm, 4) #doctest: +SKIP
    [[ 1.51340700e+02 -5.29060000e+00 -1.85650000e+00 -2.40000000e-03]
    [ -5.29060000e+00 2.46700000e-01 5.14000000e-02 3.00000000e-04]
    [ -1.85650000e+00 5.14000000e-02 3.21000000e-02 -1.00000000e-04]
    [ -2.40000000e-03 3.00000000e-04 -1.00000000e-04 3.37000000e-02]]
    '''
    def __init__(self, y, x, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # Build the A1 moment matrix for the requested specification.
        if A1 == 'hom':
            wA1 = get_A1_hom(w)
        elif A1 == 'hom_sc':
            wA1 = get_A1_hom(w, scalarKP=True)
        elif A1 == 'het':
            wA1 = get_A1_het(w)
        else:
            # Fail fast: the original code left wA1 unbound for an unknown
            # A1 value and crashed later with an opaque NameError.
            raise ValueError(
                "A1 must be one of 'hom', 'hom_sc' or 'het', got %r" % (A1,))
        wA2 = get_A2_hom(w)
        # 1a. OLS --> \tilde{\delta}
        ols = OLS.BaseOLS(y=y, x=x)
        self.x, self.y, self.n, self.k, self.xtx = ols.x, ols.y, ols.n, ols.k, ols.xtx
        # 1b. GM --> \tilde{\rho}: initial moments estimate of lambda from
        # the OLS residuals.
        moments = moments_hom(w, wA1, wA2, ols.u)
        lambda1 = optim_moments(moments)
        lambda_old = lambda1
        self.iteration, eps = 0, 1
        while self.iteration < max_iter and eps > epsilon:
            # 2a. SWLS --> \hat{\delta}: re-estimate beta on spatially
            # filtered data given the current lambda.
            x_s = get_spFilter(w, lambda_old, self.x)
            y_s = get_spFilter(w, lambda_old, self.y)
            ols_s = OLS.BaseOLS(y=y_s, x=x_s)
            self.predy = spdot(self.x, ols_s.betas)
            self.u = self.y - self.predy
            # 2b. GM 2nd iteration --> \hat{\rho}: update lambda using the
            # weighting matrix psi; iterate until max_iter or convergence.
            moments = moments_hom(w, wA1, wA2, self.u)
            psi = get_vc_hom(w, wA1, wA2, self, lambda_old)[0]
            lambda2 = optim_moments(moments, psi)
            eps = abs(lambda2 - lambda_old)
            lambda_old = lambda2
            self.iteration += 1
        self.iter_stop = iter_msg(self.iteration, max_iter)
        # Output: stack lambda below the regression coefficients and compute
        # the joint variance-covariance matrix.
        self.betas = np.vstack((ols_s.betas, lambda2))
        self.vm, self.sig2 = get_omega_hom_ols(
            w, wA1, wA2, self, lambda2, moments[0])
        self.e_filtered = self.u - lambda2 * w * self.u
        self._cache = {}
class GM_Error_Hom(BaseGM_Error_Hom):
    '''
    GMM method for a spatial error model with homoskedasticity, with results
    and diagnostics; based on Drukker et al. (2013) [Drukker2013]_, following Anselin
    (2011) [Anselin2011]_.
    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    w : pysal W object
        Spatial weights object
    max_iter : int
        Maximum number of iterations of steps 2a and 2b from Arraiz
        et al. Note: epsilon provides an additional stop condition.
    epsilon : float
        Minimum change in lambda required to stop iterations of
        steps 2a and 2b from Arraiz et al. Note: max_iter provides
        an additional stop condition.
    A1 : string
        If A1='het', then the matrix A1 is defined as in Arraiz et
        al. If A1='hom', then as in Anselin (2011). If
        A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
        and Drukker, Prucha and Raciborski (2010).
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    iter_stop : string
        Stop criterion reached during iteration of steps 2a and 2b
        from Arraiz et al.
    iteration : integer
        Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
    vm : array
        Variance covariance matrix (kxk)
    sig2 : float
        Sigma squared used in computations
    std_err : array
        1xk array of standard errors of the betas
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    xtx : float
        X'X
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    title : string
        Name of the regression method used
    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.
    >>> import numpy as np
    >>> import pysal
    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    Extract the HOVAL column (home values) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    Extract INC (income) and CRIME (crime) vectors from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this class adds a vector of ones to the
    independent variables passed in.
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    Since we want to run a spatial error model, we need to specify the spatial
    weights matrix that includes the spatial configuration of the observations
    into the error component of the model. To do that, we can open an already
    existing gal file or create a new one. In this case, we will create one
    from ``columbus.shp``.
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:
    >>> w.transform = 'r'
    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.
    >>> reg = GM_Error_Hom(y, X, w=w, A1='hom_sc', name_y='home value', name_x=['income', 'crime'], name_ds='columbus')
    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. This class offers an error model that assumes
    homoskedasticity but that unlike the models from
    ``pysal.spreg.error_sp``, it allows for inference on the spatial
    parameter. This is why you obtain as many coefficient estimates as
    standard errors, which you calculate taking the square root of the
    diagonal of the variance-covariance matrix of the parameters:
    >>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
    [[ 47.9479 12.3021]
    [ 0.7063 0.4967]
    [ -0.556 0.179 ]
    [ 0.4129 0.1835]]
    '''
    def __init__(self, y, x, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc',
                 vm=False, name_y=None, name_x=None,
                 name_w=None, name_ds=None):
        # Validate user input (shapes, weights) before delegating the actual
        # estimation to the computational base class.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        # Prepend the constant column expected by the base class.
        x_constant = USER.check_constant(x)
        BaseGM_Error_Hom.__init__(self, y=y, x=x_constant, w=w.sparse, A1=A1,
                                  max_iter=max_iter, epsilon=epsilon)
        self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HOM)"
        # Record user-facing names and build the summary output.
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_x.append('lambda')
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Endog_Error_Hom(RegressionPropsY):
    '''
    GMM method for a spatial error model with homoskedasticity and
    endogenous variables (note: no consistency checks, diagnostics or constant
    added); based on Drukker et al. (2013) [Drukker2013]_, following Anselin (2011)
    [Anselin2011]_.
    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    w : Sparse matrix
        Spatial weights sparse matrix
    max_iter : int
        Maximum number of iterations of steps 2a and 2b from Arraiz
        et al. Note: epsilon provides an additional stop condition.
    epsilon : float
        Minimum change in lambda required to stop iterations of
        steps 2a and 2b from Arraiz et al. Note: max_iter provides
        an additional stop condition.
    A1 : string
        If A1='het', then the matrix A1 is defined as in Arraiz et
        al. If A1='hom', then as in Anselin (2011). If
        A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
        and Drukker, Prucha and Raciborski (2010).
    Raises
    ------
    ValueError
        If A1 is not one of 'hom', 'hom_sc' or 'het'.
    Attributes
    ----------
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable used as instruments
    z : array
        nxk array of variables (combination of x and yend)
    h : array
        nxl array of instruments (combination of x and q)
    iter_stop : string
        Stop criterion reached during iteration of steps 2a and 2b
        from Arraiz et al.
    iteration : integer
        Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    sig2 : float
        Sigma squared used in computations
    hth : float
        H'H
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> yd = []
    >>> yd.append(db.by_col("CRIME"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'
    >>> reg = BaseGM_Endog_Error_Hom(y, X, yd, q, w=w.sparse, A1='hom_sc')
    >>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
    [[ 55.3658 23.496 ]
    [ 0.4643 0.7382]
    [ -0.669 0.3943]
    [ 0.4321 0.1927]]
    '''
    def __init__(self, y, x, yend, q, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # Build the A1 moment matrix for the requested specification.
        if A1 == 'hom':
            wA1 = get_A1_hom(w)
        elif A1 == 'hom_sc':
            wA1 = get_A1_hom(w, scalarKP=True)
        elif A1 == 'het':
            wA1 = get_A1_het(w)
        else:
            # Fail fast: the original code left wA1 unbound for an unknown
            # A1 value and crashed later with an opaque NameError (same
            # guard as BaseGM_Error_Hom).
            raise ValueError(
                "A1 must be one of 'hom', 'hom_sc' or 'het', got %r" % (A1,))
        wA2 = get_A2_hom(w)
        # 1a. S2SLS --> \tilde{\delta}
        tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
        self.x, self.z, self.h, self.y, self.hth = tsls.x, tsls.z, tsls.h, tsls.y, tsls.hth
        self.yend, self.q, self.n, self.k = tsls.yend, tsls.q, tsls.n, tsls.k
        # 1b. GM --> \tilde{\rho}: initial moments estimate of lambda from
        # the 2SLS residuals.
        moments = moments_hom(w, wA1, wA2, tsls.u)
        lambda1 = optim_moments(moments)
        lambda_old = lambda1
        self.iteration, eps = 0, 1
        while self.iteration < max_iter and eps > epsilon:
            # 2a. GS2SLS --> \hat{\delta}: re-estimate the coefficients on
            # spatially filtered data given the current lambda.
            x_s = get_spFilter(w, lambda_old, self.x)
            y_s = get_spFilter(w, lambda_old, self.y)
            yend_s = get_spFilter(w, lambda_old, self.yend)
            tsls_s = TSLS.BaseTSLS(y=y_s, x=x_s, yend=yend_s, h=self.h)
            self.predy = spdot(self.z, tsls_s.betas)
            self.u = self.y - self.predy
            # 2b. GM 2nd iteration --> \hat{\rho}: update lambda using the
            # weighting matrix psi; iterate until max_iter or convergence.
            moments = moments_hom(w, wA1, wA2, self.u)
            psi = get_vc_hom(w, wA1, wA2, self, lambda_old, tsls_s.z)[0]
            lambda2 = optim_moments(moments, psi)
            eps = abs(lambda2 - lambda_old)
            lambda_old = lambda2
            self.iteration += 1
        self.iter_stop = iter_msg(self.iteration, max_iter)
        # Output: stack lambda below the regression coefficients and compute
        # the joint variance-covariance matrix.
        self.betas = np.vstack((tsls_s.betas, lambda2))
        self.vm, self.sig2 = get_omega_hom(
            w, wA1, wA2, self, lambda2, moments[0])
        self.e_filtered = self.u - lambda2 * w * self.u
        self._cache = {}
class GM_Endog_Error_Hom(BaseGM_Endog_Error_Hom):
'''
GMM method for a spatial error model with homoskedasticity and endogenous
variables, with results and diagnostics; based on Drukker et al. (2013)
[Drukker2013]_, following Anselin (2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in ouput
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case we consider CRIME (crime rates) is an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for CRIME. We use DISCBD (distance to the
CBD) for this and hence put it in the instruments parameter, 'q'.
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, his allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminars, we are good to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Endog_Error_Hom(y, X, yd, q, w=w, A1='hom_sc', name_x=['inc'], name_y='hoval', name_yend=['crime'], name_q=['discbd'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that unlike the models from
``pysal.spreg.error_sp``, it allows for inference on the spatial
parameter. Hence, we find the same number of betas as of standard errors,
which we calculate taking the square root of the diagonal of the
variance-covariance matrix:
>>> print reg.name_z
['CONSTANT', 'inc', 'crime', 'lambda']
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 55.3658 23.496 ]
[ 0.4643 0.7382]
[ -0.669 0.3943]
[ 0.4321 0.1927]]
'''
    def __init__(self, y, x, yend, q, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc',
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None):
        # Validate user input: array alignment, y shape, and that a
        # spatial weights object was supplied (w is required here).
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        # Prepend a constant column, then hand off to the base class,
        # which runs the actual GMM estimation on the sparse weights.
        x_constant = USER.check_constant(x)
        BaseGM_Endog_Error_Hom.__init__(
            self, y=y, x=x_constant, w=w.sparse, yend=yend, q=q,
            A1=A1, max_iter=max_iter, epsilon=epsilon)
        self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
        # Attach the display names used by the summary printer.
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_yend = USER.set_name_yend(name_yend, yend)
        self.name_z = self.name_x + self.name_yend
        self.name_z.append('lambda')  # listing lambda last
        self.name_q = USER.set_name_q(name_q, q)
        self.name_h = USER.set_name_h(self.name_x, self.name_q)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Endog_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Combo_Hom(BaseGM_Endog_Error_Hom):

    '''
    GMM method for a spatial lag and error model with homoskedasticity and
    endogenous variables (note: no consistency checks, diagnostics or constant
    added); based on Drukker et al. (2013) [Drukker2013]_, following Anselin (2011)
    [Anselin2011]_.

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable to use as instruments (note:
                   this should not contain any variables from x)
    w            : Sparse matrix
                   Spatial weights sparse matrix
    w_lags       : integer
                   Orders of W to include as instruments for the spatially
                   lagged dependent variable. For example, w_lags=1, then
                   instruments are WX; if w_lags=2, then WX, WWX; and so on.
    lag_q        : boolean
                   If True, then include spatial lags of the additional
                   instruments (q).
    max_iter     : int
                   Maximum number of iterations of steps 2a and 2b from Arraiz
                   et al. Note: epsilon provides an additional stop condition.
    epsilon      : float
                   Minimum change in lambda required to stop iterations of
                   steps 2a and 2b from Arraiz et al. Note: max_iter provides
                   an additional stop condition.
    A1           : string
                   If A1='het', then the matrix A1 is defined as in Arraiz et
                   al. If A1='hom', then as in Anselin (2011). If
                   A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
                   and Drukker, Prucha and Raciborski (2010).

    Attributes
    ----------
    betas        : array
                   kx1 array of estimated coefficients
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   nx1 array of spatially filtered residuals
    predy        : array
                   nx1 array of predicted y values
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable used as instruments
    z            : array
                   nxk array of variables (combination of x and yend)
    h            : array
                   nxl array of instruments (combination of x and q)
    iter_stop    : string
                   Stop criterion reached during iteration of steps 2a and 2b
                   from Arraiz et al.
    iteration    : integer
                   Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    vm           : array
                   Variance covariance matrix (kxk)
    sig2         : float
                   Sigma squared used in computations
    hth          : float
                   H'H

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'
    >>> w_lags = 1
    >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, None, None, w_lags, True)
    >>> X = np.hstack((np.ones(y.shape),X))

    Example only with spatial lag

    >>> reg = BaseGM_Combo_Hom(y, X, yend=yd2, q=q2, w=w.sparse, A1='hom_sc')
    >>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
    [[ 10.1254 15.2871]
     [ 1.5683 0.4407]
     [ 0.1513 0.4048]
     [ 0.2103 0.4226]]

    Example with both spatial lag and other endogenous variables

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> yd = []
    >>> yd.append(db.by_col("CRIME"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T
    >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True)
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> reg = BaseGM_Combo_Hom(y, X, yd2, q2, w=w.sparse, A1='hom_sc')
    >>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
    >>> print np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5)))
    [['CONSTANT' '111.7705' '67.75191']
     ['inc' '-0.30974' '1.16656']
     ['crime' '-1.36043' '0.6841']
     ['W_hoval' '-0.52908' '0.84428']
     ['lambda' '0.60116' '0.18605']]
    '''

    def __init__(self, y, x, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # NOTE(review): w_lags and lag_q are accepted but not used here --
        # callers are expected to have already expanded yend/q (e.g. via
        # set_endog) before instantiating this base class; presumably the
        # parameters are kept for signature symmetry with GM_Combo_Hom.
        # w must be the sparse matrix itself, not a pysal W object.
        BaseGM_Endog_Error_Hom.__init__(
            self, y=y, x=x, w=w, yend=yend, q=q, A1=A1,
            max_iter=max_iter, epsilon=epsilon)
class GM_Combo_Hom(BaseGM_Combo_Hom):

    '''
    GMM method for a spatial lag and error model with homoskedasticity and
    endogenous variables, with results and diagnostics; based on Drukker et
    al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable to use as instruments (note:
                   this should not contain any variables from x)
    w            : pysal W object
                   Spatial weights object (always necessary)
    w_lags       : integer
                   Orders of W to include as instruments for the spatially
                   lagged dependent variable. For example, w_lags=1, then
                   instruments are WX; if w_lags=2, then WX, WWX; and so on.
    lag_q        : boolean
                   If True, then include spatial lags of the additional
                   instruments (q).
    max_iter     : int
                   Maximum number of iterations of steps 2a and 2b from Arraiz
                   et al. Note: epsilon provides an additional stop condition.
    epsilon      : float
                   Minimum change in lambda required to stop iterations of
                   steps 2a and 2b from Arraiz et al. Note: max_iter provides
                   an additional stop condition.
    A1           : string
                   If A1='het', then the matrix A1 is defined as in Arraiz et
                   al. If A1='hom', then as in Anselin (2011). If
                   A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
                   and Drukker, Prucha and Raciborski (2010).
    vm           : boolean
                   If True, include variance-covariance matrix in summary
                   results
    name_y       : string
                   Name of dependent variable for use in output
    name_x       : list of strings
                   Names of independent variables for use in output
    name_yend    : list of strings
                   Names of endogenous variables for use in output
    name_q       : list of strings
                   Names of instruments for use in output
    name_w       : string
                   Name of weights matrix for use in output
    name_ds      : string
                   Name of dataset for use in output

    Attributes
    ----------
    summary      : string
                   Summary of regression results and diagnostics (note: use in
                   conjunction with the print command)
    betas        : array
                   kx1 array of estimated coefficients
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   nx1 array of spatially filtered residuals
    e_pred       : array
                   nx1 array of residuals (using reduced form)
    predy        : array
                   nx1 array of predicted y values
    predy_e      : array
                   nx1 array of predicted y values (using reduced form)
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable used as instruments
    z            : array
                   nxk array of variables (combination of x and yend)
    h            : array
                   nxl array of instruments (combination of x and q)
    iter_stop    : string
                   Stop criterion reached during iteration of steps 2a and 2b
                   from Arraiz et al.
    iteration    : integer
                   Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    vm           : array
                   Variance covariance matrix (kxk)
    pr2          : float
                   Pseudo R squared (squared correlation between y and ypred)
    pr2_e        : float
                   Pseudo R squared (squared correlation between y and ypred_e
                   (using reduced form))
    sig2         : float
                   Sigma squared used in computations (based on filtered
                   residuals)
    std_err      : array
                   1xk array of standard errors of the betas
    z_stat       : list of tuples
                   z statistic; each tuple contains the pair (statistic,
                   p-value), where each is a float
    name_y       : string
                   Name of dependent variable for use in output
    name_x       : list of strings
                   Names of independent variables for use in output
    name_yend    : list of strings
                   Names of endogenous variables for use in output
    name_z       : list of strings
                   Names of exogenous and endogenous variables for use in
                   output
    name_q       : list of strings
                   Names of external instruments
    name_h       : list of strings
                   Names of all instruments used in ouput
    name_w       : string
                   Name of weights matrix for use in output
    name_ds      : string
                   Name of dataset for use in output
    title        : string
                   Name of the regression method used
    hth          : float
                   H'H

    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import numpy as np
    >>> import pysal

    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')

    Extract the HOVAL column (home values) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.

    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))

    Extract INC (income) vector from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this class adds a vector of ones to the
    independent variables passed in.

    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T

    Since we want to run a spatial error model, we need to specify the spatial
    weights matrix that includes the spatial configuration of the observations
    into the error component of the model. To do that, we can open an already
    existing gal file or create a new one. In this case, we will create one
    from ``columbus.shp``.

    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:

    >>> w.transform = 'r'

    Example only with spatial lag

    The Combo class runs an SARAR model, that is a spatial lag+error model.
    In this case we will run a simple version of that, where we have the
    spatial effects as well as exogenous variables. Since it is a spatial
    model, we have to pass in the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> reg = GM_Combo_Hom(y, X, w=w, A1='hom_sc', name_x=['inc'],\
            name_y='hoval', name_yend=['crime'], name_q=['discbd'],\
            name_ds='columbus')
    >>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
    [[ 10.1254 15.2871]
     [ 1.5683 0.4407]
     [ 0.1513 0.4048]
     [ 0.2103 0.4226]]

    This class also allows the user to run a spatial lag+error model with the
    extra feature of including non-spatial endogenous regressors. This means
    that, in addition to the spatial lag and error, we consider some of the
    variables on the right-hand side of the equation as endogenous and we
    instrument for this. As an example, we will include CRIME (crime rates) as
    endogenous and will instrument with DISCBD (distance to the CSB). We first
    need to read in the variables:

    >>> yd = []
    >>> yd.append(db.by_col("CRIME"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T

    And then we can run and explore the model analogously to the previous combo:

    >>> reg = GM_Combo_Hom(y, X, yd, q, w=w, A1='hom_sc', \
            name_ds='columbus')
    >>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
    >>> print np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5)))
    [['CONSTANT' '111.7705' '67.75191']
     ['inc' '-0.30974' '1.16656']
     ['crime' '-1.36043' '0.6841']
     ['W_hoval' '-0.52908' '0.84428']
     ['lambda' '0.60116' '0.18605']]
    '''

    def __init__(self, y, x, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True,
                 max_iter=1, epsilon=0.00001, A1='hom_sc',
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None):
        # Validate user input (shapes aligned, weights required).
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        # Expand the endogenous variables/instruments with the spatial lag
        # of y and (optionally) spatial lags of q.
        yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
        x_constant = USER.check_constant(x)
        BaseGM_Combo_Hom.__init__(
            self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
            w_lags=w_lags, A1=A1, lag_q=lag_q,
            max_iter=max_iter, epsilon=epsilon)
        # betas ends with [... , rho, lambda]: the spatial autoregressive
        # coefficient rho is second to last (lambda is appended last).
        self.rho = self.betas[-2]
        # Reduced-form predictions/residuals; the last column of yend2 is
        # the spatial lag of y added by set_endog above.
        self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy,
                                                 yend2[:, -1].reshape(self.n, 1), self.rho)
        set_warn(self, warn)
        self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
        # Attach the display names used by the summary printer.
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_yend = USER.set_name_yend(name_yend, yend)
        self.name_yend.append(USER.set_name_yend_sp(self.name_y))
        self.name_z = self.name_x + self.name_yend
        self.name_z.append('lambda')  # listing lambda last
        self.name_q = USER.set_name_q(name_q, q)
        self.name_q.extend(
            USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
        self.name_h = USER.set_name_h(self.name_x, self.name_q)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Combo_Hom(reg=self, w=w, vm=vm)
# Functions
def moments_hom(w, wA1, wA2, u):
    '''
    Compute the G matrix and g vector of the homoskedastic moment
    conditions as in Anselin [Anselin2011]_ (2011).
    ...

    Parameters
    ----------
    w           : Sparse matrix
                  Spatial weights sparse matrix
    wA1         : Sparse matrix
                  Moment matrix A1 derived from w
    wA2         : Sparse matrix
                  Moment matrix A2 derived from w
    u           : array
                  Residuals. nx1 array assumed to be aligned with w

    Returns
    -------
    moments     : list
                  List of two arrays corresponding to the matrices 'G' and
                  'g', respectively.
    '''
    n = w.shape[0]
    wu = w * u
    # g vector: the quadratic forms u'A1u and u'A2u, averaged over n.
    quad1 = np.dot(u.T, wA1 * u)
    quad2 = np.dot(u.T, wA2 * u)
    g = np.array([quad1[0], quad2[0]]) / n
    # G matrix: cross products of Wu with each moment matrix.
    cross1 = wu.T * wA1
    cross2 = wu.T * wA2
    G = np.array([[2 * np.dot(cross1, u)[0][0], -np.dot(cross1, wu)[0][0]],
                  [2 * np.dot(cross2, u)[0][0], -np.dot(cross2, wu)[0][0]]]) / n
    return [G, g]
def get_vc_hom(w, wA1, wA2, reg, lambdapar, z_s=None, for_omegaOLS=False):
    r'''
    VC matrix \psi of Spatial error with homoscedasticity. As in
    Anselin (2011) [Anselin2011]_ (p. 20)
    ...

    Parameters
    ----------
    w            : Sparse matrix
                   Spatial weights sparse matrix
    wA1          : Sparse matrix
                   Moment matrix A1 derived from w
    wA2          : Sparse matrix
                   Moment matrix A2 derived from w
    reg          : reg
                   Regression object
    lambdapar    : float
                   Spatial parameter estimated in previous step of the
                   procedure
    z_s          : array
                   optional argument for spatially filtered Z (to be
                   passed only if endogenous variables are present)
    for_omegaOLS : boolean
                   If True (default=False), it also returns P, needed
                   only in the computation of Omega

    Returns
    -------
    psi         : array
                  2x2 VC matrix
    a1          : array
                  nx1 vector a1. If z_s=None, a1 = 0.
    a2          : array
                  nx1 vector a2. If z_s=None, a2 = 0.
    p           : array
                  P matrix. If z_s=None or for_omegaOLS=False, p=0.
    '''
    # Moments of the spatially filtered residuals.
    u_s = get_spFilter(w, lambdapar, reg.u)
    n = float(w.shape[0])
    sig2 = np.dot(u_s.T, u_s) / n
    mu3 = np.sum(u_s ** 3) / n
    mu4 = np.sum(u_s ** 4) / n
    # Trace terms entering psi; the A2 contributions carry a factor of 2.
    tr11 = wA1 * wA1
    tr11 = np.sum(tr11.diagonal())
    tr12 = wA1 * (wA2 * 2)
    tr12 = np.sum(tr12.diagonal())
    tr22 = wA2 * wA2 * 2
    tr22 = np.sum(tr22.diagonal())
    vecd1 = np.array([wA1.diagonal()]).T
    psi11 = 2 * sig2 ** 2 * tr11 + \
        (mu4 - 3 * sig2 ** 2) * np.dot(vecd1.T, vecd1)
    psi12 = sig2 ** 2 * tr12
    psi22 = sig2 ** 2 * tr22
    a1, a2, p = 0., 0., 0.
    if for_omegaOLS:
        # OLS case: P is built from the spatially filtered X only.
        x_s = get_spFilter(w, lambdapar, reg.x)
        p = la.inv(spdot(x_s.T, x_s) / n)
    if issubclass(type(z_s), np.ndarray) or \
            issubclass(type(z_s), SP.csr.csr_matrix) or \
            issubclass(type(z_s), SP.csc.csc_matrix):
        # Endogenous case: correction terms a1, a2 and projection P based
        # on the instruments H. Note this overwrites the OLS p above when
        # both for_omegaOLS and z_s are supplied.
        alpha1 = (-2 / n) * spdot(z_s.T, wA1 * u_s)
        alpha2 = (-2 / n) * spdot(z_s.T, wA2 * u_s)
        hth = spdot(reg.h.T, reg.h)
        hthni = la.inv(hth / n)
        htzsn = spdot(reg.h.T, z_s) / n
        p = spdot(hthni, htzsn)
        p = spdot(p, la.inv(spdot(htzsn.T, p)))
        hp = spdot(reg.h, p)
        a1 = spdot(hp, alpha1)
        a2 = spdot(hp, alpha2)
        psi11 = psi11 + \
            sig2 * spdot(a1.T, a1) + \
            2 * mu3 * spdot(a1.T, vecd1)
        psi12 = psi12 + \
            sig2 * spdot(a1.T, a2) + \
            mu3 * spdot(a2.T, vecd1)  # 3rd term=0
        psi22 = psi22 + \
            sig2 * spdot(a2.T, a2)  # 3rd&4th terms=0 bc vecd2=0
    psi = np.array(
        [[psi11[0][0], psi12[0][0]], [psi12[0][0], psi22[0][0]]]) / n
    return psi, a1, a2, p
def get_omega_hom(w, wA1, wA2, reg, lamb, G):
    '''
    Omega VC matrix for Hom models with endogenous variables computed as in
    Anselin (2011) [Anselin2011]_ (p. 21).
    ...

    Parameters
    ----------
    w           : Sparse matrix
                  Spatial weights sparse matrix
    wA1         : Sparse matrix
                  Moment matrix A1 derived from w
    wA2         : Sparse matrix
                  Moment matrix A2 derived from w
    reg         : reg
                  Regression object
    lamb        : float
                  Spatial parameter estimated in previous step of the
                  procedure
    G           : array
                  Matrix 'G' of the moment equation

    Returns
    -------
    omega       : array
                  Omega matrix of VC of the model
    sig2        : float
                  Sigma squared (computed on the spatially filtered
                  residuals) used in the Omega computation
    '''
    n = float(w.shape[0])
    # Spatially filtered variables and residual moments.
    z_s = get_spFilter(w, lamb, reg.z)
    u_s = get_spFilter(w, lamb, reg.u)
    sig2 = np.dot(u_s.T, u_s) / n
    mu3 = np.sum(u_s ** 3) / n
    vecdA1 = np.array([wA1.diagonal()]).T
    psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, z_s)
    j = np.dot(G, np.array([[1.], [2 * lamb]]))
    psii = la.inv(psi)
    # H'[a1 a2] appears in psiDL; compute it once (the original computed
    # it twice, once into an unused local).
    t2 = spdot(reg.h.T, np.hstack((a1, a2)))
    psiDL = (mu3 * spdot(reg.h.T, np.hstack((vecdA1, np.zeros((int(n), 1))))) +
             sig2 * t2) / n
    oDD = spdot(la.inv(spdot(reg.h.T, reg.h)), spdot(reg.h.T, z_s))
    oDD = sig2 * la.inv(spdot(z_s.T, spdot(reg.h, oDD)))
    oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
    oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
    o_upper = np.hstack((oDD, oDL))
    o_lower = np.hstack((oDL.T, oLL))
    return np.vstack((o_upper, o_lower)), float(sig2)
def get_omega_hom_ols(w, wA1, wA2, reg, lamb, G):
    '''
    Omega VC matrix for Hom models without endogenous variables (OLS) computed
    as in Anselin (2011) [Anselin2011]_.
    ...

    Parameters
    ----------
    w           : Sparse matrix
                  Spatial weights sparse matrix
    wA1         : Sparse matrix
                  Moment matrix A1 derived from w
    wA2         : Sparse matrix
                  Moment matrix A2 derived from w
    reg         : reg
                  Regression object
    lamb        : float
                  Spatial parameter estimated in previous step of the
                  procedure
    G           : array
                  Matrix 'G' of the moment equation

    Returns
    -------
    omega       : array
                  Omega matrix of VC of the model
    sig2        : float
                  Sigma squared (computed on the spatially filtered
                  residuals) used in the Omega computation
    '''
    n = float(w.shape[0])
    # Spatially filtered regressors and residuals.
    x_s = get_spFilter(w, lamb, reg.x)
    u_s = get_spFilter(w, lamb, reg.u)
    sig2 = np.dot(u_s.T, u_s) / n
    vecdA1 = np.array([wA1.diagonal()]).T
    # No z_s here: a1 = a2 = 0; for_omegaOLS builds P from filtered X.
    psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, for_omegaOLS=True)
    j = np.dot(G, np.array([[1.], [2 * lamb]]))
    psii = la.inv(psi)
    oDD = sig2 * la.inv(spdot(x_s.T, x_s))
    oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
    mu3 = np.sum(u_s ** 3) / n
    psiDL = (mu3 * spdot(reg.x.T, np.hstack((vecdA1, np.zeros((int(n), 1)))))) / n
    oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
    o_upper = np.hstack((oDD, oDL))
    o_lower = np.hstack((oDL.T, oLL))
    return np.vstack((o_upper, o_lower)), float(sig2)
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
# Run the doctest suite when the module is executed as a script.
if __name__ == '__main__':
    _test()
| ljwolf/pysal | pysal/spreg/error_sp_hom.py | Python | bsd-3-clause | 57,745 |
from flask import Flask
from flask.ext.admin import Admin, BaseView, expose
class MyView(BaseView):
    # Minimal custom admin view: the method decorated with @expose('/')
    # serves the view's root URL inside the admin interface.
    @expose('/')
    def index(self):
        # Renders 'index.html' from the admin template search path.
        return self.render('index.html')
app = Flask(__name__)
# Mount the admin interface and register the custom view under the
# menu name 'Hello'.
admin = Admin(app)
admin.add_view(MyView(name='Hello'))
app.run()
| 0atman/flask-admin | examples/quickstart/second.py | Python | bsd-3-clause | 272 |
# Announce load on the server console when this module is imported.
print('Importing command definitions...')
from jycraft.plugin.interpreter import PyContext
from org.bukkit import Bukkit
from org.bukkit import Location
from org.bukkit import Material
from org.bukkit import Effect
from org.bukkit.command import Command
from org.bukkit.event import Listener, EventPriority
from random import *
SERVER = Bukkit.getServer()
WORLD = SERVER.getWorlds().get(0)  # first (default) world
# Handy day-time tick values for time().
MORNING = 2000
NOON = 6000
EVENING = 14000
NIGHT = 18000
# reflection to get command map
# Bukkit does not expose its CommandMap publicly, so pry open the
# private field via reflection; used by registercommand() below.
_commandMapField = SERVER.getClass().getDeclaredField("commandMap")
_commandMapField.setAccessible(True)
_commandMap = _commandMapField.get(SERVER)
#full list of BlockTypes available in JavaDocs on canarymod.net
# Friendly aliases for common Bukkit materials.
AIR = Material.AIR
STONE = Material.STONE
GRASS = Material.GRASS
DIRT = Material.DIRT
COBBLESTONE = Material.COBBLESTONE
WOOD_PLANKS = Material.WOOD  # Material.WOOD is planks (per the alias name)
# SAPLING = BlockType.OakSapling
# BEDROCK = BlockType.Bedrock
# WATER_FLOWING = BlockType.WaterFlowing
# WATER = WATER_FLOWING
# WATER_STATIONARY = BlockType.Water
# LAVA_FLOWING = BlockType.LavaFlowing
# LAVA = LAVA_FLOWING
# LAVA_STATIONARY = BlockType.Lava
# SAND = BlockType.Sand
# GRAVEL = BlockType.Gravel
# GOLD_ORE = BlockType.GoldOre
# IRON_ORE = BlockType.IronOre
# COAL_ORE = BlockType.CoalOre
# WOOD = BlockType.OakLog
# LEAVES = BlockType.OakLeaves
# GLASS = BlockType.Glass
# LAPIS_LAZULI_ORE = BlockType.LapisOre
# LAPIS_LAZULI_BLOCK = BlockType.LapisBlock
# SANDSTONE = BlockType.Sandstone
# BED = BlockType.Bed
# COBWEB = BlockType.Web
# GRASS_TALL = BlockType.TallGrass
# WOOL = BlockType.WhiteWool
# FLOWER_YELLOW = BlockType.Dandelion
# FLOWER_CYAN = BlockType.BlueOrchid
# MUSHROOM_BROWN = BlockType.BrownMushroom
# MUSHROOM_RED = BlockType.RedMushroom
# GOLD_BLOCK = BlockType.GoldBlock
# IRON_BLOCK = BlockType.IronBlock
# STONE_SLAB_DOUBLE = BlockType.DoubleStoneSlab
# STONE_SLAB = BlockType.StoneSlab
# BRICK_BLOCK = BlockType.BrickBlock
# TNT = BlockType.TNT
# BOOKSHELF = BlockType.Bookshelf
# MOSS_STONE = BlockType.MossyCobble
# OBSIDIAN = BlockType.Obsidian
# TORCH = BlockType.Torch
# FIRE = BlockType.FireBlock
# STAIRS_WOOD = BlockType.OakStairs
# CHEST = BlockType.Chest
# DIAMOND_ORE = BlockType.DiamondOre
# DIAMOND_BLOCK = BlockType.DiamondBlock
# CRAFTING_TABLE = BlockType.Workbench
# FARMLAND = BlockType.Farmland
# FURNACE_INACTIVE = BlockType.Furnace
# FURNACE_ACTIVE = BlockType.BurningFurnace
# DOOR_WOOD = BlockType.WoodenDoor
# LADDER = BlockType.Ladder
# STAIRS_COBBLESTONE = BlockType.StoneStairs
# DOOR_IRON = BlockType.IronDoor
# REDSTONE_ORE = BlockType.RedstoneOre
# SNOW = BlockType.Snow
# ICE = BlockType.Ice
# SNOW_BLOCK = BlockType.SnowBlock
# CACTUS = BlockType.Cactus
# CLAY = BlockType.Clay
# SUGAR_CANE = BlockType.Reed
# FENCE = BlockType.Fence
# GLOWSTONE_BLOCK = BlockType.GlowStone
# STONE_BRICK = BlockType.StoneBrick
# GLASS_PANE = BlockType.GlassPane
# MELON = BlockType.Melon
# FENCE_GATE = BlockType.FenceGate
def pos(*args):
    # Build a Location in the default world; args are forwarded to the
    # Location constructor (typically x, y, z).
    return Location(WORLD, *args)
def parseargswithpos(args, kwargs, asint=True, ledger=None):
    """Parse a positional x/y/z triple plus optional extra parameters.

    args   : positional arguments; the first three are x, y, z
    kwargs : keyword arguments consulted for the extras
    asint  : when True, coerce x/y/z to int and store them in the result
             (when False, no coordinate keys are set -- original behavior)
    ledger : maps result key -> [kwarg name, positional offset, default].
             Each extra is resolved from kwargs first, then from the
             positional args that follow the coordinate triple, then the
             default. (Was a mutable default argument; now None-guarded.)

    Returns a dict with 'x', 'y', 'z' (when asint) plus one entry per
    ledger key.
    """
    if ledger is None:
        ledger = {}
    results = {}
    base = 3  # number of leading coordinate arguments
    coords = [args[0], args[1], args[2]]
    if asint:
        # Renamed from 'pos' to avoid shadowing the module-level pos().
        results['x'] = int(coords[0])
        results['y'] = int(coords[1])
        results['z'] = int(coords[2])
    # .items() instead of .iteritems() keeps this Python 2/3 compatible.
    for key, spec in ledger.items():
        results[key] = kwargs.get(spec[0], None)
        if results[key] is None:
            if len(args) > base + spec[1]:
                results[key] = args[base + spec[1]]
            else:
                results[key] = spec[2]
    return results
def getplayer(name):
    # Look up an online player by name via the server API.
    return SERVER.getPlayer(name)
def randomplayer():
    # Pick a uniformly random online player.
    pl = SERVER.getOnlinePlayers()
    return choice(pl)
def yell(message):
    # Broadcast a chat message to everyone on the server.
    SERVER.broadcastMessage(message)
def time(time):
    # Set the world clock, in ticks (see MORNING/NOON/EVENING/NIGHT).
    # Note: the parameter deliberately shares the function's name.
    WORLD.setTime(time)
def weather(rainsnow, thunder):
    # Toggle precipitation and thunder independently (both booleans).
    WORLD.setStorm(rainsnow)
    WORLD.setThundering(thunder)
def explosion(*args, **kwargs):
    # Detonate an explosion at (x, y, z); optional 'power' defaults to 8.
    r = parseargswithpos(args, kwargs, ledger={'power': ['power', 0, 8]})
    WORLD.createExplosion(r['x'], r['y'], r['z'], r['power'], True)
def teleport(*args, **kwargs):
    """Teleport a player to (x, y, z); 'whom' names the player and
    defaults to 'GameStartSchool'."""
    opts = parseargswithpos(args, kwargs,
                            ledger={'whom': ['whom', 0, 'GameStartSchool']})
    target = getplayer(opts['whom'])
    target.teleport(pos(opts['x'], opts['y'], opts['z']))
def setblock(*args, **kwargs):
    # Set the block at (x, y, z) to a material ('type', default COBBLESTONE).
    r = parseargswithpos(args, kwargs, ledger={'type': ['type', 0, COBBLESTONE]})
    WORLD.getBlockAt(r['x'], r['y'], r['z']).setType(r['type'])
def cube(*args, **kwargs):
    """Build a solid cube of blocks anchored at (x, y, z).

    Extras: 'type' (material, default COBBLESTONE) and 'size'
    (edge length, default 4, capped at 12).
    """
    opts = parseargswithpos(args, kwargs, ledger={
        'type': ['type', 0, COBBLESTONE],
        'size': ['size', 1, 4]})
    edge = min(opts['size'], 12)
    for dx in range(edge):
        for dy in range(edge):
            for dz in range(edge):
                setblock(dx + opts['x'], dy + opts['y'], dz + opts['z'],
                         opts['type'])
def bolt(*args, **kwargs):
    # Strike lightning at (x, y, z).
    r = parseargswithpos(args, kwargs)
    WORLD.strikeLightning(pos(r['x'], r['y'], r['z']))
def bless(*args, **kwargs):
    # Play a particle effect at (x, y, z). Optional extras: 'type'
    # (Effect), 'vx'/'vy'/'vz', 'sp', 'q', 'r', and 'block'/'data' for
    # block-based effects. Presumably these map to Spigot playEffect's
    # offset/speed/particleCount/radius parameters -- TODO confirm
    # against the Spigot API.
    r = parseargswithpos(args, kwargs, ledger={
        'type': ['type', 0, Effect.COLOURED_DUST],
        'vx': ['vx', 1, 1],
        'vy': ['vy', 2, 1],
        'vz': ['vz', 3, 1],
        'sp': ['sp', 4, 100],
        'q': ['q', 5, 100],
        'r': ['r', 6, 20],
        'block': ['block', 7, COBBLESTONE],
        'data': ['data', 8, 0]})
    WORLD.spigot().playEffect(pos(r['x'], r['y'], r['z']),
                              r['type'], r['block'].getId(),
                              r['data'], r['vx'], r['vy'], r['vz'],
                              r['sp'], r['q'], r['r'])
# don't know how to do this in spigot
# def lookingat(player):
# return LineTracer(player).getTargetBlock()
class SpigotCommand(Command):
    # Minimal Bukkit Command that delegates execution to a Python callable.
    def __init__(self, name, execfunc):
        super(SpigotCommand, self).__init__(name)
        self.execfunc = execfunc  # callable(caller, parameters)

    def execute(self, caller, label, parameters):
        # Bukkit entry point; the label (alias actually typed) is ignored.
        self.execfunc(caller, parameters)
def registercommand(name, execfunc):
    # Register a new /name command backed by a Python function, via the
    # reflected _commandMap (namespace "jycraft").
    # Use like this:
    # >>> def functiontest(caller, params):
    # ...     yell(params[0])
    # >>> registercommand("test", functiontest)
    _commandMap.register("jycraft", SpigotCommand(name, execfunc))
class EventListener(Listener):
    # Bukkit Listener wrapping a plain Python callback.
    def __init__(self, func):
        self.func = func  # callable(event)

    def execute(self, event):
        self.func(event)
def execute(listener, event):
    # Trampoline passed to registerEvent(): Bukkit calls it with the
    # listener instance and the fired event.
    listener.execute(event)
def registerhook(hookCls, execfunc, priority=EventPriority.NORMAL):
    # Subscribe a Python callback to a Bukkit event class.
    # Use like this:
    # >>> from mcapi import *
    # >>> from org.bukkit.event.block import BlockPlaceEvent
    # >>> def place(e):
    # ...     yell("Placed {}".format(e.getBlockPlaced()))
    # >>> registerhook(BlockPlaceEvent, place)
    SERVER.getPluginManager().registerEvent(hookCls, EventListener(execfunc), priority, execute, PyContext.getPlugin())
| Jycraft/jycraft-server-plugin | python/spigot/mcapi.py | Python | bsd-3-clause | 7,416 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 08:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Second step of the initial setup for the cases app: the foreign
    # keys are added once the related apps' tables exist.

    initial = True

    dependencies = [
        ('cases', '0001_initial'),
        ('offices', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='case',
            name='created_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='case',
            name='office',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='offices.Office'),
        ),
    ]
| ad-m/foundation-manager | foundation/cases/migrations/0002_auto_20160512_1041.py | Python | bsd-3-clause | 888 |
from flask import Flask, redirect, url_for, session, request
from flask_oauthlib.client import OAuth, OAuthException
# WARNING: example only -- real credentials should come from config or
# the environment, never be hard-coded in source.
FACEBOOK_APP_ID = '188477911223606'
FACEBOOK_APP_SECRET = '621413ddea2bcc5b2e83d42fc40495de'

app = Flask(__name__)
app.debug = True  # example app; never enable debug in production
app.secret_key = 'development'
oauth = OAuth(app)

# Remote application descriptor for Facebook's OAuth2 endpoints.
facebook = oauth.remote_app(
    'facebook',
    consumer_key=FACEBOOK_APP_ID,
    consumer_secret=FACEBOOK_APP_SECRET,
    request_token_params={'scope': 'email'},  # request the email scope
    base_url='https://graph.facebook.com',
    request_token_url=None,  # OAuth2: no request-token step
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth'
)
@app.route('/')
def index():
    # Entry point: immediately send the visitor into the login flow.
    return redirect(url_for('login'))
@app.route('/login')
def login():
    # Start the OAuth dance by redirecting to Facebook's authorize URL.
    callback = url_for(
        'facebook_authorized',
        next=request.args.get('next') or request.referrer or None,
        _external=True  # the provider needs an absolute callback URL
    )
    return facebook.authorize(callback=callback)
@app.route('/login/authorized')
def facebook_authorized():
    # OAuth callback: exchange the authorization response for a token,
    # store it in the session, and greet the logged-in user.
    resp = facebook.authorized_response()
    if resp is None:
        # User denied access or the provider returned an error.
        # NOTE(review): assumes 'error_reason'/'error_description' are in
        # the query string; a missing key would raise -- confirm.
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    if isinstance(resp, OAuthException):
        return 'Access denied: %s' % resp.message
    # The tokengetter below reads this back for API calls.
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    return 'Logged in as id=%s name=%s redirect=%s' % \
        (me.data['id'], me.data['name'], request.args.get('next'))
@facebook.tokengetter
def get_facebook_oauth_token():
    # Called by flask-oauthlib whenever an API request needs the token.
    return session.get('oauth_token')
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run()
| Fleurer/flask-oauthlib | example/facebook.py | Python | bsd-3-clause | 1,663 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    # AlterField only re-declares the choices/verbose_name metadata for
    # permission_type -- presumably a wording change with no actual
    # database schema change (TODO confirm).

    dependencies = [
        ("wagtailcore", "0016_change_page_url_path_to_text_field"),
    ]

    operations = [
        migrations.AlterField(
            model_name="grouppagepermission",
            name="permission_type",
            field=models.CharField(
                choices=[
                    ("add", "Add/edit pages you own"),
                    ("edit", "Edit any page"),
                    ("publish", "Publish any page"),
                    ("lock", "Lock/unlock any page"),
                ],
                max_length=20,
                verbose_name="Permission type",
            ),
            preserve_default=True,
        ),
    ]
| rsalmaso/wagtail | wagtail/core/migrations/0017_change_edit_page_permission_description.py | Python | bsd-3-clause | 771 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import sentry.db.models.fields.gzippeddict
class Migration(migrations.Migration):
    # Initial schema for the Django-backed nodestore: a simple
    # id -> gzipped-dict blob table with an indexed timestamp.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Node',
            fields=[
                ('id', models.CharField(max_length=40, serialize=False, primary_key=True)),
                ('data', sentry.db.models.fields.gzippeddict.GzippedDictField()),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
            ],
        ),
    ]
| beeftornado/sentry | src/sentry/nodestore/migrations/0001_initial.py | Python | bsd-3-clause | 654 |
"""
Roleplaying base system for Evennia
Contribution - Griatch, 2015
This module contains the ContribRPObject, ContribRPRoom and
ContribRPCharacter typeclasses. If you inherit your
objects/rooms/characters from these (or make them the defaults)
you will get the following features:
- Objects/Rooms will get the ability to have poses and will report
the poses of items inside them (the latter most useful for Rooms).
- Characters will get poses and also sdescs (short descriptions)
that will be used instead of their keys. They will gain commands
for managing recognition (custom sdesc-replacement), masking
themselves as well as an advanced free-form emote command.
To use, simply import the typeclasses you want from this module and use
them to create your objects, or set them to default.
In more detail, This RP base system introduces the following features
to a game, common to many RP-centric games:
- emote system using director stance emoting (names/sdescs).
This uses a customizable replacement noun (/me, @ etc) to
represent you in the emote. You can use /sdesc, /nick, /key or
/alias to reference objects in the room. You can use any
number of sdesc sub-parts to differentiate a local sdesc, or
use /1-sdesc etc to differentiate them. The emote also
identifies nested says.
- sdesc obscuration of real character names for use in emotes
and in any referencing such as object.search(). This relies
on an SdescHandler `sdesc` being set on the Character and
makes use of a custom Character.get_display_name hook. If
sdesc is not set, the character's `key` is used instead. This
is particularly used in the emoting system.
- recog system to assign your own nicknames to characters, can then
be used for referencing. The user may recog a user and assign
any personal nick to them. This will be shown in descriptions
and used to reference them. This is making use of the nick
functionality of Evennia.
- masks to hide your identity (using a simple lock).
- pose system to set room-persistent poses, visible in room
descriptions and when looking at the person/object. This is a
simple Attribute that modifies how the characters is viewed when
in a room as sdesc + pose.
- in-emote says, including seamless integration with language
obscuration routine (such as contrib/rplanguage.py)
Examples:
> look
Tavern
The tavern is full of nice people
*A tall man* is standing by the bar.
Above is an example of a player with an sdesc "a tall man". It is also
an example of a static *pose*: The "standing by the bar" has been set
by the player of the tall man, so that people looking at him can tell
at a glance what is going on.
> emote /me looks at /tall and says "Hello!"
I see:
Griatch looks at Tall man and says "Hello".
Tall man (assuming his name is Tom) sees:
The godlike figure looks at Tom and says "Hello".
"""
from builtins import object
import re
from re import escape as re_escape
import itertools
from evennia import DefaultObject, DefaultCharacter
from evennia import Command, CmdSet
from evennia import ansi
from evennia.utils.utils import lazy_property
#------------------------------------------------------------
# Emote parser
#------------------------------------------------------------
# Settings
# The prefix is the (single-character) symbol used to find the start
# of a object reference, such as /tall (note that
# the system will understand multi-word references like '/a tall man' too).
_PREFIX = "/"
# The num_sep is the (single-character) symbol used to separate the
# sdesc from the number when trying to separate identical sdescs from
# one another. This is the same syntax used in the rest of Evennia, so
# by default, multiple "tall" can be separated by entering 1-tall,
# 2-tall etc.
_NUM_SEP = "-"
# Texts
_EMOTE_NOMATCH_ERROR = \
"""{{RNo match for {{r{ref}{{R.{{n"""
_EMOTE_MULTIMATCH_ERROR = \
"""{{RMultiple possibilities for {ref}:
{{r{reflist}{{n"""
_RE_FLAGS = re.MULTILINE + re.IGNORECASE + re.UNICODE
_RE_PREFIX = re.compile(r"^%s" % _PREFIX, re.UNICODE)
# The num_sep is the (single-character) symbol used to separate the
# sdesc from the number when trying to separate identical sdescs from
# one another. This is the same syntax used in the rest of Evennia, so
# by default, multiple "tall" can be separated by entering 1-tall,
# 2-tall etc.
_NUM_SEP = "-"
# This regex will return groups (num, word), where num is an optional counter to
# separate multimatches from one another and word is the first word in the
# marker. So entering "/tall man" will return groups ("", "tall")
# and "/2-tall man" will return groups ("2", "tall").
_RE_OBJ_REF_START = re.compile(r"%s(?:([0-9]+)%s)*(\w+)" %
(_PREFIX, _NUM_SEP), _RE_FLAGS)
# Reference markers are used internally when distributing the emote to
# all that can see it. They are never seen by players and are on the form {#dbref}.
_RE_REF = re.compile(r"\{+\#([0-9]+)\}+")
# This regex is used to quickly reference one self in an emote.
_RE_SELF_REF = re.compile(r"/me|@", _RE_FLAGS)
# regex for non-alphanumberic end of a string
_RE_CHAREND = re.compile(r"\W+$", _RE_FLAGS)
# reference markers for language
_RE_REF_LANG = re.compile(r"\{+\##([0-9]+)\}+")
# language says in the emote are on the form "..." or langname"..." (no spaces).
# this regex returns in groups (langname, say), where langname can be empty.
_RE_LANGUAGE = re.compile(r"(?:\((\w+)\))*(\".+?\")")
# the emote parser works in two steps:
# 1) convert the incoming emote into an intermediary
# form with all object references mapped to ids.
# 2) for every person seeing the emote, parse this
# intermediary form into the one valid for that char.
class EmoteError(Exception):
pass
class SdescError(Exception):
pass
class RecogError(Exception):
pass
class LanguageError(Exception):
pass
def _dummy_process(text, *args, **kwargs):
"Pass-through processor"
return text
# emoting mechanisms

def ordered_permutation_regex(sentence):
    """
    Builds a regex that matches 'ordered permutations' of a sentence's
    words.

    Args:
        sentence (str): The sentence to build a match pattern to

    Returns:
        regex (re object): Compiled regex object represented the
            possible ordered permutations of the sentence, from longest to
            shortest.

    Example:
         The sdesc_regex for an sdesc of " very tall man" will
         result in the following allowed permutations,
         regex-matched in inverse order of length (case-insensitive):
         "the very tall man", "the very tall", "very tall man",
         "very tall", "the very", "tall man", "the", "very", "tall",
         and "man".
         We also add regex to make sure it also accepts num-specifiers,
         like /2-tall.

    """
    # strip {#nnn} markers from sentence, replace with nnn
    sentence = _RE_REF.sub(r"\1", sentence)
    # strip {##nnn} markers, replace with nnn
    sentence = _RE_REF_LANG.sub(r"\1", sentence)
    # strip self-ref markers from sentence
    sentence = _RE_SELF_REF.sub(r"", sentence)

    # Every 'ordered permutation' is simply a contiguous run of words,
    # so generate all (start, stop) slices directly.
    words = sentence.split()
    nwords = len(words)
    alternatives = set()
    for start in range(nwords):
        for stop in range(start + 1, nwords + 1):
            phrase = " ".join(words[start:stop])
            # allow an optional num-specifier prefix like /2-tall and
            # require a word boundary after the phrase
            alternatives.add(_PREFIX + r"[0-9]*%s*%s(?=\W|$)+"
                             % (_NUM_SEP, re_escape(phrase).rstrip("\\")))
    # combine into one alternation, longest alternatives first so the
    # regex engine prefers the most specific match
    return r"|".join(sorted(alternatives, key=len, reverse=True))
def parse_language(speaker, emote):
    """
    Parse the emote for language. This is
    used with a plugin for handling languages.

    Args:
        speaker (Object): The object speaking.
        emote (str): An emote possibly containing
            language references.

    Returns:
        (emote, mapping) (tuple): A tuple where the
            `emote` is the emote string with all says
            (including quotes) replaced with reference
            markers on the form {##n} where n is a running
            number. The `mapping` is a dictionary between
            the markers and a tuple (langname, saytext), where
            langname can be None.

    Raises:
        LanguageError: If an invalid language was specified.

    Notes:
        Note that no errors are raised if the wrong language identifier
        is given.
        This data, together with the identity of the speaker, is
        intended to be used by the "listener" later, since with this
        information the language skill of the speaker can be offset to
        the language skill of the listener to determine how much
        information is actually conveyed.

    """
    # unescape any pre-existing {##id} markers to plain "id" so they
    # cannot collide with the markers we insert below
    emote = _RE_REF_LANG.sub(r"\1", emote)

    errors = []
    mapping = {}
    # walk the says from the end of the string backwards, so the
    # in-place span replacements never invalidate earlier match indices
    say_matches = list(_RE_LANGUAGE.finditer(emote))
    for imatch, say_match in enumerate(reversed(say_matches)):
        # saytext includes its surrounding quotes; langname may be None
        langname, saytext = say_match.groups()
        # the key is simply the running number of the (reversed) match
        key = "##%i" % imatch
        emote = "%s{%s}%s" % (emote[:say_match.start()], key,
                              emote[say_match.end():])
        mapping[key] = (langname, saytext)

    if errors:
        # catch errors and report
        raise LanguageError("\n".join(errors))

    # at this point all says have been replaced with {##nn} markers
    # and mapping maps 1:1 to this.
    return emote, mapping
def parse_sdescs_and_recogs(sender, candidates, string, search_mode=False):
    """
    Read a raw emote and parse it into an intermediary
    format for distributing to all observers.

    Args:
        sender (Object): The object sending the emote. This object's
            recog data will be considered in the parsing.
        candidates (iterable): A list of objects valid for referencing
            in the emote.
        string (str): The string (like an emote) we want to analyze for keywords.
        search_mode (bool, optional): If `True`, the "emote" is a query string
            we want to analyze. If so, the return value is changed.

    Returns:
        (emote, mapping) (tuple): If `search_mode` is `False`
            (default), a tuple where the emote is the emote string, with
            all references replaced with internal-representation {#dbref}
            markers and mapping is a dictionary `{"#dbref":obj, ...}`.
        result (list): If `search_mode` is `True` we are
            performing a search query on `string`, looking for a specific
            object. A list with zero, one or more matches.

    Raises:
        EmoteError: For various ref-matching errors.

    Notes:
        The parser analyzes and should understand the following
        _PREFIX-tagged structures in the emote:
        - self-reference (/me)
        - recogs (any part of it) stored on emoter, matching obj in `candidates`.
        - sdesc (any part of it) from any obj in `candidates`.
        - N-sdesc, N-recog separating multi-matches (1-tall, 2-tall)
        - says, "..." are

    """
    # Load all candidate regex tuples [(regex, obj, sdesc/recog),...]:
    # self-reference first, then the sender's recogs of the candidates,
    # then the candidates' own sdescs.
    candidate_regexes = \
        [(_RE_SELF_REF, sender, sender.sdesc.get())] + \
        [sender.recog.get_regex_tuple(obj)
            for obj in candidates if hasattr(obj, "recog")] + \
        [obj.sdesc.get_regex_tuple()
            for obj in candidates if hasattr(obj, "sdesc")]
    # filter out non-found data
    candidate_regexes = [tup for tup in candidate_regexes if tup]

    # escape mapping syntax on the form {#id} if it exists already in emote,
    # if so it is replaced with just "id".
    string = _RE_REF.sub(r"\1", string)

    # we now loop over all references and analyze them
    mapping = {}
    errors = []
    obj = None
    nmatches = 0
    for marker_match in reversed(list(_RE_OBJ_REF_START.finditer(string))):
        # we scan backwards so we can replace in-situ without messing
        # up later occurrences. Given a marker match, query from
        # start index forward for all candidates.

        # first see if there is a number given (e.g. 1-tall)
        num_identifier, _ = marker_match.groups("")  # "" if no num given, rather than None
        istart0 = marker_match.start()
        istart = istart0

        # match every candidate regex against the string following the marker
        matches = ((reg.match(string[istart:]), obj, text)
                   for reg, obj, text in candidate_regexes)
        # score matches by how long a part of the string was matched
        matches = [(match.end() if match else -1, obj, text)
                   for match, obj, text in matches]
        maxscore = max(score for score, obj, text in matches)
        # extract all valid matches tied for the highest score
        bestmatches = [(obj, text) for score, obj, text in matches
                       if maxscore == score != -1]
        nmatches = len(bestmatches)

        if not nmatches:
            # no matches
            obj = None
            nmatches = 0
        elif nmatches == 1:
            # an exact match.
            obj = bestmatches[0][0]
            nmatches = 1
        elif all(bestmatches[0][0].id == obj.id for obj, text in bestmatches):
            # multi-match but all matches actually reference the same
            # obj (could happen with clashing recogs + sdescs)
            obj = bestmatches[0][0]
            nmatches = 1
        else:
            # multi-match.
            # was a numerical identifier given to help us separate the multi-match?
            inum = min(max(0, int(num_identifier) - 1), nmatches - 1) if num_identifier else None
            if inum is not None:
                # A valid inum is given. Use this to separate data.
                obj = bestmatches[inum][0]
                nmatches = 1
            else:
                # no identifier given - a real multimatch.
                obj = bestmatches

        if search_mode:
            # single-object search mode. Don't continue loop.
            break
        elif nmatches == 0:
            errors.append(_EMOTE_NOMATCH_ERROR.format(ref=marker_match.group()))
        elif nmatches == 1:
            key = "#%i" % obj.id
            string = string[:istart0] + "{%s}" % key + string[istart + maxscore:]
            mapping[key] = obj
        else:
            refname = marker_match.group()
            # BUGFIX: the original filtered this comprehension with
            # `if score == maxscore`, but `score` was only visible via
            # Python-2 list-comprehension leakage (a NameError on Py3,
            # and a stale value that could empty the list on Py2).
            # `bestmatches` already holds only top-scoring matches, so
            # no filter is needed.
            reflist = ["%s%s%s (%s%s)" % (inum + 1, _NUM_SEP,
                       _RE_PREFIX.sub("", refname), text,
                       " (%s)" % sender.key if sender == obj else "")
                       for inum, (obj, text) in enumerate(bestmatches)]
            errors.append(_EMOTE_MULTIMATCH_ERROR.format(
                          ref=marker_match.group(), reflist="\n    ".join(reflist)))

    if search_mode:
        # return list of object(s) matching
        if nmatches == 0:
            return []
        elif nmatches == 1:
            return [obj]
        else:
            return [tup[0] for tup in obj]

    if errors:
        # make sure to not let errors through.
        raise EmoteError("\n".join(errors))

    # at this point all references have been replaced with {#xxx} markers and the mapping contains
    # a 1:1 mapping between those inline markers and objects.
    return string, mapping
def send_emote(sender, receivers, emote, anonymous_add="first"):
    """
    Main access function for distribute an emote.

    Args:
        sender (Object): The one sending the emote.
        receivers (iterable): Receivers of the emote. These
            will also form the basis for which sdescs are
            'valid' to use in the emote.
        emote (str): The raw emote string as input by emoter.
        anonymous_add (str or None, optional): If `sender` is not
            self-referencing in the emote, this will auto-add
            `sender`'s data to the emote. Possible values are
                - None: No auto-add at anonymous emote
                - 'last': Add sender to the end of emote as [sender]
                - 'first': Prepend sender to start of emote.

    """
    try:
        # stage 1: map object references / says to {#dbref} / {##n} markers
        emote, obj_mapping = parse_sdescs_and_recogs(sender, receivers, emote)
        emote, language_mapping = parse_language(sender, emote)
    except (EmoteError, LanguageError) as err:
        # handle all error messages, don't hide actual coding errors
        # (err.message is Python-2 only)
        sender.msg(err.message)
        return

    if anonymous_add and not "#%i" % sender.id in obj_mapping:
        # no self-reference in the emote - add to the end
        key = "#%i" % sender.id
        obj_mapping[key] = sender
        if anonymous_add == 'first':
            # avoid inserting a space before a leading possessive ('s ...)
            possessive = "" if emote.startswith('\'') else " "
            emote = "%s%s%s" % ("{%s}" % key, possessive, emote)
        else:
            emote = "%s [%s]" % (emote, "{%s}" % key)

    # broadcast emote to everyone; stage 2 renders the markers
    # differently for each receiver
    for receiver in receivers:
        # we make a temporary copy that we can modify; prefer the
        # receiver's own recogs, falling back to sdescs/keys when the
        # receiver has no recog handler
        try:
            recog_get = receiver.recog.get
            mapping = dict((ref, recog_get(obj)) for ref, obj in obj_mapping.items())
        except AttributeError:
            mapping = dict((ref, obj.sdesc.get() if hasattr(obj, "sdesc") else obj.key)
                           for ref, obj in obj_mapping.items())
        # handle the language mapping, which always produce different keys ##nn
        try:
            process_language = receiver.process_language
        except AttributeError:
            process_language = _dummy_process
        for key, (langname, saytext) in language_mapping.iteritems():
            # color says
            mapping[key] = process_language(saytext, sender, langname)
        # make sure receiver always sees their real name
        rkey = "#%i" % receiver.id
        if rkey in mapping:
            mapping[rkey] = receiver.key
        # add color to sdesc strings
        try:
            process_sdesc = receiver.process_sdesc
        except AttributeError:
            process_sdesc = _dummy_process
        mapping = dict((key, process_sdesc(val, receiver))
                       for key, val in mapping.iteritems())
        # do the template replacement
        receiver.msg(emote.format(**mapping))
#------------------------------------------------------------
# Handlers for sdesc and recog
#------------------------------------------------------------
class SdescHandler(object):
    """
    Wraps all operations on an object's short description (sdesc).

    The handler keeps an in-memory cache of the sdesc and its matching
    regex so emote parsing can query them cheaply; the authoritative
    copies live in Attributes (`_sdesc`, `_sdesc_regex`) on the
    handled object.
    """
    def __init__(self, obj):
        """
        Initialize the handler.

        Args:
            obj (Object): The entity on which this handler is stored.

        """
        self.obj = obj
        self.sdesc = ""
        self.sdesc_regex = ""
        self._cache()

    def _cache(self):
        """
        Refresh the local cache from Attribute storage.
        """
        attrs = self.obj.attributes
        self.sdesc = attrs.get("_sdesc", default="")
        self.sdesc_regex = re.compile(attrs.get("_sdesc_regex", default=""), _RE_FLAGS)

    def add(self, sdesc, max_length=60):
        """
        Set a new sdesc on the object, replacing any previous one.

        Args:
            sdesc (str): The sdesc to set. Emote-parser markup is
                stripped before storing.
            max_length (int, optional): Maximum allowed sdesc length,
                measured without color codes.

        Returns:
            sdesc (str): The sdesc actually stored.

        Raises:
            SdescError: If the cleaned sdesc is longer than `max_length`.

        """
        # strip all emote-parser markup from the sdesc, innermost first
        for pattern, replacement in ((_RE_OBJ_REF_START, r""),
                                     (_RE_LANGUAGE, r""),
                                     (_RE_SELF_REF, r""),
                                     (_RE_REF_LANG, r"\1"),
                                     (_RE_REF, r"\1")):
            sdesc = pattern.sub(replacement, sdesc)

        # the length limit applies to the sdesc without ANSI codes
        cleaned_sdesc = ansi.strip_ansi(sdesc)
        if len(cleaned_sdesc) > max_length:
            raise SdescError("Too long sdesc")

        # persist to Attribute storage ...
        sdesc_regex = ordered_permutation_regex(cleaned_sdesc)
        self.obj.attributes.add("_sdesc", sdesc)
        self.obj.attributes.add("_sdesc_regex", sdesc_regex)

        # ... and refresh the local cache
        self.sdesc = sdesc
        self.sdesc_regex = re.compile(sdesc_regex, _RE_FLAGS)

        return sdesc

    def get(self):
        """
        Return the current (cached) sdesc string.
        """
        return self.sdesc

    def get_regex_tuple(self):
        """
        Return data for sdesc/recog handling.

        Returns:
            tup (tuple): tuple (sdesc_regex, obj, sdesc)

        """
        return self.sdesc_regex, self.obj, self.sdesc
class RecogHandler(object):
    """
    This handler manages the recognition mapping
    of an Object - the personal nicknames this object has assigned
    to others. Authoritative data lives in Attributes on the handled
    object; the handler keeps in-memory caches for fast lookup.
    """
    def __init__(self, obj):
        """
        Initialize the handler

        Args:
            obj (Object): The entity on which this handler is stored.

        """
        self.obj = obj
        # mappings: "#dbref" -> recog, obj -> compiled regex, obj -> recog
        self.ref2recog = {}
        self.obj2regex = {}
        self.obj2recog = {}
        self._cache()

    def _cache(self):
        """
        Load data to handler cache
        """
        self.ref2recog = self.obj.attributes.get("_recog_ref2recog", default={})
        obj2regex = self.obj.attributes.get("_recog_obj2regex", default={})
        obj2recog = self.obj.attributes.get("_recog_obj2recog", default={})
        # skip deleted (falsy) objects when rebuilding the caches
        self.obj2regex = dict((obj, re.compile(regex, _RE_FLAGS))
                              for obj, regex in obj2regex.items() if obj)
        self.obj2recog = dict((obj, recog)
                              for obj, recog in obj2recog.items() if obj)

    def add(self, obj, recog, max_length=60):
        """
        Assign a custom recog (nick) to the given object.

        Args:
            obj (Object): The object ot associate with the recog
                string. This is usually determined from the sdesc in the
                room by a call to parse_sdescs_and_recogs, but can also be
                given.
            recog (str): The replacement string to use with this object.
            max_length (int, optional): The max length of the recog string.

        Returns:
            recog (str): The (possibly cleaned up) recog string actually set.

        Raises:
            RecogError: When the recog (without color codes) is longer
                than `max_length`.

        """
        # strip emote components from recog, innermost markup first
        recog = _RE_REF.sub(r"\1",
                _RE_REF_LANG.sub(r"\1",
                _RE_SELF_REF.sub(r"",
                _RE_LANGUAGE.sub(r"",
                _RE_OBJ_REF_START.sub(r"", recog)))))

        # the length limit applies to the recog without ANSI codes
        cleaned_recog = ansi.strip_ansi(recog)
        if len(cleaned_recog) > max_length:
            raise RecogError("Too long recog")

        # persist mapping #dbref:obj in Attribute storage
        key = "#%i" % obj.id
        self.obj.db._recog_ref2recog[key] = recog
        self.obj.db._recog_obj2recog[obj] = recog
        regex = ordered_permutation_regex(cleaned_recog)
        self.obj.db._recog_obj2regex[obj] = regex
        # local caching
        self.ref2recog[key] = recog
        self.obj2recog[obj] = recog
        self.obj2regex[obj] = re.compile(regex, _RE_FLAGS)
        return recog

    def get(self, obj):
        """
        Get recog replacement string, if one exists, otherwise
        get sdesc and as a last resort, the object's key.

        Args:
            obj (Object): The object, whose sdesc to replace

        Returns:
            recog (str): The replacement string to use.

        Notes:
            This method will respect a "enable_recog" lock set on
            `obj` (True by default) in order to turn off recog
            mechanism. This is useful for adding masks/hoods etc.

        """
        if obj.access(self.obj, "enable_recog", default=True):
            # check an eventual recog_masked lock on the object
            # to avoid revealing masked characters. If lock
            # does not exist, pass automatically.
            return self.obj2recog.get(obj, obj.sdesc.get()
                                      if hasattr(obj, "sdesc") else obj.key)
        else:
            # recog_mask lock not passed, disable recog
            return obj.sdesc.get() if hasattr(obj, "sdesc") else obj.key

    def remove(self, obj):
        """
        Clear recog for a given object.

        Args:
            obj (Object): The object for which to remove recog.

        """
        if obj in self.obj2recog:
            del self.obj.db._recog_obj2recog[obj]
            del self.obj.db._recog_obj2regex[obj]
            del self.obj.db._recog_ref2recog["#%i" % obj.id]
        self._cache()

    def get_regex_tuple(self, obj):
        """
        Return data for recog matching, parallel to
        `SdescHandler.get_regex_tuple`.

        Args:
            obj (Object): The object whose recog data to fetch.

        Returns:
            rec (tuple or None): Tuple (recog_regex, obj, recog), or
                None if no recog is set or `obj` disallows recog.

        """
        if obj in self.obj2recog and obj.access(self.obj, "enable_recog", default=True):
            # BUGFIX: the original returned the compiled regex as the
            # third element; consumers (and the docstring) expect the
            # recog *string* here, matching SdescHandler.get_regex_tuple.
            return self.obj2regex[obj], obj, self.obj2recog[obj]
        return None
#------------------------------------------------------------
# RP Commands
#------------------------------------------------------------
class RPCommand(Command):
    """Common parent for the RP commands; normalizes the argument string."""
    def parse(self):
        """Pre-parse: strip surrounding whitespace from the raw argument string."""
        self.args = self.args.strip()
class CmdEmote(RPCommand):  # replaces the main emote
    """
    Emote an action, allowing dynamic replacement of
    text in the emote.

    Usage:
      emote text

    Example:
      emote /me looks around.
      emote With a flurry /me attacks /tall man with his sword.
      emote "Hello", /me says.

    Describes an event in the world. This allows the use of /ref
    markers to replace with the short descriptions or recognized
    strings of objects in the same room. These will be translated to
    emotes to match each person seeing it. Use "..." for saying
    things and langcode"..." without spaces to say something in
    a different language.
    """
    key = "emote"
    aliases = [":"]
    locks = "cmd:all()"

    def func(self):
        """Parse and broadcast the emote to the whole location."""
        caller = self.caller
        if not self.args:
            caller.msg("What do you want to do?")
            return
        emote = self.args
        # terminate the emote as a full sentence if needed
        if not emote.endswith((".", "!")):
            emote = "%s." % emote
        # everyone in the room (including the caller) receives the emote
        send_emote(caller, caller.location.contents, emote, anonymous_add='first')
class CmdSdesc(RPCommand):  # set/look at own sdesc
    """
    Assign yourself a short description (sdesc).

    Usage:
      sdesc <short description>

    Assigns a short description to yourself.
    """
    key = "sdesc"
    locks = "cmd:all()"

    def func(self):
        """Set the caller's sdesc."""
        caller = self.caller
        if not self.args:
            caller.msg("Usage: sdesc <sdesc-text>")
            return
        # drop trailing non-alphanumeric characters from the input
        new_sdesc = _RE_CHAREND.sub("", self.args)
        new_sdesc = caller.sdesc.add(new_sdesc)
        caller.msg("%s's sdesc was set to '%s'." % (caller.key, new_sdesc))
class CmdPose(RPCommand):  # set current pose and default pose
    """
    Set a static pose

    Usage:
      pose <pose>
      pose default <pose>
      pose reset
      pose obj = <pose>
      pose default obj = <pose>
      pose reset obj =

    Examples:
      pose leans against the tree
      pose is talking to the barkeep.
      pose box = is sitting on the floor.

    Set a static pose. This is the end of a full sentence that starts
    with your sdesc. If no full stop is given, it will be added
    automatically. The default pose is the pose you get when using
    pose reset. Note that you can use sdescs/recogs to reference
    people in your pose, but these always appear as that person's
    sdesc in the emote, regardless of who is seeing it.
    """
    key = "pose"

    def parse(self):
        """
        Extract the "default"/"reset" modifiers and an optional
        `obj =` target from the pose text.
        """
        args = self.args.strip()
        default = args.startswith("default")
        reset = args.startswith("reset")
        if default:
            args = re.sub(r"^default", "", args)
        if reset:
            args = re.sub(r"^reset", "", args)
        target = None
        if "=" in args:
            # posing another object: `[default|reset] obj = <pose>`
            target, args = [part.strip() for part in args.split("=", 1)]

        self.target = target
        self.reset = reset
        self.default = default
        self.args = args.strip()

    def func(self):
        """Create the pose"""
        caller = self.caller
        pose = self.args
        target = self.target
        if not pose and not self.reset:
            caller.msg("Usage: pose <pose-text> OR pose obj = <pose-text>")
            return

        # poses are full sentences; terminate if needed
        if not pose.endswith("."):
            pose = "%s." % pose
        if target:
            # affect something else
            target = caller.search(target)
            if not target:
                return
            if not target.access(caller, "edit"):
                caller.msg("You can't pose that.")
                return
        else:
            target = caller

        if not target.attributes.has("pose"):
            caller.msg("%s cannot be posed." % target.key)
            return

        target_name = target.sdesc.get() if hasattr(target, "sdesc") else target.key
        # set the pose
        if self.reset:
            # restore the stored default pose
            pose = target.db.pose_default
            target.db.pose = pose
        elif self.default:
            # only store the default; does not change the current pose
            target.db.pose_default = pose
            caller.msg("Default pose is now '%s %s'." % (target_name, pose))
            return
        else:
            # set the pose. We do one-time ref->sdesc mapping here.
            parsed, mapping = parse_sdescs_and_recogs(caller, caller.location.contents, pose)
            mapping = dict((ref, obj.sdesc.get() if hasattr(obj, "sdesc") else obj.key)
                           for ref, obj in mapping.iteritems())
            pose = parsed.format(**mapping)

        # sdesc + pose is shown on one line in room descriptions, so
        # cap the combined length
        if len(target_name) + len(pose) > 60:
            caller.msg("Your pose '%s' is too long." % pose)
            return

        target.db.pose = pose
        caller.msg("Pose will read '%s %s'." % (target_name, pose))
class CmdRecog(RPCommand):  # assign personal alias to object in room
    """
    Recognize another person in the same room.

    Usage:
      recog sdesc as alias

    Example:
        recog tall man as Griatch
        forget griatch

    This will assign a personal alias for a person, or
    forget said alias.
    """
    key = "recog"
    aliases = ["recognize", "forget"]

    def parse(self):
        """Parse for the `<sdesc> as <alias>` structure."""
        # BUGFIX: the original tested `"as" in self.args`, which also
        # triggered on words merely containing "as" (e.g. "basalt")
        # and then crashed on the unpacking below; it also used
        # maxsplit=2, breaking if " as " occurred twice. Test for the
        # full delimiter and split only on its first occurrence.
        if " as " in self.args:
            self.sdesc, self.alias = [part.strip()
                                      for part in self.args.split(" as ", 1)]
        elif self.args:
            # no alias given (the `forget <alias>` form)
            self.sdesc = self.args.strip()
            self.alias = ""

    def func(self):
        """Assign or remove the recog."""
        caller = self.caller
        if not self.args:
            caller.msg("Usage: recog <sdesc> as <alias> or forget <alias>")
            return
        sdesc = self.sdesc
        alias = self.alias.rstrip(".?!")
        # search by sdesc/recog in the current room
        prefixed_sdesc = sdesc if sdesc.startswith(_PREFIX) else _PREFIX + sdesc
        candidates = caller.location.contents
        matches = parse_sdescs_and_recogs(caller, candidates, prefixed_sdesc, search_mode=True)
        nmatches = len(matches)
        # handle 0, 1 and >1 matches
        if nmatches == 0:
            caller.msg(_EMOTE_NOMATCH_ERROR.format(ref=sdesc))
        elif nmatches > 1:
            reflist = ["%s%s%s (%s%s)" % (inum + 1, _NUM_SEP,
                       _RE_PREFIX.sub("", sdesc), caller.recog.get(obj),
                       " (%s)" % caller.key if caller == obj else "")
                       for inum, obj in enumerate(matches)]
            caller.msg(_EMOTE_MULTIMATCH_ERROR.format(ref=sdesc, reflist="\n    ".join(reflist)))
        else:
            obj = matches[0]
            # BUGFIX: the original checked access against `self.obj`
            # (the object this command is stored on) rather than the
            # caller doing the recognizing.
            if not obj.access(caller, "enable_recog", default=True):
                # don't apply recog if object doesn't allow it (e.g. by being masked).
                caller.msg("Can't recognize someone who is masked.")
                return
            if self.cmdstring == "forget":
                # remove existing recog
                caller.recog.remove(obj)
                # NOTE(review): this reports obj.recog.get(obj); possibly
                # intended caller.recog.get(obj) - confirm upstream.
                caller.msg("%s will know only '%s'." % (caller.key, obj.recog.get(obj)))
            else:
                sdesc = obj.sdesc.get() if hasattr(obj, "sdesc") else obj.key
                alias = caller.recog.add(obj, alias)
                caller.msg("%s will now remember {w%s{n as {w%s{n." % (caller.key, sdesc, alias))
class CmdMask(RPCommand):
    """
    Wear a mask

    Usage:
        mask <new sdesc>
        unmask

    This will put on a mask to hide your identity. When wearing
    a mask, your sdesc will be replaced by the sdesc you pick and
    people's recognitions of you will be disabled.

    """
    key = "mask"
    aliases = ["unmask"]

    def func(self):
        """Put on or remove the mask, depending on the invoked alias."""
        caller = self.caller
        if self.cmdstring == "mask":
            # putting on a mask
            if not self.args:
                caller.msg("Usage: (un)wearmask sdesc")
                return
            if caller.db.unmasked_sdesc:
                caller.msg("You are already wearing a mask.")
                return
            # strip trailing punctuation and tag the sdesc as masked
            masked_sdesc = "%s {H[masked]{n" % _RE_CHAREND.sub("", self.args)
            if len(masked_sdesc) > 60:
                caller.msg("Your masked sdesc is too long.")
                return
            # remember the real sdesc and block others' recogs of us
            caller.db.unmasked_sdesc = caller.sdesc.get()
            caller.locks.add("enable_recog:false()")
            caller.sdesc.add(masked_sdesc)
            caller.msg("You wear a mask as '%s'." % masked_sdesc)
            return
        # "unmask" alias
        stored_sdesc = caller.db.unmasked_sdesc
        if not stored_sdesc:
            caller.msg("You are not wearing a mask.")
            return
        # restore the original sdesc and re-enable recognition
        del caller.db.unmasked_sdesc
        caller.locks.remove("enable_recog")
        caller.sdesc.add(stored_sdesc)
        caller.msg("You remove your mask and is again '%s'." % stored_sdesc)
class RPSystemCmdSet(CmdSet):
    """
    Mix-in cmdset holding all roleplay commands; merge this into the
    default cmdset to activate the RP system.
    """
    def at_cmdset_creation(self):
        """Populate the cmdset with the RP commands."""
        for command_class in (CmdEmote, CmdSdesc, CmdPose, CmdRecog, CmdMask):
            self.add(command_class())
#------------------------------------------------------------
# RP typeclasses
#------------------------------------------------------------
class ContribRPObject(DefaultObject):
    """
    This class is meant as a mix-in or parent for objects in an
    rp-heavy game. It implements the base functionality for poses
    and sdesc/recog-aware searching.
    """
    def at_object_creation(self):
        """
        Called at initial creation.
        """
        # BUGFIX: the original referenced the parent hook without
        # calling it (missing parentheses), so the default creation
        # setup never ran.
        super(ContribRPObject, self).at_object_creation()

        # emoting/recog data
        self.db.pose = ""
        self.db.pose_default = "is here."

    def search(self, searchdata, **kwargs):
        """
        This version of search will pre-parse searchdata for eventual
        matches against recogs and sdescs of candidates in the same
        location.

        Args:
            searchdata (str): Search string.

        Notes:
            Recog/sdesc matching is always turned off if the keyword
                `global_search` is set or `candidates` are given.

        """
        if (isinstance(searchdata, basestring) and not
                (kwargs.get("global_search") or
                 kwargs.get("candidates"))):
            # searchdata is a string; common self-references
            if searchdata.lower() in ("here", ):
                return [self.location] if "quiet" in kwargs else self.location
            if searchdata.lower() in ("me", "self",):
                return [self] if "quiet" in kwargs else self
            if searchdata.lower() == self.key.lower():
                return [self] if "quiet" in kwargs else self

            # sdesc/recog matching
            candidates = self.location.contents
            matches = parse_sdescs_and_recogs(self, candidates,
                                              _PREFIX + searchdata, search_mode=True)
            nmatches = len(matches)
            if nmatches == 1:
                return matches[0]
            elif nmatches > 1:
                # multimatch - list the alternatives and abort
                reflist = ["%s%s%s (%s%s)" % (inum + 1, _NUM_SEP, searchdata,
                           self.recog.get(obj),
                           " (%s)" % self.key if self == obj else "")
                           for inum, obj in enumerate(matches)]
                self.msg(_EMOTE_MULTIMATCH_ERROR.format(
                         ref=searchdata, reflist="\n    ".join(reflist)))
                return
            if not self.locks.check_lockstring(self, "perm(Builders)"):
                # we block fallback key/alias lookup unless we have
                # builder access to continue
                if "nofound_string" in kwargs:
                    self.msg(kwargs["nofound_string"])
                else:
                    self.msg("There is no '%s' here." % searchdata)
                return
        # fall back to normal search
        return super(ContribRPObject, self).search(searchdata, **kwargs)

    def get_display_name(self, looker, **kwargs):
        """
        Displays the name of the object in a viewer-aware manner.

        Args:
            looker (TypedObject): The object or player that is looking
                at/getting information for this object.

        Kwargs:
            pose (bool): Include the pose (if available) in the return.

        Returns:
            name (str): A string of the sdesc containing the name of the object,
                if this is defined.
                including the DBREF if this user is privileged to control
                said object.

        Notes:
            The RPObject version doesn't add color to its display.

        """
        idstr = "(#%s)" % self.id if self.access(looker, access_type='control') else ""
        # prefer the looker's recog of us, then our sdesc, then our key
        try:
            recog = looker.recog.get(self)
        except AttributeError:
            recog = None
        sdesc = recog or (hasattr(self, "sdesc") and self.sdesc.get()) or self.key
        pose = " %s" % ((self.db.pose or "") if kwargs.get("pose", False) else "")
        return "%s%s%s" % (sdesc, idstr, pose)

    def return_appearance(self, looker):
        """
        This formats a description. It is the hook a 'look' command
        should call.

        Args:
            looker (Object): Object doing the looking.

        """
        if not looker:
            return
        # get and identify all visible objects
        visible = (con for con in self.contents if con != looker and
                   con.access(looker, "view"))
        exits, users, things = [], [], []
        for con in visible:
            key = con.get_display_name(looker, pose=True)
            if con.destination:
                exits.append(key)
            elif con.has_player:
                users.append(key)
            else:
                things.append(key)
        # get description, build string
        string = "{c%s{n\n" % self.get_display_name(looker, pose=True)
        desc = self.db.desc
        if desc:
            string += "%s" % desc
        if exits:
            string += "\n{wExits:{n " + ", ".join(exits)
        if users or things:
            string += "\n " + "\n ".join(users + things)
        return string
class ContribRPRoom(ContribRPObject):
    """
    Identical to ContribRPObject; exists so rooms can be typeclassed
    separately from other in-game objects.
    """
    pass
class ContribRPCharacter(DefaultCharacter, ContribRPObject):
"""
This is a character class that has poses, sdesc and recog.
"""
    # Handlers - created lazily on first access and then cached.

    @lazy_property
    def sdesc(self):
        """SdescHandler managing this character's short description."""
        return SdescHandler(self)

    @lazy_property
    def recog(self):
        """RecogHandler managing this character's recognitions of others."""
        return RecogHandler(self)
def get_display_name(self, looker, **kwargs):
"""
Displays the name of the object in a viewer-aware manner.
Args:
looker (TypedObject): The object or player that is looking
at/getting inforamtion for this object.
Kwargs:
pose (bool): Include the pose (if available) in the return.
Returns:
name (str): A string of the sdesc containing the name of the object,
if this is defined.
including the DBREF if this user is privileged to control
said object.
Notes:
The RPCharacter version of this method colors its display to make
characters stand out from other objects.
"""
idstr = "(#%s)" % self.id if self.access(looker, access_type='control') else ""
try:
recog = looker.recog.get(self)
except AttributeError:
recog = None
sdesc = recog or (hasattr(self, "sdesc") and self.sdesc.get()) or self.key
pose = " %s" % self.db.pose or "" if kwargs.get("pose", False) else ""
return "{c%s{n%s%s" % (sdesc, idstr, pose)
def at_object_creation(self):
"""
Called at initial creation.
"""
super(ContribRPCharacter, self).at_object_creation()
self.db._sdesc = ""
self.db._sdesc_regex = ""
self.db._recog_ref2recog = {}
self.db._recog_obj2regex = {}
self.db._recog_obj2recog = {}
self.cmdset.add(RPSystemCmdSet, permanent=True)
# initializing sdesc
self.sdesc.add("A normal person")
def process_sdesc(self, sdesc, obj, **kwargs):
"""
Allows to customize how your sdesc is displayed (primarily by
changing colors).
Args:
sdesc (str): The sdesc to display.
obj (Object): The object to which the adjoining sdesc
belongs (can be yourself).
Returns:
sdesc (str): The processed sdesc ready
for display.
"""
return "{b%s{n" % sdesc
def process_language(self, text, speaker, language, **kwargs):
"""
Allows to process the spoken text, for example
by obfuscating language based on your and the
speaker's language skills. Also a good place to
put coloring.
Args:
text (str): The text to process.
speaker (Object): The object delivering the text.
language (str): An identifier string for the language.
Return:
text (str): The optionally processed text.
Notes:
This is designed to work together with a string obfuscator
such as the `obfuscate_language` or `obfuscate_whisper` in
the evennia.contrib.rplanguage module.
"""
return "%s{w%s{n" % ("{W(%s)" % language if language else "", text)
#from evennia.contrib import rplanguage
#return "{w%s{n" % rplanguage.obfuscate_language(text, level=1.0)
| shollen/evennia | evennia/contrib/rpsystem.py | Python | bsd-3-clause | 43,810 |
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from sentry.models import Project, Team
from sentry.web.forms.add_project import AddProjectForm
from sentry.web.frontend.base import OrganizationView
from sentry.utils.http import absolute_uri
ERR_NO_TEAMS = 'You cannot create a new project because there are no teams to assign it to.'
class AddProjectWithTeamForm(AddProjectForm):
    """Project-creation form with an extra choice field for the owning team."""
    team = forms.ChoiceField(
        choices=(), required=True,
        help_text='The team controls who has access to this project.',
    )
    class Meta:
        fields = ('name', 'team')
        model = Project
    def __init__(self, user, team_list, *args, **kwargs):
        # `user` is accepted for interface symmetry with callers but is not
        # used here -- presumably consumed by AddProjectForm; confirm upstream.
        super(AddProjectWithTeamForm, self).__init__(*args, **kwargs)
        self.team_list = team_list
        if len(self.team_list) == 1:
            # only one possible team: drop the field; save() falls back to it
            del self.fields['team']
        else:
            # NOTE(review): this is a generator expression shared between the
            # field and its widget -- assumes Django's `choices` setter
            # materializes it; verify when upgrading Django.
            self.fields['team'].choices = (
                (t.slug, t.name)
                for t in team_list
            )
            self.fields['team'].widget.choices = self.fields['team'].choices
    def clean_team(self):
        # map the submitted slug back to the Team object (None if no match)
        value = self.cleaned_data['team']
        for team in self.team_list:
            if value == team.slug:
                return team
        return None
    def save(self, actor, ip_address):
        # if the field was removed in __init__, default to the only team
        team = self.cleaned_data.get('team', self.team_list[0])
        return super(AddProjectWithTeamForm, self).save(actor, team, ip_address)
class CreateProjectView(OrganizationView):
    """Organization view that creates a new project under a chosen team."""
    # TODO(dcramer): I'm 95% certain the access is incorrect here as it would
    # be probably validating against global org access, and all we care about is
    # team admin
    required_scope = 'team:write'

    def get_form(self, request, organization, team_list):
        """Build the bound (or unbound) project-creation form."""
        initial = {'team': request.GET.get('team')}
        return AddProjectWithTeamForm(
            request.user, team_list, request.POST or None, initial=initial)

    def handle(self, request, organization):
        """Render the creation form; on valid POST create the project."""
        # restrict the team choices to those the user can administer
        accessible = Team.objects.get_for_user(
            organization=organization,
            user=request.user,
        )
        team_list = [
            team for team in accessible
            if request.access.has_team_scope(team, self.required_scope)
        ]
        if not team_list:
            messages.error(request, ERR_NO_TEAMS)
            return self.redirect(
                reverse('sentry-organization-home', args=[organization.slug]))
        form = self.get_form(request, organization, team_list)
        if form.is_valid():
            project = form.save(request.user, request.META['REMOTE_ADDR'])
            install_url = absolute_uri('/{}/{}/settings/install/'.format(
                organization.slug,
                project.slug,
            ))
            return self.redirect(install_url)
        return self.respond('sentry/create-project.html', {'form': form})
| nicholasserra/sentry | src/sentry/web/frontend/create_project.py | Python | bsd-3-clause | 2,910 |
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
__import__( "Gaffer" )
from _GafferDispatch import *
from LocalDispatcher import LocalDispatcher
from SystemCommand import SystemCommand
from TaskList import TaskList
from TaskContextProcessor import TaskContextProcessor
from Wedge import Wedge
from TaskContextVariables import TaskContextVariables
from TaskSwitch import TaskSwitch
from PythonCommand import PythonCommand
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", {}, subdirectory = "GafferDispatch" )
| chippey/gaffer | python/GafferDispatch/__init__.py | Python | bsd-3-clause | 2,275 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
from mock import MagicMock, patch, PropertyMock
# External imports
# Bokeh imports
from bokeh.document.document import Document
from bokeh.io.state import State
# Module under test
import bokeh.io.notebook as binb
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_install_notebook_hook():
    """Hooks register once; replacing one requires overwrite=True."""
    binb.install_notebook_hook("foo", "load", "doc", "app")
    for key in ("load", "doc", "app"):
        assert binb._HOOKS["foo"][key] == key
    # re-registering without overwrite must be rejected
    with pytest.raises(RuntimeError):
        binb.install_notebook_hook("foo", "load2", "doc2", "app2")
    # ...and accepted with overwrite=True
    binb.install_notebook_hook("foo", "load2", "doc2", "app2", overwrite=True)
    for key in ("load", "doc", "app"):
        assert binb._HOOKS["foo"][key] == key + "2"
@patch('bokeh.io.notebook.get_comms')
@patch('bokeh.io.notebook.publish_display_data')
@patch('bokeh.embed.notebook.notebook_content')
def test_show_doc_no_server(mock_notebook_content,
                            mock__publish_display_data,
                            mock_get_comms):
    # NOTE: @patch decorators apply bottom-up, so the first parameter is the
    # innermost patch (notebook_content) and the last is get_comms.
    mock_get_comms.return_value = "comms"
    s = State()
    d = Document()
    mock_notebook_content.return_value = ["notebook_script", "notebook_div", d]
    # minimal stand-in for a Bokeh model object
    class Obj(object):
        id = None
        def references(self): return []
    assert mock__publish_display_data.call_count == 0
    binb.show_doc(Obj(), s, True)
    expected_args = ({'application/javascript': 'notebook_script', 'application/vnd.bokehjs_exec.v0+json': ''},)
    expected_kwargs = {'metadata': {'application/vnd.bokehjs_exec.v0+json': {'id': None}}}
    # showing with notebook_comms=True should put the document in hold mode
    assert d._hold is not None
    assert mock__publish_display_data.call_count == 2 # two mime types
    # the last publish call carries the script + exec payload
    assert mock__publish_display_data.call_args[0] == expected_args
    assert mock__publish_display_data.call_args[1] == expected_kwargs
class Test_push_notebook(object):
    """Tests for bokeh.io.notebook.push_notebook."""
    @patch('bokeh.io.notebook.CommsHandle.comms', new_callable=PropertyMock)
    def test_no_events(self, mock_comms):
        # with no pending document events, the comms channel is never touched
        mock_comms.return_value = MagicMock()
        d = Document()
        handle = binb.CommsHandle("comms", d)
        binb.push_notebook(d, None, handle)
        assert mock_comms.call_count == 0
    @patch('bokeh.io.notebook.CommsHandle.comms', new_callable=PropertyMock)
    def test_with_events(self, mock_comms):
        # a title change should be serialized as a TitleChanged event and
        # sent over the comms channel
        mock_comm = MagicMock()
        mock_send = MagicMock(return_value="junk")
        mock_comm.send = mock_send
        mock_comms.return_value = mock_comm
        d = Document()
        handle = binb.CommsHandle("comms", d)
        d.title = "foo"
        binb.push_notebook(d, None, handle)
        assert mock_comms.call_count > 0
        assert mock_send.call_count == 3 # sends header, metadata, then content
        assert json.loads(mock_send.call_args[0][0]) == {u"events": [{u"kind": u"TitleChanged", u"title": u"foo"}], u"references": []}
        assert mock_send.call_args[1] == {}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def test__origin_url():
    """Any http/https scheme prefix is stripped from the origin URL."""
    for url in ("foo.com:8888", "http://foo.com:8888", "https://foo.com:8888"):
        assert binb._origin_url(url) == "foo.com:8888"
def test__server_url():
    """The scheme is preserved (defaulting to http) and the port replaced."""
    cases = [
        ("foo.com:8888", "http://foo.com:10/"),
        ("http://foo.com:8888", "http://foo.com:10/"),
        ("https://foo.com:8888", "https://foo.com:10/"),
    ]
    for url, expected in cases:
        assert binb._server_url(url, 10) == expected
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/io/tests/test_notebook.py | Python | bsd-3-clause | 5,073 |
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
from distutils.sysconfig import get_python_version, get_python_lib
except ImportError:
from sysconfig import get_python_version
from distutils.sysconfig import get_python_lib
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.extension import Library
def strip_module(filename):
    """Return `filename` minus its extension and any trailing 'module' suffix."""
    base = filename
    # drop the extension first, if there is one
    if '.' in base:
        base = os.path.splitext(base)[0]
    # then drop a trailing 'module' (e.g. "foomodule" -> "foo")
    if base.endswith('module'):
        base = base[:-len('module')]
    return base
def write_stub(resource, pyfile):
    """Write a stub loader to `pyfile` that imports the extension `resource`.

    The stub lets a zipped egg extract and load a native extension at
    import time via pkg_resources.

    Fix: the file handle is now closed even if the write fails
    (try/finally instead of an unconditional close; still Python 2.3
    compatible, so no `with` statement).
    """
    f = open(pyfile,'w')
    try:
        f.write('\n'.join([
            "def __bootstrap__():",
            "   global __bootstrap__, __loader__, __file__",
            "   import sys, pkg_resources, imp",
            "   __file__ = pkg_resources.resource_filename(__name__,%r)"
                % resource,
            "   __loader__ = None; del __bootstrap__, __loader__",
            "   imp.load_dynamic(__name__,__file__)",
            "__bootstrap__()",
            "" # terminal \n
        ]))
    finally:
        f.close()
# stub __init__.py written into packages distributed without one; declares
# the package as a pkg_resources namespace package
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
    """distutils/setuptools command that builds a ".egg" distribution."""
    description = "create an \"egg\" distribution"
    user_options = [
        ('bdist-dir=', 'b',
            "temporary directory for creating the distribution"),
        ('plat-name=', 'p',
            "platform name to embed in generated filenames "
            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
            "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
            "keep the pseudo-installation tree around after " +
            "creating the distribution archive"),
        ('dist-dir=', 'd',
            "directory to put final built distributions in"),
        ('skip-build', None,
            "skip rebuilding everything (for testing/debugging)"),
    ]
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]
    def initialize_options (self):
        """Set all options to their undefined defaults."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None
    def finalize_options(self):
        """Derive unset options from the egg_info and bdist commands."""
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
    def do_install_data(self):
        """Install data files, rewriting absolute site-packages paths
        so they land inside the egg build tree instead."""
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(get_python_lib()))
        old, self.distribution.data_files = self.distribution.data_files,[]
        for item in old:
            if isinstance(item,tuple) and len(item)==2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized==site_packages or normalized.startswith(
                        site_packages+os.sep
                    ):
                        item = realpath[len(site_packages)+1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # always restore the original data_files list
            self.distribution.data_files = old
    def get_outputs(self):
        """Return the list of files this command produces (just the egg)."""
        return [self.egg_output]
    def call_command(self,cmdname,**kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        # point every install-directory option at the egg build dir
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname,self.bdist_dir)
        kw.setdefault('skip_build',self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd
    def run(self):
        """Build the egg: install code and data into bdist_dir, write
        EGG-INFO metadata, then zip the tree into the output egg."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root; instcmd.root = None
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # write a stub .py loader next to each native extension
        for (p,ext_name) in enumerate(ext_outputs):
            filename,ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep,'/')
        to_compile.extend(self.make_init_files())
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root,'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
        self.copy_metadata_to(egg_info)
        # record (or clear) the list of native libraries in the egg
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                          dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution,'dist_files',[]).append(
            ('bdist_egg',get_python_version(),self.egg_output))
    def zap_pyfiles(self):
        """Delete all .py source files from the egg build tree."""
        log.info("Removing .py files from temporary directory")
        for base,dirs,files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base,name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)
    def zip_safe(self):
        """Return the distribution's zip_safe flag, analyzing the built
        tree when the flag was not set explicitly."""
        safe = getattr(self.distribution,'zip_safe',None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)
    def make_init_files(self):
        """Create missing package __init__ files"""
        init_files = []
        for base,dirs,files in walk_egg(self.bdist_dir):
            if base==self.bdist_dir:
                # don't put an __init__ in the root
                continue
            for name in files:
                if name.endswith('.py'):
                    if '__init__.py' not in files:
                        pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
                        if self.distribution.has_contents_for(pkg):
                            log.warn("Creating missing __init__.py for %s",pkg)
                            filename = os.path.join(base,'__init__.py')
                            if not self.dry_run:
                                f = open(filename,'w'); f.write(NS_PKG_STUB)
                                f.close()
                            init_files.append(filename)
                    break
            else:
                # not a package, don't traverse to subdirectories
                dirs[:] = []
        return init_files
    def gen_header(self):
        """Return the zipfile open mode for the egg; for an "eggsecutable"
        entry point, first write a shell-script header so the egg can be
        executed directly, and return 'a' to append the zip after it."""
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation',{}).get('eggsecutable')
        if ep is None:
            return 'w' # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'
    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info,'')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)
    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        paths = {self.bdist_dir:''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base]+filename)
            for filename in dirs:
                paths[os.path.join(base,filename)] = paths[base]+filename+'/'
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext,Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir,filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = os.walk(egg_dir)
    # take the top-level triple first so EGG-INFO can be pruned from the walk
    # (Python 2 iterator protocol; this file targets Python 2.3 compatibility)
    base,dirs,files = walker.next()
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield base,dirs,files
    for bdf in walker:
        yield bdf
def analyze_egg(egg_dir, stubs):
    """Determine the egg tree's zip-safety.

    Honors an explicit zip-safe / not-zip-safe marker in EGG-INFO if
    present; otherwise scans every compiled module for unsafe constructs.
    Returns True/False (or the recorded flag value).
    """
    # check for existing flag in EGG-INFO
    for flag,fn in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
            return flag
    if not can_scan(): return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith('.py') or name.endswith('.pyw'):
                continue
            elif name.endswith('.pyc') or name.endswith('.pyo'):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Write or remove the zip-safety flag file(s) in `egg_dir`.

    `safe` may be True ('zip-safe'), False ('not-zip-safe') or None
    (remove both markers). Exactly one marker file exists afterwards
    when `safe` is not None.

    Fixes: replaced the deprecated Python 2-only `<>` operator with
    `!=` (valid in Python 2.3+ and Python 3), and close the flag file
    via try/finally so it is not leaked on a write error.
    """
    # Write or remove zip safety flag file(s)
    for flag, fn in safety_flags.items():
        fn = os.path.join(egg_dir, fn)
        if os.path.exists(fn):
            # stale or contradictory marker: remove it
            if safe is None or bool(safe) != flag:
                os.unlink(fn)
        elif safe is not None and bool(safe) == flag:
            f = open(fn, 'wt')
            try:
                f.write('\n')
            finally:
                f.close()

safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Loads the compiled module's code object via marshal and looks for
    symbols (``__file__``, ``__path__``, risky ``inspect`` functions,
    ``python -m`` style mains) that break when run from a zipfile.
    Returns True if the module looks zip-safe, False otherwise.
    """
    filename = os.path.join(base,name)
    if filename[:-1] in stubs:
        return True     # Extension module
    pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
    module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
    f = open(filename,'rb'); f.read(8)   # skip magic & date
    try:
        code = marshal.load(f); f.close()
    except ValueError:
        f.seek(0); f.read(12) # skip magic & date & file size; file size added in Python 3.3
        code = marshal.load(f); f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        for bad in [
            # Bug fix: a missing comma after 'getfile' made Python
            # concatenate it with 'getsourcelines' into the bogus name
            # 'getfilegetsourcelines', so neither was ever detected.
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names: yield name
    for const in code.co_consts:
        # `basestring` covers both str and unicode constants (Python 2 only)
        if isinstance(const,basestring):
            yield const
        elif isinstance(const,CodeType):
            # recurse into nested functions/classes
            for name in iter_symbols(const):
                yield name
def can_scan():
    """Return True if compiled bytecode on this platform can be analyzed.

    Returns False for Python > 3.3 bytecode formats; on non-CPython
    platforms it warns and falls through, implicitly returning None
    (falsy), which callers treat as "cannot scan".
    """
    if sys.version_info > (3, 3):
        return False # Can't scan recent formats
    if not sys.platform.startswith('java') and sys.platform != 'cli':
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory (see bdist_egg.call_command above)
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
        mode='w'
):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses either the "zipfile"
    Python module (if available) or the InfoZIP "zip" utility (if installed
    and found on the default search path).  If neither tool is available,
    raises DistutilsExecError.  Returns the name of the output zip file.
    """
    import zipfile
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    def visit(z, dirname, names):
        # add each regular file under dirname to the archive, relative
        # to base_dir (no-op when dry_run)
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir)+1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'" % p)
    if compress is None:
        compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
    compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            visit(z, dirname, files)
        z.close()
    else:
        # dry-run: walk and log what would be added without opening a zip
        for dirname, dirs, files in os.walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
#
| mayankcu/Django-social | venv/Lib/site-packages/distribute-0.6.28-py2.7.egg/setuptools/command/bdist_egg.py | Python | bsd-3-clause | 18,594 |
import sublime
import sublime_plugin
import json
from threading import Thread
from ..lib.ycmd_handler import server
from ..lib.utils import *
from ..lib.msgs import MsgTemplates
class CppYCMHighlightProblemsListener(sublime_plugin.EventListener):
    """Sublime Text event listener driving problem highlighting for C++ views."""

    def on_selection_modified_async(self, view):
        """Cursor moved; no-op for non-C++ or scratch views."""
        if not is_cpp(view) or view.is_scratch():
            return
        # NOTE: in ST3 the output panel never triggers this callback, so no
        # status-bar update is attempted here.

    def on_post_save_async(self, view):
        """After saving a C++ file, re-run highlighting when enabled."""
        if is_cpp(view) and not view.is_scratch() and check_highlight_on_save():
            view.window().run_command('cppycm_highlight_problems')
| glymehrvrd/CppYCM | listeners/highlight_problems.py | Python | mit | 902 |
from biicode.common.model.cells import SimpleCell
from biicode.common.model.dependency_set import DependencySet
from biicode.common.model.brl.block_name import BlockName
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
from biicode.common.model.blob import Blob
from biicode.common.model.resource import Resource
from biicode.common.model.content import Content
from biicode.common.model.bii_type import TEXT
from biicode.common.edition.bii_config import BiiConfig
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.exception import ConfigurationFileError, BiiException
BIICODE_FILE = "biicode.conf"
class BlockHolder(object):
    """In-memory view of one block: its name, its resources and the
    (lazily parsed) biicode.conf configuration."""
    def __init__(self, block_name, resources):
        """ resources is iterable of resources or dict {CellName: Resource(Cell, Content)}
        """
        assert isinstance(block_name, BlockName)
        self.block_name = block_name
        if isinstance(resources, dict):
            # we build the dict again to ensure the keys are CellName not BlockCellName
            self._resources = {r.cell_name: r for r in resources.itervalues()}
        else:
            self._resources = {r.cell_name: r for r in resources}
        self._simple_resources = None # iterable (list) of simple resources
        # configuration files
        self._config = None
    @property
    def config(self):
        """Parse and cache the block's biicode.conf (empty config if absent)."""
        if self._config is None:
            try:
                res = self._resources[BIICODE_FILE]
                content = res.content.load.bytes
            except KeyError:
                # no biicode.conf in this block: BiiConfig(None) is a default
                content = None
            try:
                self._config = BiiConfig(content)
            except ConfigurationFileError as e:
                raise ConfigurationFileError('%s/biicode.conf: Line %s'
                                             % (self.block_name, str(e)))
        return self._config
    @property
    def mains(self):
        # [mains] section of biicode.conf
        return self.config.mains
    @property
    def tests(self):
        # [tests] section of biicode.conf
        return self.config.tests
    @property
    def data(self):
        # [data] section of biicode.conf
        return self.config.data
    @property
    def paths(self):
        # [paths] section of biicode.conf
        return self.config.paths
    @property
    def dependencies(self):
        # [dependencies] section of biicode.conf
        return self.config.dependencies
    @property
    def requirements(self):
        # [requirements] section: BlockVersionTable of required blocks
        return self.config.requirements
    @property
    def parent(self):
        """Return the configured parent version, or an init (DEV) version.

        Raises:
            BiiException: if the configured parent names a different block.
        """
        if self.config.parent:
            if self.config.parent.block_name != self.block_name:
                raise BiiException("A block should have same BlockName as it's parent.\n"
                                   "%s's parent is %s"
                                   % (self.block_name, self.config.parent.block_name))
            return self.config.parent
        return self.block_name.init_version()
    @property
    def includes(self):
        # [includes] section of biicode.conf
        return self.config.includes
    @requirements.setter
    def requirements(self, block_version_table):
        # replace the whole requirements table
        assert isinstance(block_version_table, BlockVersionTable)
        self.config.requirements = block_version_table
    @parent.setter
    def parent(self, parent):
        """ Should be called only after publish and open
        """
        assert isinstance(parent, BlockVersion)
        self.config.parent = parent
    def commit_config(self):
        '''
        Returns:
            None if the config file didnt change. The config file Resource in case
            it was created or modified
        '''
        new_content = self.config.dumps()
        if new_content:
            name = self.block_name + BIICODE_FILE
            new_res = Resource(SimpleCell(name, TEXT),
                               Content(name, load=Blob(new_content), created=True))
            self.add_resource(new_res)
            return new_res
        return None
    @property
    def cell_names(self):
        """ return CellNames
        """
        return set(self._resources.keys())
    @property
    def block_cell_names(self):
        """ return BlockCellNames
        """
        return {self.block_name + name for name in self._resources}
    def __getitem__(self, key):
        """Return the Resource for the given CellName."""
        return self._resources[key]
    @property
    def simple_resources(self):
        ''' Useful as most iterations are done over simple resources.
        If a block_name is given, the method returns only it's simple resources
        '''
        if self._simple_resources is None:
            # cache; invalidated by add_resource/delete_resource
            self._simple_resources = [x for x in self._resources.itervalues()
                                      if isinstance(x.cell, SimpleCell)]
        return self._simple_resources
    @property
    def resources(self):
        """Return the full {CellName: Resource} mapping."""
        return self._resources
    def add_resource(self, resource):
        """Add (or replace) a resource, invalidating the simple-resources cache."""
        self._resources[resource.cell_name] = resource
        self._simple_resources = None
    def delete_resource(self, name):
        """Remove the resource by CellName, invalidating the cache."""
        del self._resources[name]
        self._simple_resources = None
    def external_targets(self):
        '''return: a set(BlockCellNames) with cells not included'''
        return self._filter(lambda x, y: x != y)
    def internal_targets(self):
        '''return the internal targets as set(BlockCellNames) of dependencies
        of resources with "names" (NOT EXTERNAL) '''
        return self._filter(lambda x, y: x == y)
    def _filter(self, compare):
        """Collect dependency targets whose block name matches `compare`."""
        result = set()
        for resource in self.simple_resources:
            cell = resource.cell
            for target in cell.dependencies.targets:
                if compare(target.block_name, self.block_name):
                    result.add(target)
        return result
    def unresolved(self):
        """Return the union of unresolved dependencies of all simple cells."""
        result = set()
        for cell, _ in self.simple_resources:
            result.update(cell.dependencies.unresolved)
        return result
    def translate_virtuals(self, block_cell_names):
        '''Handles pointing to virtual targets instead contained ones'''
        result = set()
        for block_cell_name in block_cell_names:
            assert block_cell_name.block_name == self.block_name, "%s != %s" % (block_cell_name,
                                                                                self.block_name)
            cell = self._resources[block_cell_name.cell_name].cell
            try:
                # prefer the virtual container cell, if this cell has one
                target = cell.container or cell.name
            except AttributeError:
                target = cell.name
            result.add(target)
        return result
    def deps(self, files=None):
        """Aggregate the dependencies of simple cells (optionally only
        those whose BlockCellName is in `files`)."""
        deps = DependencySet()
        for name, (cell, _) in self._resources.iteritems():
            if files is None or self.block_name + name in files:
                if isinstance(cell, SimpleCell):
                    deps.update(cell.dependencies)
        return deps
| zhangf911/common | edition/block_holder.py | Python | mit | 6,717 |
# -*- coding: utf-8 -*-
import codecs
import logging
import os
import chardet
import pysrt
from .video import Episode, Movie
from .utils import sanitize, sanitize_release_group
logger = logging.getLogger(__name__)
#: Subtitle extensions
SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass', '.mpl')
class Subtitle(object):
    """Base class for subtitle.
    :param language: language of the subtitle.
    :type language: :class:`~babelfish.language.Language`
    :param bool hearing_impaired: whether or not the subtitle is hearing impaired.
    :param page_link: URL of the web page from which the subtitle can be downloaded.
    :type page_link: str
    :param encoding: Text encoding of the subtitle.
    :type encoding: str
    """
    #: Name of the provider that returns that class of subtitle
    provider_name = ''
    def __init__(self, language, hearing_impaired=False, page_link=None, encoding=None):
        #: Language of the subtitle
        self.language = language
        #: Whether or not the subtitle is hearing impaired
        self.hearing_impaired = hearing_impaired
        #: URL of the web page from which the subtitle can be downloaded
        self.page_link = page_link
        #: Content as bytes
        self.content = None
        #: Encoding to decode with when accessing :attr:`text`
        self.encoding = None
        # validate the encoding: normalize to the codec's canonical name so
        # that aliases (e.g. 'UTF8') compare equal; invalid names are dropped.
        if encoding:
            try:
                self.encoding = codecs.lookup(encoding).name
            except (TypeError, LookupError):
                logger.debug('Unsupported encoding %s', encoding)
    @property
    def id(self):
        """Unique identifier of the subtitle"""
        raise NotImplementedError
    @property
    def text(self):
        """Content as string
        If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`
        """
        if not self.content:
            return
        # errors='replace' guarantees decoding never raises, at the cost of
        # U+FFFD replacement characters for undecodable bytes.
        if self.encoding:
            return self.content.decode(self.encoding, errors='replace')
        return self.content.decode(self.guess_encoding(), errors='replace')
    def is_valid(self):
        """Check if a :attr:`text` is a valid SubRip format.
        :return: whether or not the subtitle is valid.
        :rtype: bool
        """
        if not self.text:
            return False
        try:
            pysrt.from_string(self.text, error_handling=pysrt.ERROR_RAISE)
        except pysrt.Error as e:
            # e.args[0] is the index where pysrt failed; errors at index >= 80
            # are tolerated (presumably trailing garbage/credits) — only an
            # early parse failure marks the subtitle invalid. TODO confirm.
            if e.args[0] < 80:
                return False
        return True
    def guess_encoding(self):
        """Guess encoding using the language, falling back on chardet.
        :return: the guessed encoding.
        :rtype: str
        """
        logger.info('Guessing encoding for language %s', self.language)
        # always try utf-8 first
        encodings = ['utf-8']
        # add language-specific encodings; order matters — the first encoding
        # that decodes without error wins.
        if self.language.alpha3 == 'zho':
            encodings.extend(['gb18030', 'big5'])
        elif self.language.alpha3 == 'jpn':
            encodings.append('shift-jis')
        elif self.language.alpha3 == 'ara':
            encodings.append('windows-1256')
        elif self.language.alpha3 == 'heb':
            encodings.append('windows-1255')
        elif self.language.alpha3 == 'tur':
            encodings.extend(['iso-8859-9', 'windows-1254'])
        elif self.language.alpha3 == 'pol':
            # Eastern European Group 1
            encodings.extend(['windows-1250'])
        elif self.language.alpha3 == 'bul':
            # Eastern European Group 2
            encodings.extend(['windows-1251'])
        else:
            # Western European (windows-1252)
            encodings.append('latin-1')
        # try to decode
        logger.debug('Trying encodings %r', encodings)
        for encoding in encodings:
            try:
                self.content.decode(encoding)
            except UnicodeDecodeError:
                pass
            else:
                logger.info('Guessed encoding %s', encoding)
                return encoding
        logger.warning('Could not guess encoding from language')
        # fallback on chardet; note this may return None if chardet is
        # completely stumped.
        encoding = chardet.detect(self.content)['encoding']
        logger.info('Chardet found encoding %s', encoding)
        return encoding
    def get_matches(self, video):
        """Get the matches against the `video`.
        :param video: the video to get the matches with.
        :type video: :class:`~subliminal.video.Video`
        :return: matches of the subtitle.
        :rtype: set
        """
        raise NotImplementedError
    def __hash__(self):
        # Provider name + id uniquely identifies a subtitle across providers.
        return hash(self.provider_name + '-' + self.id)
    def __repr__(self):
        return '<%s %r [%s]>' % (self.__class__.__name__, self.id, self.language)
def get_subtitle_path(video_path, language=None, extension='.srt'):
    """Get the subtitle path using the `video_path` and `language`.
    :param str video_path: path to the video.
    :param language: language of the subtitle to put in the path.
    :type language: :class:`~babelfish.language.Language`
    :param str extension: extension of the subtitle.
    :return: path of the subtitle.
    :rtype: str
    """
    root = os.path.splitext(video_path)[0]
    if language:
        return root + '.' + str(language) + extension
    return root + extension
def guess_matches(video, guess, partial=False):
    """Get matches between a `video` and a `guess`.
    If a guess is `partial`, the absence information won't be counted as a match.
    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param guess: the guess.
    :type guess: dict
    :param bool partial: whether or not the guess is partial.
    :return: matches between the `video` and the `guess`.
    :rtype: set
    """
    # NOTE: the guess dict keys ('title', 'screen_size', ...) follow the
    # guessit vocabulary — presumably produced by guessit.guessit(); each
    # match only counts when both sides carry the information and agree.
    matches = set()
    if isinstance(video, Episode):
        # series
        if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series):
            matches.add('series')
        # title
        if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title):
            matches.add('title')
        # season
        if video.season and 'season' in guess and guess['season'] == video.season:
            matches.add('season')
        # episode
        if video.episode and 'episode' in guess and guess['episode'] == video.episode:
            matches.add('episode')
        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')
        # count "no year" as an information
        if not partial and video.original_series and 'year' not in guess:
            matches.add('year')
    elif isinstance(video, Movie):
        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')
        # title
        if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title):
            matches.add('title')
    # release_group (compared after normalization, e.g. stripping brackets)
    if (video.release_group and 'release_group' in guess and
            sanitize_release_group(guess['release_group']) == sanitize_release_group(video.release_group)):
        matches.add('release_group')
    # resolution
    if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution:
        matches.add('resolution')
    # format (case-insensitive: 'WEB-DL' vs 'web-dl')
    if video.format and 'format' in guess and guess['format'].lower() == video.format.lower():
        matches.add('format')
    # video_codec
    if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec:
        matches.add('video_codec')
    # audio_codec
    if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec:
        matches.add('audio_codec')
    return matches
def fix_line_ending(content):
    r"""Normalize all line endings in *content* to ``\n``.

    Handles both Windows (``\r\n``) and old-Mac (``\r``) endings.

    :param bytes content: content of the subtitle.
    :return: the content with fixed line endings.
    :rtype: bytes
    """
    # Collapse CRLF first so the lone-CR pass can't double-convert it.
    normalized = content.replace(b'\r\n', b'\n')
    return normalized.replace(b'\r', b'\n')
| neo1691/subliminal | subliminal/subtitle.py | Python | mit | 8,191 |
"""
tnetstring: data serialization using typed netstrings
======================================================
This is a custom Python 3 implementation of tnetstrings.
Compared to other implementations, the main difference
is that this implementation supports a custom unicode datatype.
An ordinary tnetstring is a blob of data prefixed with its length and postfixed
with its type. Here are some examples:
>>> tnetstring.dumps("hello world")
11:hello world,
>>> tnetstring.dumps(12345)
5:12345#
>>> tnetstring.dumps([12345, True, 0])
19:5:12345#4:true!1:0#]
This module gives you the following functions:
:dump: dump an object as a tnetstring to a file
:dumps: dump an object as a tnetstring to a string
:load: load a tnetstring-encoded object from a file
:loads: load a tnetstring-encoded object from a string
Note that since parsing a tnetstring requires reading all the data into memory
at once, there's no efficiency gain from using the file-based versions of these
functions. They're only here so you can use load() to read precisely one
item from a file or socket without consuming any extra data.
The tnetstrings specification explicitly states that strings are binary blobs
and forbids the use of unicode at the protocol level.
**This implementation decodes dictionary keys as surrogate-escaped ASCII**,
all other strings are returned as plain bytes.
:Copyright: (c) 2012-2013 by Ryan Kelly <ryan@rfk.id.au>.
:Copyright: (c) 2014 by Carlo Pires <carlopires@gmail.com>.
:Copyright: (c) 2016 by Maximilian Hils <tnetstring3@maximilianhils.com>.
:License: MIT
"""
import collections
import six
from typing import io, Union, Tuple # noqa
TSerializable = Union[None, bool, int, float, bytes, list, tuple, dict]
def dumps(value):
    # type: (TSerializable) -> bytes
    """Serialize ``value`` into its tnetstring byte representation.

    Output fragments are accumulated back-to-front in a deque by
    :func:`_rdumpq` and joined once at the end — measurably faster than
    building intermediate strings.
    """
    fragments = collections.deque()
    _rdumpq(fragments, 0, value)
    return b''.join(fragments)
def dump(value, file_handle):
    # type: (TSerializable, io.BinaryIO) -> None
    """
    This function dumps a python object as a tnetstring and
    writes it to the given file.

    The file only needs a write() method accepting bytes.
    """
    file_handle.write(dumps(value))
def _rdumpq(q, size, value):
    # type: (collections.deque, int, TSerializable) -> int
    """
    Dump value as a tnetstring, to a deque instance, last chunks first.
    This function generates the tnetstring representation of the given value,
    pushing chunks of the output onto the given deque instance. It pushes
    the last chunk first, then recursively generates more chunks.
    When passed in the current size of the string in the queue, it will return
    the new size of the string in the queue.
    Operating last-chunk-first makes it easy to calculate the size written
    for recursive structures without having to build their representation as
    a string. This is measurably faster than generating the intermediate
    strings, especially on deeply nested structures.
    """
    # All chunks are prepended: the deque is consumed left-to-right by the
    # final b''.join, so pushing the type-tag first yields correct order.
    write = q.appendleft
    if value is None:
        write(b'0:~')
        return size + 3
    elif value is True:
        write(b'4:true!')
        return size + 7
    elif value is False:
        write(b'5:false!')
        return size + 8
    elif isinstance(value, six.integer_types):
        data = str(value).encode()
        ldata = len(data)
        span = str(ldata).encode()
        write(b'%s:%s#' % (span, data))
        return size + 2 + len(span) + ldata
    elif isinstance(value, float):
        # Use repr() for float rather than str().
        # It round-trips more accurately.
        # Probably unnecessary in later python versions that
        # use David Gay's ftoa routines.
        data = repr(value).encode()
        ldata = len(data)
        span = str(ldata).encode()
        write(b'%s:%s^' % (span, data))
        return size + 2 + len(span) + ldata
    elif isinstance(value, bytes):
        # Pushed as four separate chunks (tag, data, colon, span) to avoid
        # the %-format copy of a potentially large payload.
        data = value
        ldata = len(data)
        span = str(ldata).encode()
        write(b',')
        write(data)
        write(b':')
        write(span)
        return size + 2 + len(span) + ldata
    elif isinstance(value, six.text_type):
        # Custom extension: unicode strings get the ';' tag (see module docs).
        data = value.encode("utf8")
        ldata = len(data)
        span = str(ldata).encode()
        write(b';')
        write(data)
        write(b':')
        write(span)
        return size + 2 + len(span) + ldata
    elif isinstance(value, (list, tuple)):
        # Emit the closing tag, recurse over items in reverse, then compute
        # the length prefix from how much the queue grew.
        write(b']')
        init_size = size = size + 1
        for item in reversed(value):
            size = _rdumpq(q, size, item)
        span = str(size - init_size).encode()
        write(b':')
        write(span)
        return size + 1 + len(span)
    elif isinstance(value, dict):
        write(b'}')
        init_size = size = size + 1
        for (k, v) in value.items():
            # Value first, then key: reversed because chunks are prepended.
            size = _rdumpq(q, size, v)
            size = _rdumpq(q, size, k)
        span = str(size - init_size).encode()
        write(b':')
        write(span)
        return size + 1 + len(span)
    else:
        raise ValueError("unserializable object: {} ({})".format(value, type(value)))
def loads(string):
    # type: (bytes) -> TSerializable
    """Parse a complete tnetstring and return the decoded python object.

    Trailing bytes after the first tnetstring are silently ignored.
    """
    parsed, _ = pop(string)
    return parsed
def load(file_handle):
    # type: (io.BinaryIO) -> TSerializable
    """load(file) -> object

    Read exactly one tnetstring from ``file_handle`` and parse it into a
    python object. The file must support read(); no more bytes than
    necessary are consumed, so it is safe on sockets/streams.
    """
    # Consume the length prefix one byte at a time; the netstring spec
    # explicitly forbids padding zeros, and a 10+ digit length is rejected
    # outright as nonsense.
    prefix = b""
    ch = file_handle.read(1)
    while ch.isdigit():
        prefix += ch
        if len(prefix) > 9:
            raise ValueError("not a tnetstring: absurdly large length prefix")
        ch = file_handle.read(1)
    if ch != b":":
        raise ValueError("not a tnetstring: missing or invalid length prefix")
    payload = file_handle.read(int(prefix))
    tag = file_handle.read(1)[0]
    return parse(tag, payload)
def parse(data_type, data):
    # type: (int, bytes) -> TSerializable
    """Decode a single tnetstring payload given its one-byte type tag.

    ``data_type`` is the tag as an int (on py2 a 1-char str is normalized).
    Raises ValueError for malformed payloads or unknown tags.
    """
    # On py2, indexing a bytes/str yields a 1-char string, so callers pass
    # that through; normalize to an int code point for uniform comparison.
    if six.PY2:
        data_type = ord(data_type)
    if data_type == ord(b','):
        # Plain byte string.
        return data
    if data_type == ord(b';'):
        # Custom unicode extension (see module docstring).
        return data.decode("utf8")
    if data_type == ord(b'#'):
        try:
            if six.PY2:
                return long(data)
            return int(data)
        except ValueError:
            raise ValueError("not a tnetstring: invalid integer literal: {}".format(data))
    if data_type == ord(b'^'):
        try:
            return float(data)
        except ValueError:
            raise ValueError("not a tnetstring: invalid float literal: {}".format(data))
    if data_type == ord(b'!'):
        if data == b'true':
            return True
        elif data == b'false':
            return False
        else:
            raise ValueError("not a tnetstring: invalid boolean literal: {}".format(data))
    if data_type == ord(b'~'):
        # Null must have an empty payload.
        if data:
            raise ValueError("not a tnetstring: invalid null literal")
        return None
    if data_type == ord(b']'):
        # List: repeatedly pop items off the payload until it is exhausted.
        l = []
        while data:
            item, data = pop(data)
            l.append(item)
        return l
    if data_type == ord(b'}'):
        # Dict: payload is an alternating sequence of key/value tnetstrings.
        d = {}
        while data:
            key, data = pop(data)
            val, data = pop(data)
            d[key] = val
        return d
    raise ValueError("unknown type tag: {}".format(data_type))
def pop(data):
    # type: (bytes) -> Tuple[TSerializable, bytes]
    """Parse one tnetstring off the front of ``data``.

    Returns a (parsed_object, remaining_bytes) tuple so callers can keep
    consuming a concatenated stream of tnetstrings.
    """
    # Split off the ASCII length prefix: "<len>:<payload><tag><rest>".
    try:
        prefix, data = data.split(b':', 1)
        payload_len = int(prefix)
    except ValueError:
        raise ValueError("not a tnetstring: missing or invalid length prefix: {}".format(data))
    try:
        payload, tag, remain = data[:payload_len], data[payload_len], data[payload_len + 1:]
    except IndexError:
        # Only the tag lookup can raise (slicing never does), which means the
        # payload was shorter than its declared length — no further check needed.
        raise ValueError("not a tnetstring: invalid length prefix: {}".format(payload_len))
    return parse(tag, payload), remain
__all__ = ["dump", "dumps", "load", "loads", "pop"]
| dufferzafar/mitmproxy | mitmproxy/contrib/tnetstring.py | Python | mit | 8,799 |
import os
import sys
import time
import types
# Build a per-run log file name stamped YYMMDD_HHMMSS (year relative to
# 2000) so successive launches never clobber each other's logs.
# The original `1 and time.localtime()` was a no-op obfuscation.
ltime = time.localtime()
logSuffix = '%02d%02d%02d_%02d%02d%02d' % (ltime[0] - 2000, ltime[1], ltime[2],
                                           ltime[3], ltime[4], ltime[5])
logfile = 'toontownD-' + logSuffix + '.log'
class LogAndOutput:
    """File-like tee: every write/flush goes to both a log file and the
    original stream, flushing eagerly so a crash right after a write still
    leaves the text on disk.
    """
    def __init__(self, orig, log):
        # orig: the original stream (e.g. sys.__stdout__); log: the log file.
        self.orig = orig
        self.log = log
    def write(self, text):
        """Write `text` to the log first, then mirror it to the original
        stream. (Parameter renamed from `str`, which shadowed the builtin.)
        """
        self.log.write(text)
        self.log.flush()
        self.orig.write(text)
        self.orig.flush()
    def flush(self):
        """Flush both underlying streams."""
        self.log.flush()
        self.orig.flush()
# Redirect stdout/stderr through the tee so everything printed by the game
# is also appended to the launcher log file.
# NOTE: the un-parenthesized print statements below are Python 2 syntax —
# this module targets Python 2 only.
log = open(logfile, 'a')
logOut = LogAndOutput(sys.__stdout__, log)
logErr = LogAndOutput(sys.__stderr__, log)
sys.stdout = logOut
sys.stderr = logErr
print('\n\nStarting Toontown...')
if 1:
    print 'Current time: ' + time.asctime(time.localtime(time.time())) + ' ' + time.tzname[0]
    print 'sys.path = ', sys.path
    print 'sys.argv = ', sys.argv
from otp.launcher.LauncherBase import LauncherBase
from otp.otpbase import OTPLauncherGlobals
from panda3d.core import *
from toontown.toonbase import TTLocalizer
class ToontownLauncher(LauncherBase):
    """Toontown-specific launcher (Python 2).

    Specializes LauncherBase with Toontown's download phases, Windows
    registry keys, and the account/game-server parameters handed over on
    the command line. On non-Windows hosts registry reads/writes fall back
    to environment variables.
    """
    GameName = 'Toontown'
    # Download phases in order; TmpOverallMap gives each phase's share of
    # the overall progress bar (values sum to ~1.0).
    LauncherPhases = [3, 3.5, 4, 5, 5.5, 6, 7, 8, 9, 10, 11, 12, 13]
    TmpOverallMap = [0.25, 0.15, 0.12, 0.17, 0.08, 0.07, 0.05, 0.05, 0.017,
                     0.011, 0.01, 0.012, 0.01]
    RegistryKey = 'Software\\Disney\\Disney Online\\Toontown'
    ForegroundSleepTime = 0.01
    Localizer = TTLocalizer
    VerifyFiles = 1
    DecompressMultifiles = True
    def __init__(self):
        # When relaunched through Phase2.py, strip that extra argv entry so
        # the positional parameters below line up.
        if sys.argv[2] == 'Phase2.py':
            sys.argv = sys.argv[:1] + sys.argv[3:]
        # Expected argv: <prog> <?> <gameServer> <accountServer> <testFlag> [...]
        if len(sys.argv) == 5 or len(sys.argv) == 6:
            self.gameServer = sys.argv[2]
            self.accountServer = sys.argv[3]
            self.testServerFlag = int(sys.argv[4])
        else:
            print 'Error: Launcher: incorrect number of parameters'
            sys.exit()
        # Names of registry/environment slots shared with the web launcher.
        self.toontownBlueKey = 'TOONTOWN_BLUE'
        self.toontownPlayTokenKey = 'TTI_PLAYCOOKIE'
        self.launcherMessageKey = 'LAUNCHER_MESSAGE'
        self.game1DoneKey = 'GAME1_DONE'
        self.game2DoneKey = 'GAME2_DONE'
        self.tutorialCompleteKey = 'TUTORIAL_DONE'
        self.toontownRegistryKey = 'Software\\Disney\\Disney Online\\Toontown'
        # Test servers get their own registry subtree to avoid clobbering
        # live-server state.
        if self.testServerFlag:
            self.toontownRegistryKey = '%s%s' % (self.toontownRegistryKey, 'Test')
        self.toontownRegistryKey = '%s%s' % (self.toontownRegistryKey, self.getProductName())
        LauncherBase.__init__(self)
        self.webAcctParams = 'WEB_ACCT_PARAMS'
        self.parseWebAcctParams()
        # Blocks here: mainLoop() runs the download/patch state machine.
        self.mainLoop()
    def getValue(self, key, default=None):
        # Registry read with a default; retries without the default because
        # older getRegistry signatures may not accept one.
        try:
            return self.getRegistry(key, default)
        except:
            return self.getRegistry(key)
    def setValue(self, key, value):
        self.setRegistry(key, value)
    def getVerifyFiles(self):
        # Always verify downloaded files.
        return 1
    def getTestServerFlag(self):
        return self.testServerFlag
    def getGameServer(self):
        return self.gameServer
    def getLogFileName(self):
        return 'toontown'
    def parseWebAcctParams(self):
        """Parse the '&'-separated name=value blob the web launcher left in
        the registry (or the fake-web-acct-params config for dev), extracting
        the chat/parent-password eligibility flags."""
        s = config.GetString('fake-web-acct-params', '')
        if not s:
            s = self.getRegistry(self.webAcctParams)
        # Clear the slot so stale params are never re-read on next launch.
        self.setRegistry(self.webAcctParams, '')
        l = s.split('&')
        length = len(l)
        dict = {}
        for index in xrange(0, len(l)):
            args = l[index].split('=')
            if len(args) == 3:
                [name, value] = args[-2:]
                dict[name] = int(value)
            elif len(args) == 2:
                [name, value] = args
                dict[name] = int(value)
        # Default to requiring the parent password for secret friends.
        self.secretNeedsParentPasswordKey = 1
        if 'secretsNeedsParentPassword' in dict:
            self.secretNeedsParentPasswordKey = dict['secretsNeedsParentPassword']
        else:
            self.notify.warning('no secretNeedsParentPassword token in webAcctParams')
        self.notify.info('secretNeedsParentPassword = %d' % self.secretNeedsParentPasswordKey)
        self.chatEligibleKey = 0
        if 'chatEligible' in dict:
            self.chatEligibleKey = dict['chatEligible']
        else:
            self.notify.warning('no chatEligible token in webAcctParams')
        self.notify.info('chatEligibleKey = %d' % self.chatEligibleKey)
    def getBlue(self):
        # One-shot read: the slot is cleared immediately after reading.
        blue = self.getValue(self.toontownBlueKey)
        self.setValue(self.toontownBlueKey, '')
        if blue == 'NO BLUE':
            blue = None
        return blue
    def getPlayToken(self):
        # One-shot read, same pattern as getBlue().
        playToken = self.getValue(self.toontownPlayTokenKey)
        self.setValue(self.toontownPlayTokenKey, '')
        if playToken == 'NO PLAYTOKEN':
            playToken = None
        return playToken
    def setRegistry(self, name, value):
        # No-op off Windows; only int and string values are supported.
        if not self.WIN32:
            return
        t = type(value)
        if t == types.IntType:
            WindowsRegistry.setIntValue(self.toontownRegistryKey, name, value)
        elif t == types.StringType:
            WindowsRegistry.setStringValue(self.toontownRegistryKey, name, value)
        else:
            self.notify.warning('setRegistry: Invalid type for registry value: ' + `value`)
    def getRegistry(self, name, missingValue=None):
        """Read `name` from the registry (Windows) or environment (others),
        returning `missingValue` when absent. Environment values are coerced
        to int when possible."""
        self.notify.info('getRegistry%s' % ((name, missingValue),))
        if not self.WIN32:
            if missingValue == None:
                missingValue = ''
            value = os.environ.get(name, missingValue)
            try:
                value = int(value)
            except: pass
            return value
        t = WindowsRegistry.getKeyType(self.toontownRegistryKey, name)
        if t == WindowsRegistry.TInt:
            if missingValue == None:
                missingValue = 0
            return WindowsRegistry.getIntValue(self.toontownRegistryKey,
                                               name, missingValue)
        elif t == WindowsRegistry.TString:
            if missingValue == None:
                missingValue = ''
            return WindowsRegistry.getStringValue(self.toontownRegistryKey,
                                                  name, missingValue)
        else:
            return missingValue
    def getCDDownloadPath(self, origPath, serverFilePath):
        # Layout: <origPath>/<version><suffix>/CD_<n>/<file>
        return '%s/%s%s/CD_%d/%s' % (origPath, self.ServerVersion, self.ServerVersionSuffix, self.fromCD, serverFilePath)
    def getDownloadPath(self, origPath, serverFilePath):
        # Layout: <origPath>/<version><suffix>/<file>
        return '%s/%s%s/%s' % (origPath, self.ServerVersion, self.ServerVersionSuffix, serverFilePath)
    def getPercentPatchComplete(self, bytesWritten):
        # Avoid division by zero when there is nothing to patch.
        if self.totalPatchDownload:
            return LauncherBase.getPercentPatchComplete(self, bytesWritten)
        else:
            return 0
    def hashIsValid(self, serverHash, hashStr):
        # Server may send the hash in decimal or hex form.
        return serverHash.setFromDec(hashStr) or serverHash.setFromHex(hashStr)
    def launcherMessage(self, msg):
        # Mirror status messages into the registry so the web page can show them.
        LauncherBase.launcherMessage(self, msg)
        self.setRegistry(self.launcherMessageKey, msg)
    def getAccountServer(self):
        return self.accountServer
    def setTutorialComplete(self):
        # NOTE(review): writes 0 into the "tutorial done" slot — looks like it
        # should be 1; preserved as-is, verify against callers.
        self.setRegistry(self.tutorialCompleteKey, 0)
    def getTutorialComplete(self):
        return self.getRegistry(self.tutorialCompleteKey, 0)
    def getGame2Done(self):
        return self.getRegistry(self.game2DoneKey, 0)
    def setPandaErrorCode(self, code):
        """Record the exit reason. Some special codes redirect the web page
        (chat enable, parent password, purchase) instead of reporting an
        error; those store 0 as the error code plus an EXIT_PAGE hint."""
        self.pandaErrorCode = code
        if self.WIN32:
            self.notify.info('setting panda error code to %s' % code)
            exitCode2exitPage = {
                OTPLauncherGlobals.ExitEnableChat: 'chat',
                OTPLauncherGlobals.ExitSetParentPassword: 'setparentpassword',
                OTPLauncherGlobals.ExitPurchase: 'purchase'}
            if code in exitCode2exitPage:
                self.setRegistry('EXIT_PAGE', exitCode2exitPage[code])
                self.setRegistry(self.PandaErrorCodeKey, 0)
            else:
                self.setRegistry(self.PandaErrorCodeKey, code)
        else:
            LauncherBase.setPandaErrorCode(self, code)
    def getNeedPwForSecretKey(self):
        return self.secretNeedsParentPasswordKey
    def getParentPasswordSet(self):
        return self.chatEligibleKey
    def MakeNTFSFilesGlobalWriteable(self, pathToSet=None):
        # Windows-only permission fixup; no-op elsewhere.
        if not self.WIN32:
            return
        LauncherBase.MakeNTFSFilesGlobalWriteable(self, pathToSet)
    def startGame(self):
        """Hand off to the game proper: drop the bootstrap Phase3.py file,
        restart the task/event managers and import the game entry module
        (importing ToontownStart runs the game)."""
        try:
            os.remove('Phase3.py')
        except: pass
        import Phase3
        self.newTaskManager()
        from direct.showbase.EventManagerGlobal import eventMgr
        eventMgr.restart()
        from toontown.toonbase import ToontownStart
| Spiderlover/Toontown | toontown/launcher/ToontownLauncher.py | Python | mit | 8,767 |
from RockStar import RockStar
# "Facefuck" source (an esoteric Brainfuck dialect whose opcodes are
# emoticons); given the target file name it presumably prints "Hello World"
# — the program is only committed, not executed, here.
facefuck_code = (":) :) :) :) :) :) :) :) :) :)\n=(\n:> :) :) :) :) :) :) :) :> :) :) :) :) :) :) :) :) :) :) :> :) :) :) :> :) :< :< :< :< :(\n"
                 "=)\n:> :) :) :P\n:> :) :P\n:) :) :) :) :) :) :) :P\n"
                 ":P\n:) :) :) :P\n:> :) :) :P\n:< :< :) :) :) :) :) :) :) :) :) :) :) :) :) :) :) :P\n:> :P\n:) :) :) :P\n:( :( :( :( :( :( :P\n"
                 ":( :( :( :( :( :( :( :( :P\n:> :) :P\n:> :P\n")
# Fake 400 days of commits of helloWorld.ff to pad the contribution graph.
rock_it_bro = RockStar(days=400, file_name='helloWorld.ff', code=facefuck_code)
rock_it_bro.make_me_a_rockstar()
| johniek/meteor-rock | examples/facefuck_rockstar.py | Python | mit | 533 |
import time
import json
import datetime
import urllib
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django.utils.dateformat import format
from .utils import datetime_to_timestamp, timestamp_to_datetime
from .models import Message, Channel
def create_message(text, timestamp, username, channel):
    """
    Creates a message with the given text, datetime,
    username, channel and with typing set to True.

    datetime_sent is intentionally left unset: the fixture models a message
    that is still being typed.
    """
    return Message.objects.create(
        text=text,
        datetime_start=timestamp_to_datetime(timestamp),
        username=username,
        typing=True,
        channel=channel
    )
class ChatClient(Client):
    """Test client whose DELETE/PATCH helpers send urlencoded bodies.

    Django's test Client encodes DELETE/PATCH payloads as multipart by
    default; the chat API expects form-urlencoded, so the content type is
    forced. Each call deliberately goes through a fresh ``Client()`` (no
    shared cookies/session), preserving the original behavior.
    """
    def delete(self, url, qstring):
        """Issue a DELETE with an urlencoded `qstring` body."""
        return Client().delete(
            url,
            qstring,
            content_type='application/x-www-form-urlencoded'
        )
    def patch(self, url, qstring):
        """Issue a PATCH with an urlencoded `qstring` body.

        Fixed: the first parameter was misspelled ``slef``; it only worked
        because the instance is bound positionally.
        """
        return Client().patch(
            url,
            qstring,
            content_type='application/x-www-form-urlencoded'
        )
class ChatTests(TestCase):
    """Shared fixtures for the chat view tests: one pre-created Channel."""
    def setUp(self):
        super(ChatTests, self).setUp()
        # Every test posts/reads messages against this channel.
        self.channel = G(Channel, name='Channel')
class MessageViewPOSTTests(ChatTests):
    """Tests for POSTing new messages to the chat:message endpoint."""
    def post_and_get_response(self, text, timestamp, username, typing):
        """
        Posts a message on chat:message and returns the response
        """
        return self.client.post(
            reverse('chat:message', args=(self.channel.name,)),
            {'text': text, 'username': username, 'datetime_start': timestamp, 'typing': typing}
        )
    def test_post_valid_message(self):
        """
        When a valid message is sent, the view should
        save the message in the database and return
        the id of the message.
        """
        timestamp = 10 ** 11
        username = 'vitsalisa'
        text = 'Message'
        response = self.post_and_get_response(
            text=text,
            timestamp=timestamp,
            username=username,
            typing=True
        )
        messages = Message.objects.filter(username=username)
        self.assertTrue(messages.exists())
        self.assertEquals(len(messages), 1)
        self.assertEqual(response.status_code, 200)
        # NOTE(review): stray trailing semicolons below are harmless but
        # unidiomatic; assertEquals above is a deprecated alias of assertEqual.
        message = Message.objects.get(username=username);
        self.assertEqual(int(response.content), message.id);
        self.assertEqual(message.username, username);
        self.assertTrue(message.typing)
        self.assertEqual(message.text, text)
        self.assertEqual(datetime_to_timestamp(message.datetime_start), timestamp)
    def test_post_message_without_datetime_start(self):
        """
        When a message is sent without a datetime_start the view
        should produce an appropriate error and a 400(Bad Request)
        status code. The message should not be saved.
        """
        post_dict = {'text': 'Message', 'username': 'vitsalis', 'typing': True}
        response = self.client.post(
            reverse('chat:message', args=(self.channel.name,)),
            post_dict
        )
        self.assertFalse(Message.objects.filter(username='vitsalis').exists())
        self.assertEqual(response.status_code, 400)
    def test_post_message_without_username(self):
        """
        When a message is sent without a username the view
        should produce an appropriate error and a 400(Bad Request)
        status code. The message should not be saved.
        """
        timestamp = 10 ** 11
        post_dict = {'text': 'Message', 'datetime_start': timestamp, 'typing': True}
        response = self.client.post(
            reverse('chat:message', args=(self.channel.name,)),
            post_dict
        )
        datetime_start_field = timestamp_to_datetime(timestamp)
        self.assertFalse(Message.objects.filter(datetime_start=datetime_start_field).exists())
        self.assertEqual(response.status_code, 400)
    def test_post_message_with_invalid_channel_name(self):
        """
        When a message is sent with an invalid channel name
        the view should produce an appropriate error and a
        404(Not Found) status code. The message should not be saved.
        """
        timestamp = 10 ** 11
        response = self.client.post(
            reverse('chat:message', args=('invalid_channel',)),
            {'text': 'Message', 'username': 'vitsalis', 'datetime_start': timestamp, 'typing': True}
        )
        self.assertFalse(Message.objects.filter(username='vitsalis').exists())
        self.assertEqual(response.status_code, 404)
    def test_post_message_without_text(self):
        """
        When a message is sent without a channel_id the view
        should produce an appropriate error and a 400(Bad Request)
        status code. The message should not be saved.
        """
        timestamp = 10 ** 11
        post_dict = {'username': 'vitsalis', 'datetime_start': timestamp, 'typing': True}
        response = self.client.post(
            reverse('chat:message', args=(self.channel.name,)),
            post_dict
        )
        self.assertFalse(Message.objects.filter(username='vitsalis').exists())
        self.assertEqual(response.status_code, 400)
    def test_post_message_with_invalid_datetime_start(self):
        """
        When a message is sent with an invalid datetime the view
        should produce an appropriate error and a 400(Bad Request)
        status code. The message should not be saved.
        """
        response = self.post_and_get_response(
            text='Message',
            timestamp='wtf',
            username='vitsalis',
            typing=True
        )
        self.assertFalse(Message.objects.filter(username='vitsalis').exists())
        self.assertEqual(response.status_code, 400)
    def test_post_message_with_future_datetime_start(self):
        """
        When a message is sent with a future datetime the view
        should change the datetime to the current one and save the message.
        """
        # One day in the future, expressed in milliseconds.
        timestamp = int(format(datetime.datetime.utcnow() + datetime.timedelta(days=1), 'U')) * 1000
        response = self.post_and_get_response(
            text='Message',
            timestamp=timestamp,
            username='vitsalis',
            typing=True
        )
        messages = Message.objects.filter(username='vitsalis')
        self.assertTrue(messages.exists())
        self.assertEqual(len(messages), 1)
        self.assertTrue(datetime_to_timestamp(messages[0].datetime_start) < timestamp)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(int(response.content), messages[0].id)
    def test_post_message_with_typing_false(self):
        """
        When typing is False the view should save the message
        and make its datetime_sent equal to datetime_start.
        """
        timestamp = 10 ** 11
        response = self.post_and_get_response(
            text='Message',
            timestamp=timestamp,
            username='vitsalis',
            typing=False
        )
        messages = Message.objects.filter(username='vitsalis')
        self.assertTrue(messages.exists())
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0].datetime_sent, messages[0].datetime_start)
class MessageViewGETTests(ChatTests):
    """Tests for GETting message lists from the chat:message endpoint."""
    def test_request_messages(self):
        """
        When a valid request is sent the view should return
        a JSON object containing messages. Each message should be
        in the form {text: ...,username: ..., datetime: ...}.
        The messages should be in chronological order(more recent first).
        The number of objects is specified by the lim argument.
        """
        lim = 2
        timestamp = 10 ** 11
        message1 = Message.objects.create(
            text='Message1',
            datetime_start=timestamp_to_datetime(timestamp),
            datetime_sent=timestamp_to_datetime(timestamp + 10),
            username='vitsalis',
            typing=True,
            channel=self.channel
        )
        message2 = Message.objects.create(
            text='Message2',
            datetime_start=timestamp_to_datetime(timestamp + 60 * 60),
            datetime_sent=timestamp_to_datetime(timestamp + 60 * 60 + 10),
            username='pkakelas',
            typing=True,
            channel=self.channel
        )
        response = self.client.get(
            reverse('chat:message', args=(self.channel.name,)),
            {'lim': lim}
        )
        messages = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(messages), 2)
        # The order is reverse chronological
        self.assertEqual(messages[0]['text'], message2.text)
        self.assertEqual(messages[0]['username'], message2.username)
        self.assertEqual(messages[0]['datetime_start'], datetime_to_timestamp(message2.datetime_start))
        self.assertTrue(messages[0]['typing'])
        self.assertEqual(messages[0]['id'], message2.id)
        self.assertEqual(messages[0]['datetime_sent'], datetime_to_timestamp(message2.datetime_sent))
        self.assertEqual(messages[1]['text'], message1.text)
        self.assertEqual(messages[1]['username'], message1.username)
        self.assertEqual(messages[1]['datetime_start'], datetime_to_timestamp(message1.datetime_start))
        self.assertTrue(messages[1]['typing'])
        self.assertEqual(messages[1]['id'], message1.id)
        self.assertEqual(messages[1]['datetime_sent'], datetime_to_timestamp(message1.datetime_sent))
    def test_request_messages_with_bigger_limit_than_messages(self):
        """
        When the lim is bigger than the number of the messages
        on the database for the channel, the server should return
        all the messages for the channel.
        """
        lim = 100
        timestamp = 10 ** 11
        create_message(
            text='Message1',
            timestamp=timestamp,
            username='vitsalis',
            channel=self.channel
        )
        create_message(
            text='Message2',
            timestamp=timestamp + 60 * 60,
            username='pkakelas',
            channel=self.channel
        )
        messages = json.loads(self.client.get(
            reverse('chat:message', args=(self.channel.name,)),
            {'lim': lim}
        ).content)
        self.assertEqual(len(messages), 2)
    def test_request_messages_with_smaller_limit_than_messages(self):
        """
        When the lim is smaller than the number of the messages
        on the database for the channel, the server should return
        no more than <lim> messages.
        """
        lim = 2
        timestamp = 10 ** 11
        for i in range(100):
            create_message(
                text='Message' + str(i),
                timestamp=timestamp + i,
                username='vitsalis',
                channel=self.channel
            )
        messages = json.loads(self.client.get(
            reverse('chat:message', args=(self.channel.name,)),
            {'lim': lim}
        ).content)
        self.assertEqual(len(messages), 2)
        # Most recent messages come back first.
        self.assertEqual(messages[0]['text'], 'Message99')
        self.assertEqual(messages[1]['text'], 'Message98')
    def test_request_messages_without_lim(self):
        """
        When the lim is not specified the view should return
        100 messages(or less if there are less than 100 messages).
        """
        timestamp = 10 ** 11
        for i in range(200):
            create_message(
                text='Message' + str(i),
                timestamp=timestamp + i,
                username='vitsalis',
                channel=self.channel
            )
        messages = json.loads(self.client.get(
            reverse('chat:message', args=(self.channel.name,)),
        ).content)
        self.assertEqual(len(messages), 100)
    def test_request_messages_from_one_channel(self):
        """
        The view should return the messages from the
        channel specified.
        """
        channel1 = G(Channel, name='Channel1')
        channel2 = G(Channel, name='Channel2')
        timestamp = 10 ** 11
        message1 = create_message(
            text='Message1',
            timestamp=timestamp,
            username='vitsalis',
            channel=channel1
        )
        create_message(
            text='Message2',
            timestamp=timestamp,
            username='vitsalis',
            channel=channel2
        )
        messages = json.loads(self.client.get(
            reverse('chat:message', args=(channel1.name,)),
        ).content)
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0]['text'], message1.text)
    def test_request_messages_with_invalid_channel_name(self):
        """
        When the channel with the name <channel_name>
        does not exist, a 404(Not Found) response code
        should be returned from the view.
        """
        timestamp = 10 ** 11
        create_message(
            text='Message1',
            timestamp=timestamp,
            username='vitsalis',
            channel=self.channel
        )
        response = self.client.get(
            reverse('chat:message', args=('invalid_name',)),
        )
        self.assertEqual(response.status_code, 404)
class MessageViewPATCHTests(ChatTests):
    """Tests for the PATCH handler of the chat:message view.

    PATCH makes a provisional ("typing") message persistent by setting
    its final text and datetime_sent.
    """
    client_class = ChatClient
    def patch_and_get_response(self, messageid, text, timestamp, typing):
        """
        Patches a message on chat:message and returns the response
        """
        # PATCH bodies are not parsed by Django automatically, so the
        # payload is sent urlencoded (Python 2 urllib).
        qstring = urllib.urlencode({
            'id': messageid,
            'text': text,
            'datetime_sent': timestamp,
            'typing': typing
        })
        return self.client.patch(
            reverse('chat:message', args=(self.channel.name,)),
            qstring
        )
    def test_patch_message(self):
        """
        The view should update the message according to the
        data provided and respond with a 204(No Content) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        response = self.patch_and_get_response(
            messageid=message.id,
            text='Message Updated',
            timestamp=timestamp + 10,
            typing=False
        )
        messages = Message.objects.filter(username='vitsalis')
        self.assertTrue(messages.exists())
        self.assertEqual(len(messages), 1)
        self.assertEqual(response.status_code, 204)
        # The patch must change text/datetime_sent/typing but leave
        # datetime_start and username untouched.
        self.assertEqual(messages[0].text, 'Message Updated')
        self.assertEqual(datetime_to_timestamp(messages[0].datetime_start), timestamp)
        self.assertEqual(datetime_to_timestamp(messages[0].datetime_sent), timestamp + 10)
        self.assertEqual(messages[0].username, 'vitsalis')
        self.assertFalse(messages[0].typing)
    def test_patch_message_second_time(self):
        """
        The view should not update a message that has been
        made persistent. Instead it should respond with a
        400(Bad Request) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        # First patch succeeds and makes the message persistent.
        self.patch_and_get_response(
            messageid=message.id,
            text='Message Updated',
            timestamp=timestamp + 10,
            typing=False
        )
        # Second patch must be rejected.
        response = self.patch_and_get_response(
            messageid=message.id,
            text='Message Updated Again',
            timestamp=timestamp + 100,
            typing=False
        )
        messages = Message.objects.filter(username='vitsalis')
        self.assertTrue(messages.exists())
        self.assertEqual(messages[0].text, 'Message Updated')
        self.assertEqual(response.status_code, 400)
    def test_patch_message_with_datetime_sent_before_datetime_start(self):
        """
        When the datetime_sent is before datetime_start the view
        should make the datetime_sent equal to the datetime_sent,
        save the message and respond with a 204(No Content) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        # datetime_sent is deliberately one tick before datetime_start.
        response = self.patch_and_get_response(
            messageid=message.id,
            text='Message Updated',
            timestamp=timestamp - 1,
            typing=False
        )
        dbmessage = Message.objects.get(pk=message.id)
        self.assertEqual(response.status_code, 204)
        self.assertEqual(dbmessage.text, 'Message Updated')
        self.assertTrue(hasattr(dbmessage, 'datetime_sent'))
        # The view clamps datetime_sent to datetime_start.
        self.assertEqual(dbmessage.datetime_sent, message.datetime_start)
        self.assertEqual(dbmessage.datetime_sent, dbmessage.datetime_start)
        self.assertEqual(datetime_to_timestamp(dbmessage.datetime_start), timestamp)
        self.assertFalse(dbmessage.typing)
    def test_patch_message_without_id(self):
        """
        When the id is not specified the view should
        not patch the message and respond with a
        400(Bad Request) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        qstring = urllib.urlencode({
            'text': 'Message Updated',
            'datetime_sent': timestamp + 10,
            'typing': False
        })
        response = self.client.patch(
            reverse('chat:message', args=(self.channel.name,)),
            qstring
        )
        dbmessage = Message.objects.get(pk=message.id)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(dbmessage.text, message.text)
        self.assertIsNone(dbmessage.datetime_sent)
    def test_patch_message_without_text(self):
        """
        When the text is not specified the view
        should not patch the message and respond with a
        400(Bad Request) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        qstring = urllib.urlencode({
            'id': message.id,
            'datetime_sent': timestamp + 10,
            'typing': False
        })
        response = self.client.patch(
            reverse('chat:message', args=(self.channel.name,)),
            qstring
        )
        dbmessage = Message.objects.get(pk=message.id)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(dbmessage.text, message.text)
        self.assertIsNone(dbmessage.datetime_sent)
    def test_patch_message_without_datetime_sent(self):
        """
        When the datetime_sent is not specified the view
        should not patch the message and respond with a
        400(Bad Request) code.
        """
        timestamp = 10 ** 11
        message = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=timestamp
        )
        qstring = urllib.urlencode({
            'id': message.id,
            'text': 'Message Updated',
            'typing': False
        })
        response = self.client.patch(
            reverse('chat:message', args=(self.channel.name,)),
            qstring
        )
        dbmessage = Message.objects.get(pk=message.id)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(dbmessage.text, message.text)
        self.assertIsNone(dbmessage.datetime_sent)
class MessageViewDELETETests(ChatTests):
    """Exercises the DELETE handler of the chat:message view."""
    client_class = ChatClient
    def _delete(self, payload):
        """Issue a DELETE on chat:message for self.channel with payload."""
        return self.client.delete(
            reverse('chat:message', args=(self.channel.name,)),
            urllib.urlencode(payload)
        )
    def test_delete_message(self):
        """
        Deleting an existing message by id should remove it and
        answer with a 204(No Content) code.
        """
        msg = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=10 ** 11
        )
        response = self._delete({'id': msg.id})
        remaining = Message.objects.filter(username='vitsalis')
        self.assertEqual(response.status_code, 204)
        self.assertEqual(len(remaining), 0)
    def test_delete_message_without_id(self):
        """
        Omitting the id should produce a 400(Bad Request) response.
        """
        response = self._delete({})
        self.assertEqual(response.status_code, 400)
    def test_delete_message_that_does_not_exist(self):
        """
        Deleting an unknown id should produce a 404(Not Found)
        response and leave existing messages alone.
        """
        msg = create_message(
            text='Message',
            username='vitsalis',
            channel=self.channel,
            timestamp=10 ** 11
        )
        response = self._delete({'id': msg.id + 1})
        self.assertEqual(response.status_code, 404)
        remaining = Message.objects.filter(username='vitsalis')
        self.assertEqual(len(remaining), 1)
class ChannelViewPOSTTests(ChatTests):
    """Exercises the POST handler of the chat:channel view."""
    def test_create_valid_channel(self):
        """
        Posting a new channel name should persist exactly one channel
        and answer with a 204(No Content) code.
        """
        response = self.client.post(
            reverse('chat:channel'),
            {'name': 'New_Channel'}
        )
        created = Channel.objects.filter(name='New_Channel')
        self.assertEqual(response.status_code, 204)
        self.assertTrue(created.exists())
        self.assertEqual(created.count(), 1)
class ChannelViewGETTests(ChatTests):
    """Exercises the GET handler of the chat:channel view."""
    def test_request_valid_channel(self):
        """
        Requesting a channel that exists should return its name as a
        JSON object together with a 200(OK) status code.
        """
        response = self.client.get(
            reverse('chat:channel'),
            {'name': self.channel.name}
        )
        self.assertEqual(response.status_code, 200)
        payload = json.loads(response.content)
        self.assertEqual(payload['name'], self.channel.name)
    def test_request_channel_that_does_not_exist(self):
        """
        Requesting a channel that does not exist should return a
        404(Not Found) status code.
        """
        response = self.client.get(
            reverse('chat:channel'),
            {'name': 'invalid_channel'}
        )
        self.assertEqual(response.status_code, 404)
class MessageModelTests(ChatTests):
    """Sanity checks for the Message model."""
    def test_message_create(self):
        """
        A created message must round-trip through the database with
        every field intact.
        """
        msg = create_message(
            text='Message',
            timestamp=10 ** 11,
            username='vitsalis',
            channel=self.channel
        )
        stored = Message.objects.filter(pk=msg.id)
        self.assertTrue(stored.exists())
        self.assertEqual(stored.count(), 1)
        persisted = stored[0]
        self.assertEqual(persisted.text, msg.text)
        self.assertEqual(persisted.datetime_start, msg.datetime_start)
        self.assertEqual(persisted.username, msg.username)
        self.assertEqual(persisted.channel.id, msg.channel.id)
        self.assertTrue(persisted.typing)
class ChannelModelTests(ChatTests):
    """Sanity checks for the Channel model."""
    def test_channel_create(self):
        """
        The fixture channel must exist in the database exactly once.
        """
        stored = Channel.objects.filter(pk=self.channel.id)
        self.assertTrue(stored.exists())
        self.assertEqual(stored.count(), 1)
        self.assertEqual(stored[0].name, self.channel.name)
class URLTests(ChatTests):
    """Checks that the named chat URLs resolve to the expected paths."""
    def test_urls(self):
        expected = '/messages/foo/'
        self.assertEqual(reverse('chat:message', args=('foo',)), expected)
| sirodoht/ting | API/chat/tests.py | Python | mit | 24,874 |
__author__ = 'dkador'
class BasePersistenceStrategy(object):
    """
    Abstract base for persistence strategies.

    A persistence strategy is responsible for persisting a given event
    somewhere (i.e. directly to Keen, a local cache, a Redis queue, etc.)
    """
    def persist(self, event):
        """Persist the given event somewhere; subclasses must override.

        :param event: the event to persist
        """
        raise NotImplementedError()
class DirectPersistenceStrategy(BasePersistenceStrategy):
    """
    Persistence strategy that sends events straight to Keen, bypassing
    any local cache.
    """
    def __init__(self, api):
        """Initializer for DirectPersistenceStrategy.

        :param api: the Keen Api object used to communicate with the Keen API
        """
        super(DirectPersistenceStrategy, self).__init__()
        self.api = api
    def persist(self, event):
        """Post one event directly to the Keen API.

        :param event: an Event to persist
        """
        self.api.post_event(event)
    def batch_persist(self, events):
        """Post a batch of events directly to the Keen API.

        :param events: a batch of events to persist
        """
        self.api.post_events(events)
class RedisPersistenceStrategy(BasePersistenceStrategy):
    """
    A persistence strategy that persists events to Redis for later processing.
    Not yet implemented.
    """
    # Placeholder: inherits the NotImplementedError-raising persist().
    pass
class FilePersistenceStrategy(BasePersistenceStrategy):
    """
    A persistence strategy that persists events to the local file system for
    later processing.
    Not yet implemented.
    """
    # Placeholder: inherits the NotImplementedError-raising persist().
    pass
| isotoma/KeenClient-Python | keen/persistence_strategies.py | Python | mit | 1,595 |
# coding=utf-8
# Runtime configuration for the novelRS recommendation backend.
config = {
    'timeout': 10,
    'db_user': '',  # MongoDB username (blank = no auth)
    'db_pass': '',  # MongoDB password (blank = no auth)
    'db_host': 'localhost',
    'db_port': 27017,
    'db_name': 'novelRS',
    'cpu_num': 4  # number of worker processes used for computation
}
| nladuo/novelRS | web_demo/lib/config.py | Python | mit | 273 |
# debugshell extension
"""a python shell with repo, changelog & manifest objects"""
import mercurial
import code
def debugshell(ui, repo, **opts):
    """Drop into an interactive Python shell with useful repo objects bound."""
    local_ns = {}
    local_ns['mercurial'] = mercurial
    local_ns['repo'] = repo
    local_ns['cl'] = repo.changelog
    local_ns['mf'] = repo.manifest
    banner = ("loaded repo : %s\n"
              "using source: %s") % (repo.root, mercurial.__path__[0])
    code.interact(banner, local=local_ns)
# Mercurial extension command table: registers the shell as
# "debugshell" with short alias "dbsh", taking no options.
cmdtable = {
    "debugshell|dbsh": (debugshell, [])
}
| iaddict/mercurial.rb | vendor/mercurial/contrib/debugshell.py | Python | mit | 533 |
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Sum(function.Function):
    """Sum of array elements over a given axis."""
    # Class-level default; overwritten per instance in __init__.
    keepdims = False
    def __init__(self, axis=None, keepdims=False):
        # Normalise ``axis`` to None (reduce everything) or a tuple of
        # ints; duplicates and other types are rejected up front.
        if axis is None:
            self.axis = None
        elif isinstance(axis, int):
            self.axis = (axis,)
        elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):
            if len(set(axis)) != len(axis):
                raise ValueError('duplicate value in axis: ({})'.format(
                    ', '.join(map(str, axis))))
            self.axis = axis
        else:
            raise TypeError('None, int or tuple of int are required')
        self.keepdims = keepdims
    def check_type_forward(self, in_types):
        # Exactly one floating-point input is expected.
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f',
        )
        if self.axis is not None:
            # Both positive and negative axis indices must lie in range.
            for axis in self.axis:
                if axis >= 0:
                    type_check.expect(
                        axis < in_types[0].ndim,
                    )
                else:
                    type_check.expect(
                        -axis - 1 < in_types[0].ndim,
                    )
    def forward(self, x):
        # Nothing needs retaining for backward; remember only the input
        # shape/dtype and which array module (NumPy or CuPy) was used.
        self.retain_inputs(())
        self._in_shape = x[0].shape
        self._in_dtype = x[0].dtype
        self._xp = cuda.get_array_module(*x)
        return self._xp.asarray(
            x[0].sum(axis=self.axis, keepdims=self.keepdims)),
    def backward(self, x, gy):
        xp = self._xp
        gy = gy[0]
        # Unless dims were kept (or the whole array was reduced),
        # reinsert length-1 axes at the reduced positions so the
        # gradient broadcasts back to the original input shape.
        if not (len(self._in_shape) == 0 or
                self.axis is None or self.keepdims):
            actual_axis = []
            for axis in self.axis:
                if axis < 0:
                    # Convert negative axis to its positive equivalent.
                    axis += len(self._in_shape)
                actual_axis.append(axis)
            for axis in sorted(actual_axis):
                gy = xp.expand_dims(gy, axis=axis)
        if hasattr(xp, 'broadcast_to'):
            gx = xp.broadcast_to(gy, self._in_shape)
        else:
            # NumPy 1.9 does not support broadcast_to.
            dummy_x = xp.empty(self._in_shape, 'b')
            gx, _ = xp.broadcast_arrays(gy, dummy_x)
        return gx,
def sum(x, axis=None, keepdims=False):
    """Sum of array elements over a given axis.
    Args:
        x (~chainer.Variable): Elements to sum.
        axis (None, int, or tuple of int): Axis along which a sum is
            performed. The default (axis = None) performs a sum over all
            the dimensions of the input array.
        keepdims (bool): If ``True``, the specified axes are remained as axes
            of length one.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return Sum(axis, keepdims)(x)
| kiyukuta/chainer | chainer/functions/math/sum.py | Python | mit | 2,814 |
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import os
import smtplib
import tempfile
import time
# Newer Django exposes pluggable email backends via get_connection();
# older versions only provide SMTPConnection, aliased to the same name.
if constants.EMAIL_BACKEND_SUPPORT:
    from django.core.mail import get_connection
else:
    from django.core.mail import SMTPConnection as get_connection
# Lock file path used to ensure a single sender process at a time.
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
                                               'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size, exclude_messages=None):
    """
    A generator which iterates queued messages in blocks so that new
    prioritised messages can be inserted during iteration of a large number of
    queued messages.

    To avoid an infinite loop, yielded messages *must* be deleted or deferred.

    :param block_size: maximum number of messages fetched per block
        (a falsy value means no limit).
    :param exclude_messages: list of primary keys to skip.  The caller may
        keep appending to this list between yields (as ``send_all`` does)
        to exclude messages that failed earlier in the run.
    """
    # Use None (not []) as the default to avoid the shared mutable
    # default-argument pitfall.
    if exclude_messages is None:
        exclude_messages = []
    def get_block():
        # Re-query each block so newly queued/prioritised messages are seen.
        queue = models.QueuedMessage.objects.non_deferred() \
            .exclude(pk__in=exclude_messages).select_related()
        if block_size:
            queue = queue[:block_size]
        return queue
    queue = get_block()
    while queue:
        for message in queue:
            yield message
        queue = get_block()
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.
    A lock file is used to ensure that this process can not be started again
    while it is already running.
    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.
    """
    lock = FileLock(LOCK_PATH)
    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) systems which use a LinkFileLock
        # so ensure that it is never a negative number.
        lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")
    start_time = time.time()
    sent = deferred = skipped = 0
    # Primary keys of messages that failed during this run; they are
    # excluded from subsequent query blocks so we never retry them here.
    exclude_messages = []
    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in _message_queue(block_size, exclude_messages=exclude_messages):
            result = send_queued_message(message, connection=connection,
                                         blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
                # Don't try to send this message again for now
                exclude_messages.append(message.pk)
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
        connection.close()
    finally:
        # Always release the lock, even if sending blew up.
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")
    logger.debug("")
    # Use a louder log level when anything actually happened.
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
    """
    Loop indefinitely, checking the queue at intervals and sending any
    queued messages found.

    The interval (in seconds) can be provided as the ``empty_queue_sleep``
    argument. The default is attempted to be retrieved from the
    ``MAILER_EMPTY_QUEUE_SLEEP`` setting (or if not set, 30s is used).
    """
    interval = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
    while True:
        while not models.QueuedMessage.objects.all():
            logger.debug("Sleeping for %s seconds before checking queue "
                         "again." % interval)
            time.sleep(interval)
        send_all()
def send_queued_message(queued_message, connection=None, blacklist=None,
                        log=True):
    """
    Send a queued message, returning a response code as to the action taken.

    The response codes can be found in ``django_mailer.constants``:
    ``RESULT_SKIPPED`` for a blacklisted email, ``RESULT_FAILED`` for a
    deferred message, or ``RESULT_SENT`` for a successfully sent message.

    To allow optimizations when sending many messages, a connection and a
    list of blacklisted email addresses may be provided; otherwise a fresh
    connection is opened and the ``Blacklist`` table is queried directly.

    A blacklisted recipient causes the queued message to be removed without
    sending.  Otherwise the send is attempted and an SMTP failure defers
    the message for a later retry.  The original message is never deleted.
    """
    message = queued_message.message
    own_connection = connection is None
    if own_connection:
        connection = get_connection()
        connection.open()
    if blacklist is None:
        is_blacklisted = models.Blacklist.objects.filter(
            email=message.to_address)
    else:
        is_blacklisted = message.to_address in blacklist
    if is_blacklisted:
        logger.info("Not sending to blacklisted email: %s" %
                    message.to_address.encode("utf-8"))
        queued_message.delete()
        result = constants.RESULT_SKIPPED
    else:
        result = send_message(message, connection=connection)
    if own_connection:
        connection.close()
    return result
def send_message(message, connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided. Otherwise a new connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
if connection is None:
connection = get_connection()
opened_connection = False
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
message.email_message(connection=connection).send()
message.queuedmessage.delete()
result = constants.RESULT_SENT
log_message = 'Sent'
except Exception, err:
if isinstance(err, settings.DEFER_ON_ERRORS):
message.queuedmessage.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
connection.close()
return result
| selwin/django-mailer | django_mailer/engine.py | Python | mit | 7,863 |
import JeevesLib
from smt.Z3 import *
import unittest
from RSphaeroides import RSphaeroides
import JeevesLib
class TestAuction(unittest.TestCase):
    """Smoke test for the chemotaxis protein-signalling gallery example."""
    def setUp(self):
        # Reset the Jeeves runtime state before every test.
        JeevesLib.init()
    def test_something(self):
        # Placeholder: only verifies an RSphaeroides model can be built.
        r = RSphaeroides()
        pass
| jonathanmarvens/jeeves | test/gallery/proteinsignal/testChemotaxis.py | Python | mit | 249 |
# -*- coding: utf-8 -*-
'''
Tulip routine libraries, based on lambda's lamlib
Author Twilight0
License summary below, for more details please read license.txt file
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
integer = 1000
# --- Add-on info / settings shortcuts (xbmcaddon) ---
addon = xbmcaddon.Addon
lang = xbmcaddon.Addon().getLocalizedString
setting = xbmcaddon.Addon().getSetting
setSetting = xbmcaddon.Addon().setSetting
addonInfo = xbmcaddon.Addon().getAddonInfo
# --- Plugin directory helpers (xbmcplugin) ---
addItem = xbmcplugin.addDirectoryItem
directory = xbmcplugin.endOfDirectory
content = xbmcplugin.setContent
property = xbmcplugin.setProperty  # NOTE: shadows the builtin ``property`` in this module
resolve = xbmcplugin.setResolvedUrl
# --- Core Kodi helpers (xbmc) ---
infoLabel = xbmc.getInfoLabel
condVisibility = xbmc.getCondVisibility
jsonrpc = xbmc.executeJSONRPC
keyboard = xbmc.Keyboard
sleep = xbmc.sleep
execute = xbmc.executebuiltin
skin = xbmc.getSkinDir()
player = xbmc.Player()
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
transPath = xbmc.translatePath
skinPath = xbmc.translatePath('special://skin/')
addonPath = xbmc.translatePath(addonInfo('path'))
dataPath = xbmc.translatePath(addonInfo('profile')).decode('utf-8')
# --- GUI widgets and dialogs (xbmcgui) ---
window = xbmcgui.Window(10000)
dialog = xbmcgui.Dialog()
progressDialog = xbmcgui.DialogProgress()
windowDialog = xbmcgui.WindowDialog()
button = xbmcgui.ControlButton
image = xbmcgui.ControlImage
alphanum_input = xbmcgui.INPUT_ALPHANUM
password_input = xbmcgui.INPUT_PASSWORD
hide_input = xbmcgui.ALPHANUM_HIDE_INPUT
item = xbmcgui.ListItem
# --- Virtual file system helpers (xbmcvfs) ---
openFile = xbmcvfs.File
makeFile = xbmcvfs.mkdir
deleteFile = xbmcvfs.delete
deleteDir = xbmcvfs.rmdir
listDir = xbmcvfs.listdir
exists = xbmcvfs.exists
join = os.path.join
# --- Well-known file locations inside the add-on profile ---
settingsFile = os.path.join(dataPath, 'settings.xml')
bookmarksFile = os.path.join(dataPath, 'bookmarks.db')
cacheFile = os.path.join(dataPath, 'cache.db')
def infoDialog(message, heading=addonInfo('name'), icon='', time=3000):
    """Show a toast notification, falling back to the Notification builtin
    on Kodi versions whose Dialog lacks ``notification``."""
    if icon == '':
        icon = addonInfo('icon')
    try:
        dialog.notification(heading, message, icon, time, sound=False)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the builtin fallback still covers old Kodi.
        execute("Notification(%s, %s, %s, %s)" % (heading, message, time, icon))
def okDialog(heading, line1):
    """Thin wrapper around xbmcgui Dialog.ok."""
    return dialog.ok(heading, line1)
def inputDialog(heading, _type_=''):
    """Thin wrapper around xbmcgui Dialog.input; ``_type_`` selects the input mode."""
    return dialog.input(heading, _type_)
def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
    """Thin wrapper around xbmcgui Dialog.yesno with the add-on name as default heading."""
    return dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel)
def selectDialog(list, heading=addonInfo('name')):
    """Thin wrapper around xbmcgui Dialog.select.

    NOTE(review): the parameter name ``list`` shadows the builtin but is
    part of the public signature (callers may pass it by keyword), so it
    is kept as-is.
    """
    return dialog.select(heading, list)
def openSettings(query=None, id=addonInfo('id')):
    """Open the settings dialog of the add-on identified by ``id``.

    When ``query`` is given as "<category>.<field>", focus that category
    and control.  Any failure (e.g. a malformed query) is ignored.
    """
    try:
        idle()
        execute('Addon.OpenSettings(%s)' % id)
        if query is None:
            # Previously implemented as ``raise Exception()`` caught below;
            # an early return expresses the same flow directly.
            return
        c, f = query.split('.')
        execute('SetFocus(%i)' % (int(c) + 100))
        execute('SetFocus(%i)' % (int(f) + 200))
    except Exception:
        # Narrowed from a bare ``except:`` to keep SystemExit/KeyboardInterrupt alive.
        return
def openSettings_alt():
    """Open this add-on's own settings dialog via the xbmcaddon API."""
    try:
        idle()
        xbmcaddon.Addon().openSettings()
    except Exception:
        # Narrowed from a bare ``except:``; failures are deliberately ignored.
        return
def openPlaylist():
    """Open Kodi's video playlist window."""
    return execute('ActivateWindow(VideoPlaylist)')
def refresh():
    """Refresh the current plugin container listing."""
    return execute('Container.Refresh')
def idle():
    """Close the busy-spinner dialog if it is showing."""
    return execute('Dialog.Close(busydialog)')
def set_view_mode(vmid):
    """Switch the container to the skin view mode with the given id."""
    return execute('Container.SetViewMode({0})'.format(vmid))
| felipenaselva/felipe.repository | script.module.streamhub/resources/premium/modules/control.py | Python | gpl-2.0 | 3,906 |
# Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""watchedsfolders.py -- Manages tracking watched folders. """
from miro import messages
from miro import signals
from miro.plat.frontends.widgets import widgetset
from miro.plat.utils import filename_to_unicode
class WatchedFolderManager(signals.SignalEmitter):
    """Manages tracking watched folders.

    Attributes:
        model -- TableModel object that contains the current list of watched
            folders.  It has 3 columns: id (integer), path (text) and
            visible (boolean).

    Signals:
        changed -- The list of watched folders has changed
    """
    def __init__(self):
        signals.SignalEmitter.__init__(self, 'changed')
        self.model = widgetset.TableModel('integer', 'text', 'boolean')
        # Maps watched-folder id -> model iterator for direct row access.
        self._iter_map = {}
    def handle_watched_folder_list(self, info_list):
        """Handle the WatchedFolderList message."""
        # Renamed loop locals: ``iter``/``id`` previously shadowed builtins.
        for info in info_list:
            row_iter = self.model.append(info.id,
                                         filename_to_unicode(info.path),
                                         info.visible)
            self._iter_map[info.id] = row_iter
        self.emit('changed')
    def handle_watched_folders_changed(self, added, changed, removed):
        """Handle the WatchedFoldersChanged message."""
        self.handle_watched_folder_list(added)
        for info in changed:
            row_iter = self._iter_map[info.id]
            self.model.update_value(row_iter, 1,
                                    filename_to_unicode(info.path))
            self.model.update_value(row_iter, 2, info.visible)
        for folder_id in removed:
            row_iter = self._iter_map.pop(folder_id)
            self.model.remove(row_iter)
        self.emit('changed')
    def change_visible(self, id_, visible):
        """Change if a watched folder is visible or not."""
        messages.SetWatchedFolderVisible(id_, visible).send_to_backend()
    def remove(self, id_):
        """Remove a watched folder."""
        messages.DeleteWatchedFolder(id_).send_to_backend()
    def add(self, path):
        """Add a new watched folder. It will be initially visible."""
        messages.NewWatchedFolder(path).send_to_backend()
| debugger06/MiroX | tv/lib/frontends/widgets/watchedfolders.py | Python | gpl-2.0 | 3,503 |
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService
from Components.Element import cached
from Tools.Transponder import ConvertToHumanReadable
class ServiceInfo(Converter, object):
    # Converter type identifiers.  The string used in the skin XML is
    # mapped onto one of these in __init__, together with the player
    # events that should trigger re-evaluation.
    HAS_TELETEXT = 0
    IS_MULTICHANNEL = 1
    IS_CRYPTED = 2
    IS_WIDESCREEN = 3
    SUBSERVICES_AVAILABLE = 4
    XRES = 5
    YRES = 6
    APID = 7
    VPID = 8
    PCRPID = 9
    PMTPID = 10
    TXTPID = 11
    TSID = 12
    ONID = 13
    SID = 14
    FRAMERATE = 15
    TRANSFERBPS = 16
    HAS_HBBTV = 17
    AUDIOTRACKS_AVAILABLE = 18
    SUBTITLES_AVAILABLE = 19
    FREQ_INFO = 20
    def __init__(self, type):
        """Resolve the skin-supplied ``type`` string into a numeric type id
        plus the tuple of iPlayableService events that invalidate it.

        Raises KeyError for unknown type strings (fails fast at skin load).
        """
        Converter.__init__(self, type)
        # NOTE: "HasTelext" (sic) is the established skin-facing key; do not
        # "fix" the spelling or existing skins would break.
        self.type, self.interesting_events = {
            "HasTelext": (self.HAS_TELETEXT, (iPlayableService.evUpdatedInfo,)),
            "IsMultichannel": (self.IS_MULTICHANNEL, (iPlayableService.evUpdatedInfo,)),
            "IsCrypted": (self.IS_CRYPTED, (iPlayableService.evUpdatedInfo,)),
            "IsWidescreen": (self.IS_WIDESCREEN, (iPlayableService.evVideoSizeChanged,)),
            "SubservicesAvailable": (self.SUBSERVICES_AVAILABLE, (iPlayableService.evUpdatedEventInfo,)),
            "VideoWidth": (self.XRES, (iPlayableService.evVideoSizeChanged,)),
            "VideoHeight": (self.YRES, (iPlayableService.evVideoSizeChanged,)),
            "AudioPid": (self.APID, (iPlayableService.evUpdatedInfo,)),
            "VideoPid": (self.VPID, (iPlayableService.evUpdatedInfo,)),
            "PcrPid": (self.PCRPID, (iPlayableService.evUpdatedInfo,)),
            "PmtPid": (self.PMTPID, (iPlayableService.evUpdatedInfo,)),
            "TxtPid": (self.TXTPID, (iPlayableService.evUpdatedInfo,)),
            "TsId": (self.TSID, (iPlayableService.evUpdatedInfo,)),
            "OnId": (self.ONID, (iPlayableService.evUpdatedInfo,)),
            "Sid": (self.SID, (iPlayableService.evUpdatedInfo,)),
            "Framerate": (self.FRAMERATE, (iPlayableService.evVideoSizeChanged,iPlayableService.evUpdatedInfo,)),
            "TransferBPS": (self.TRANSFERBPS, (iPlayableService.evUpdatedInfo,)),
            "HasHBBTV": (self.HAS_HBBTV, (iPlayableService.evUpdatedInfo,iPlayableService.evHBBTVInfo,)),
            "AudioTracksAvailable": (self.AUDIOTRACKS_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
            "SubtitlesAvailable": (self.SUBTITLES_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
            "Freq_Info": (self.FREQ_INFO, (iPlayableService.evUpdatedInfo,)),
        }[type]
def getServiceInfoString(self, info, what, convert = lambda x: "%d" % x):
v = info.getInfo(what)
if v == -1:
return "N/A"
if v == -2:
return info.getInfoString(what)
return convert(v)
    @cached
    def getBoolean(self):
        """Evaluate the boolean-flavoured converter types.

        Returns False when no service info is available; falls through
        (returning None, which is falsy) for non-boolean types.
        """
        service = self.source.service
        # service and info may be unavailable (e.g. during channel changes).
        info = service and service.info()
        if not info:
            return False
        if self.type == self.HAS_TELETEXT:
            tpid = info.getInfo(iServiceInformation.sTXTPID)
            return tpid != -1
        elif self.type == self.IS_MULTICHANNEL:
            # FIXME. but currently iAudioTrackInfo doesn't provide more information.
            # Heuristic: treat any AC3/DTS track as "multichannel".
            audio = service.audioTracks()
            if audio:
                n = audio.getNumberOfTracks()
                idx = 0
                while idx < n:
                    i = audio.getTrackInfo(idx)
                    description = i.getDescription();
                    if "AC3" in description or "AC-3" in description or "DTS" in description:
                        return True
                    idx += 1
            return False
        elif self.type == self.IS_CRYPTED:
            return info.getInfo(iServiceInformation.sIsCrypted) == 1
        elif self.type == self.IS_WIDESCREEN:
            # Aspect codes for 16:9 / 2.21:1 variants; presumably per DVB
            # aspect_ratio_info values -- verify against enigma2 headers.
            return info.getInfo(iServiceInformation.sAspect) in (3, 4, 7, 8, 0xB, 0xC, 0xF, 0x10)
        elif self.type == self.SUBSERVICES_AVAILABLE:
            subservices = service.subServices()
            return subservices and subservices.getNumberOfSubservices() > 0
        elif self.type == self.HAS_HBBTV:
            return info.getInfoString(iServiceInformation.sHBBTVUrl) != ""
        elif self.type == self.AUDIOTRACKS_AVAILABLE:
            audio = service.audioTracks()
            return audio and audio.getNumberOfTracks() > 1
        elif self.type == self.SUBTITLES_AVAILABLE:
            subtitle = service and service.subtitle()
            subtitlelist = subtitle and subtitle.getSubtitleList()
            if subtitlelist:
                return len(subtitlelist) > 0
            return False
    boolean = property(getBoolean)
    @cached
    def getText(self):
        """Render the converter's text types (resolution, PIDs, transport
        IDs, frame rate, transfer rate, tuner frequency summary) for the
        current service.

        Returns "" when no service info is available or the type has no
        text representation.
        """
        service = self.source.service
        info = service and service.info()
        if not info:
            return ""
        if self.type == self.XRES:
            return self.getServiceInfoString(info, iServiceInformation.sVideoWidth)
        elif self.type == self.YRES:
            return self.getServiceInfoString(info, iServiceInformation.sVideoHeight)
        elif self.type == self.APID:
            return self.getServiceInfoString(info, iServiceInformation.sAudioPID)
        elif self.type == self.VPID:
            return self.getServiceInfoString(info, iServiceInformation.sVideoPID)
        elif self.type == self.PCRPID:
            return self.getServiceInfoString(info, iServiceInformation.sPCRPID)
        elif self.type == self.PMTPID:
            return self.getServiceInfoString(info, iServiceInformation.sPMTPID)
        elif self.type == self.TXTPID:
            return self.getServiceInfoString(info, iServiceInformation.sTXTPID)
        elif self.type == self.TSID:
            return self.getServiceInfoString(info, iServiceInformation.sTSID)
        elif self.type == self.ONID:
            return self.getServiceInfoString(info, iServiceInformation.sONID)
        elif self.type == self.SID:
            return self.getServiceInfoString(info, iServiceInformation.sSID)
        elif self.type == self.FRAMERATE:
            # Raw value is in 1/1000 fps; "+500" rounds to the nearest fps.
            return self.getServiceInfoString(info, iServiceInformation.sFrameRate, lambda x: "%d fps" % ((x+500)/1000))
        elif self.type == self.TRANSFERBPS:
            return self.getServiceInfoString(info, iServiceInformation.sTransferBPS, lambda x: "%d kB/s" % (x/1024))
        elif self.type == self.FREQ_INFO:
            # Build a one-line tuner summary: frequency, polarization,
            # symbol rate and FEC; each missing piece degrades to "".
            feinfo = service.frontendInfo()
            if feinfo is None:
                return ""
            feraw = feinfo.getAll(False)
            if feraw is None:
                return ""
            fedata = ConvertToHumanReadable(feraw)
            if fedata is None:
                return ""
            frequency = fedata.get("frequency")
            if frequency:
                # NOTE(review): "/ 1000" assumes Python 2 integer division;
                # under Python 3 this renders e.g. "11097.0" - confirm the
                # target interpreter.
                frequency = str(frequency / 1000)
            sr_txt = "Sr:"
            polarization = fedata.get("polarization_abbreviation")
            if polarization is None:
                polarization = ""
            symbolrate = str(int(fedata.get("symbol_rate", 0) / 1000))
            if symbolrate == "0":
                # Hide the "Sr:" label entirely when no symbol rate is known.
                sr_txt = ""
                symbolrate = ""
            fec = fedata.get("fec_inner")
            if fec is None:
                fec = ""
            out = "Freq: %s %s %s %s %s" % (frequency, polarization, sr_txt, symbolrate, fec)
            return out
        return ""
    # Expose the result as the .text attribute for the Converter framework.
    text = property(getText)
@cached
def getValue(self):
service = self.source.service
info = service and service.info()
if not info:
return -1
if self.type == self.XRES:
return info.getInfo(iServiceInformation.sVideoWidth)
if self.type == self.YRES:
return info.getInfo(iServiceInformation.sVideoHeight)
if self.type == self.FRAMERATE:
return info.getInfo(iServiceInformation.sFrameRate)
return -1
value = property(getValue)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
Converter.changed(self, what)
| BlackPole/bp-enigma2 | lib/python/Components/Converter/ServiceInfo.py | Python | gpl-2.0 | 6,815 |
#!/usr/bin/python
from time import time
REPS = 17500
def insert():
m = [None]
i = 0
now = time()
while i < REPS:
m.insert(0, i)
i += 1
print 'Elapsed (insert):', time() - now
def colonZero():
m = [None]
i = 0
now = time()
while i < REPS:
m[:0] = [i]
i += 1
print 'Elapsed (colon-0):', time() - now
def main():
    # Run both prepend benchmarks back to back for comparison.
    for benchmark in (insert, colonZero):
        benchmark()
# Run the benchmarks when executed as a script; raw_input() keeps the
# console window open until the user presses Enter (Python 2 idiom).
if __name__ == '__main__':
    main()
    raw_input()
| opensvn/test | src/study/python/cpp/ch06/alt/insertVsColonZero.py | Python | gpl-2.0 | 442 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Donald N. Allingham
# Copyright (C) 2009 Douglas S. Blank
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide a simplified table creation interface
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import pickle
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.simple import SimpleTable
from gramps.gen.errors import WindowActiveError
from ...utils import model_to_text, text_to_clipboard
from ...widgets.multitreeview import MultiTreeView
from ...ddtargets import DdTargets
from ..quick import run_quick_report_by_name
from ...editors import (EditPerson, EditEvent, EditFamily, EditCitation,
EditSource, EditPlace, EditRepository, EditNote,
EditMedia)
#-------------------------------------------------------------------------
#
# QuickTable class
#
#-------------------------------------------------------------------------
class QuickTable(SimpleTable):
    """
    Provide a simplified table creation interface.
    """
    def set_callback(self, which, callback):
        """
        Override (or add) a function for click/double-click
        """
        if which == "leftclick":
            self._callback_leftclick = callback
        elif which == "leftdouble":
            self._callback_leftdouble = callback
    def button_press_event(self, treeview, event):
        """
        Pop up a context menu for the linked object of the selected row.
        Triggered either by a right mouse click (Gdk event) or by the
        'select-cursor-row' keyboard signal, which passes a bool as event.
        Returns True when the event was handled.
        """
        wid = treeview.get_toplevel()
        try:
            winmgr = self.simpledoc.doc.uistate.gwm
            self.track = winmgr.get_item_from_window(wid).track
        except Exception:
            # Window may not be managed by the window manager (or uistate
            # missing); fall back to an empty track.  Was a bare except:,
            # which also swallowed SystemExit/KeyboardInterrupt.
            self.track = []
        index = None
        button_code = None
        event_time = None
        func = None
        if isinstance(event, bool): # keyboard 'enter' via select-cursor-row
            button_code = 3
            event_time = 0
            selection = treeview.get_selection()
            store, paths = selection.get_selected_rows()
            tpath = paths[0] if len(paths) > 0 else None
            node = store.get_iter(tpath) if tpath else None
            if node:
                treeview.grab_focus()
                index = store.get_value(node, 0)
            # FIXME: make popup come where cursor is
            #rectangle = treeview.get_visible_rect()
            #column = treeview.get_column(0)
            #rectangle = treeview.get_cell_area("0:0",
            #x, y = rectangle.x, rectangle.y
            #func = lambda menu: (x, y, True)
        elif event.button == 3:  # right mouse button
            button_code = 3
            event_time = event.time
            x = int(event.x)
            y = int(event.y)
            path_info = treeview.get_path_at_pos(x, y)
            if path_info is not None:
                path, col, cellx, celly = path_info
                selection = treeview.get_selection()
                store, paths = selection.get_selected_rows()
                tpath = paths[0] if len(paths) > 0 else None
                node = store.get_iter(tpath) if tpath else None
                if path:
                    treeview.grab_focus()
                    treeview.set_cursor(path, col, 0)
                if store and node:
                    index = store.get_value(node, 0) # index
                    # Below, you need index, treeview, path, button_code,
                    # func, and event_time
        if index is not None:
            if self._link[index]:
                objclass, handle = self._link[index]
            else:
                return False
            if (self.simpledoc.doc.uistate.get_export_mode() and
                objclass != 'Filter'):
                return False # avoid edition during export
            self.popup = Gtk.Menu()
            popup = self.popup
            menu_item = Gtk.MenuItem(label=_("Copy all"))
            menu_item.connect("activate", lambda widget: text_to_clipboard(
                model_to_text(treeview.get_model())))
            popup.append(menu_item)
            menu_item.show()
            # Now add more items to popup menu, if available
            # See details (edit, etc):
            menu_item = Gtk.MenuItem(label=_("the object|See %s details") %
                                     glocale.trans_objclass(objclass))
            menu_item.connect(
                "activate", lambda widget: self.on_table_doubleclick(treeview))
            popup.append(menu_item)
            menu_item.show()
            # Add other items to menu:
            if objclass == 'Person':
                menu_item = Gtk.MenuItem(label=_("the object|Make %s active")
                                         % glocale.trans_objclass('Person'))
                menu_item.connect("activate",
                                  lambda widget: self.on_table_click(treeview))
                popup.append(menu_item)
                menu_item.show()
            # Only offer "not in filter" when a proxy (filtered) db is active:
            if (self.simpledoc.doc.dbstate.db !=
                self.simpledoc.doc.dbstate.db.basedb):
                if (objclass == 'Filter' and
                    handle[0] in ['Person', 'Family', 'Place', 'Event',
                                  'Repository', 'Note', 'Media',
                                  'Citation', 'Source']):
                    menu_item = Gtk.MenuItem(label=_("See data not in Filter"))
                    menu_item.connect(
                        "activate",
                        lambda widget: self.show_not_in_filter(handle[0]))
                    popup.append(menu_item)
                    menu_item.show()
            # Show the popup menu:
            popup.popup(None, None, func, None, button_code, event_time)
            return True
        return False
    def show_not_in_filter(self, obj_class):
        """
        Run the 'filterbyname' quick report listing objects of obj_class
        that are NOT matched by the currently active filter.
        """
        run_quick_report_by_name(self.simpledoc.doc.dbstate,
                                 self.simpledoc.doc.uistate,
                                 'filterbyname',
                                 'Inverse %s' % obj_class,
                                 track=self.track)
    def on_table_doubleclick(self, obj):
        """
        Handle events on tables. obj is a treeview

        Opens the editor matching the linked object class of the selected
        row (or invokes the leftdouble callback when one was registered).
        Returns True when the event was handled.
        """
        selection = obj.get_selection()
        store, paths = selection.get_selected_rows()
        tpath = paths[0] if len(paths) > 0 else None
        node = store.get_iter(tpath) if tpath else None
        if not node:
            return
        index = store.get_value(node, 0) # index
        if self._callback_leftdouble:
            self._callback_leftdouble(store.get_value(node, 1))
            return True
        elif self._link[index]:
            objclass, handle = self._link[index]
            if isinstance(handle, list):
                handle = handle[0]
            if objclass == 'Person':
                person = self.access.dbase.get_person_from_handle(handle)
                if person:
                    try:
                        EditPerson(self.simpledoc.doc.dbstate,
                                   self.simpledoc.doc.uistate, [], person)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Event':
                event = self.access.dbase.get_event_from_handle(handle)
                if event:
                    try:
                        EditEvent(self.simpledoc.doc.dbstate,
                                  self.simpledoc.doc.uistate, [], event)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Family':
                ref = self.access.dbase.get_family_from_handle(handle)
                if ref:
                    try:
                        EditFamily(self.simpledoc.doc.dbstate,
                                   self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Citation':
                ref = self.access.dbase.get_citation_from_handle(handle)
                if ref:
                    try:
                        EditCitation(self.simpledoc.doc.dbstate,
                                     self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Source':
                ref = self.access.dbase.get_source_from_handle(handle)
                if ref:
                    try:
                        EditSource(self.simpledoc.doc.dbstate,
                                   self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Place':
                ref = self.access.dbase.get_place_from_handle(handle)
                if ref:
                    try:
                        EditPlace(self.simpledoc.doc.dbstate,
                                  self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Repository':
                ref = self.access.dbase.get_repository_from_handle(handle)
                if ref:
                    try:
                        EditRepository(self.simpledoc.doc.dbstate,
                                       self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Note':
                ref = self.access.dbase.get_note_from_handle(handle)
                if ref:
                    try:
                        EditNote(self.simpledoc.doc.dbstate,
                                 self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'Media':
                ref = self.access.dbase.get_media_from_handle(handle)
                if ref:
                    try:
                        EditMedia(self.simpledoc.doc.dbstate,
                                  self.simpledoc.doc.uistate, [], ref)
                        return True # handled event
                    except WindowActiveError:
                        pass
            elif objclass == 'PersonList':
                run_quick_report_by_name(self.simpledoc.doc.dbstate,
                                         self.simpledoc.doc.uistate,
                                         'filterbyname',
                                         'list of people',
                                         handle=handle,
                                         track=self.track)
            elif objclass == 'Filter':
                if isinstance(handle, list):
                    handle = handle[0]
                run_quick_report_by_name(self.simpledoc.doc.dbstate,
                                         self.simpledoc.doc.uistate,
                                         'filterbyname',
                                         handle, track=self.track)
        return False # didn't handle event
    def on_table_click(self, obj):
        """
        Handle events on tables. obj is a treeview

        Either invokes the registered leftclick callback or, for Person
        rows, makes the clicked person the active person.
        """
        selection = obj.get_selection()
        store, paths = selection.get_selected_rows()
        tpath = paths[0] if len(paths) > 0 else None
        # BUG FIX: guard against an empty selection -- store.get_iter(None)
        # raises instead of returning None (now matches on_table_doubleclick).
        node = store.get_iter(tpath) if tpath else None
        if not node:
            return
        index = store.get_value(node, 0) # index
        if self._callback_leftclick:
            self._callback_leftclick(store.get_value(node, 1))
            return True
        elif self._link[index]:
            objclass, handle = self._link[index]
            if isinstance(handle, list):
                handle = handle[0]
            if objclass == 'Person':
                from gi.repository import GLib
                # If you emmit the signal here and it causes this table to be deleted,
                # then you'll crash Python:
                #self.simpledoc.doc.uistate.set_active(handle, 'Person')
                # So, let's return from this, then change the active person:
                return GLib.timeout_add(100, self.simpledoc.doc.uistate.set_active, handle, 'Person')
            return True
        return False # didn't handle event
    def object_drag_data_get(self, widget, context, sel_data, info, time):
        """
        Drag-and-drop source handler: serialize the (objclass, handle)
        links of all selected rows into the HANDLE_LIST selection target.
        """
        tree_selection = widget.get_selection()
        model, paths = tree_selection.get_selected_rows()
        retval = []
        for path in paths:
            node = model.get_iter(path)
            index = model.get_value(node, 0)
            if (index is not None and self._link[index]):
                retval.append(self._link[index])
        sel_data.set(DdTargets.HANDLE_LIST.atom_drag_type, 8, pickle.dumps(retval))
        return True
    def toggle(self, obj, path, col):
        """
        Invert a checkbox cell in the backing model.

        obj - column widget
        path - row
        col - column
        """
        self.treeview.get_model()[path][col] = not \
            self.treeview.get_model()[path][col]
    def write(self, document):
        """
        Render the table as a Gtk.TreeView embedded at the end of the
        text buffer of the given simple document.
        """
        self.simpledoc = document
        buffer = self.simpledoc.doc.buffer
        text_view = self.simpledoc.doc.text_view
        model_index = 1 # start after index
        # sort_index is currently unused; kept for the FIXME below.
        if self._sort_col:
            sort_index = self._columns.index(self._sort_col)
        else:
            sort_index = 0
        treeview = MultiTreeView()
        treeview.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK,
                                          [],
                                          Gdk.DragAction.COPY)
        tglist = Gtk.TargetList.new([])
        tglist.add(DdTargets.HANDLE_LIST.atom_drag_type,
                   Gtk.TargetFlags.SAME_WIDGET, 0)
        treeview.drag_source_set_target_list(tglist)
        #treeview.enable_model_drag_dest(DdTargets.all_targets(),
        #                                Gdk.DragAction.DEFAULT)
        treeview.connect('drag_data_get', self.object_drag_data_get)
        treeview.set_grid_lines(Gtk.TreeViewGridLines.BOTH)
        #treeview.connect('row-activated', on_table_doubleclick, self)
        #treeview.connect('cursor-changed', on_table_click, self)
        treeview.connect('button-press-event', self.button_press_event)
        treeview.connect('select-cursor-row', self.button_press_event)
        renderer = Gtk.CellRendererText()
        types = [int] # index
        cnt = 0
        sort_data = []
        sort_data_types = []
        for col in self._columns:
            if self.get_cell_type(cnt) == "text":
                types.append(str)
                if self.get_cell_markup(cnt):
                    column = Gtk.TreeViewColumn(col, renderer, markup=model_index)
                else:
                    column = Gtk.TreeViewColumn(col, renderer, text=model_index)
            elif self.get_cell_type(cnt) == "checkbox":
                types.append(bool)
                toggle_renderer = Gtk.CellRendererToggle()
                toggle_renderer.set_property('activatable', True)
                toggle_renderer.connect("toggled", self.toggle, model_index)
                column = Gtk.TreeViewColumn(col, toggle_renderer)
                column.add_attribute(toggle_renderer, "active", model_index)
            column.set_resizable(True)
            if self._sort_vals[cnt] != []:
                # Sort on a hidden integer column appended after the data.
                sort_data.append(self._sort_vals[cnt])
                column.set_sort_column_id(len(self._columns) +
                                          len(sort_data))
                sort_data_types.append(int)
            else:
                column.set_sort_column_id(model_index)
            treeview.append_column(column)
            self.model_index_of_column[col] = model_index
            #if model_index == sort_index:
            # FIXME: what to set here?
            model_index += 1
            cnt += 1
        if self.title:
            self.simpledoc.paragraph(self.title)
        # Make a GUI to put the tree view in
        types += sort_data_types
        model = Gtk.ListStore(*types)
        treeview.set_model(model)
        treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        text_iter = buffer.get_end_iter()  # renamed: don't shadow builtin iter()
        anchor = buffer.create_child_anchor(text_iter)
        text_view.add_child_at_anchor(treeview, anchor)
        self.treeview = treeview
        count = 0
        for data in self._rows:
            col = 0
            rowdata = []
            for cell in data:
                rowdata.append(self.get_cell_markup(col, count, cell))
                col += 1
            try:
                model.append(row=([count] + list(rowdata) + [col[count] for col in sort_data]))
            except KeyError as msg:
                print(msg)
                if sort_data:
                    print("Quicktable: error in row %d: data: %s, sort data: %d" % (count, rowdata, len(sort_data[0])))
                else:
                    print("Quicktable: error in row %d: data: %s" % (count, rowdata))
            count += 1
        text_view.show_all()
        self.simpledoc.paragraph("")
        self.simpledoc.paragraph("")
| jralls/gramps | gramps/gui/plug/quick/_quicktable.py | Python | gpl-2.0 | 18,614 |
# CircularLayout.py
# Copyright (C) 2009 Matthias Treder
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Implements a circular layout. All elements are placed on
a circle with a given radius in pixels. The first element
is placed on the top. The next elements are placed in
clockwise fashion.
Provide the number of elements (nr_elements) and the radius
(radius) when creating an intance of this layout. You can
also provide the angular position of the first element (start)
if you do not want it to be placed on the top.
"""
import math
class CircularLayout(object):
    """Place nr_elements points evenly on a circle of the given radius.

    The (x, y) pixel tuples are stored in ``self.positions``, starting
    at angle ``start`` (the top of the circle by default) and proceeding
    clockwise in equal angular steps.
    """
    def __init__(self, nr_elements=20, radius=200, start= - math.pi / 2):
        step = 2 * math.pi / nr_elements
        self.positions = [
            (round(radius * math.cos(start + k * step)),
             round(radius * math.sin(start + k * step)))
            for k in range(nr_elements)
        ]
| bbci/pyff | src/lib/P300Layout/CircularLayout.py | Python | gpl-2.0 | 1,596 |