repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
pforret/python-for-android | refs/heads/master | python-modules/twisted/twisted/conch/test/test_filetransfer.py | 78 | # -*- test-case-name: twisted.conch.test.test_filetransfer -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE file for details.
import os
import re
import struct
import sys

from twisted.trial import unittest
try:
    from twisted.conch import unix
    unix # shut up pyflakes
except ImportError:
    # The POSIX-only parts of conch are unavailable (e.g. on Windows).
    # Tests below check "if not unix" and skip themselves accordingly.
    unix = None
    try:
        del sys.modules['twisted.conch.unix'] # remove the bad import
    except KeyError:
        # In Python 2.4, the bad import has already been cleaned up for us.
        # Hooray.
        pass

from twisted.conch import avatar
from twisted.conch.ssh import common, connection, filetransfer, session
from twisted.internet import defer
from twisted.protocols import loopback
from twisted.python import components
class TestAvatar(avatar.ConchUser):
    """
    A Conch avatar which exposes an SSH session channel and an SFTP
    subsystem, for use by the file transfer tests.
    """

    def __init__(self):
        avatar.ConchUser.__init__(self)
        self.channelLookup['session'] = session.SSHSession
        self.subsystemLookup['sftp'] = filetransfer.FileTransferServer


    def _runAsUser(self, f, *args, **kw):
        """
        Run a single callable with the given arguments, or run each
        C{(callable, args, kwargs)} tuple in an iterable, returning the
        result of the last invocation.
        """
        try:
            calls = iter(f)
        except TypeError:
            # A lone callable: wrap it with the supplied arguments.
            calls = [(f, args, kw)]
        for call in calls:
            func = call[0]
            callArgs = ()
            callKw = {}
            # Missing or falsy positions fall back to empty args/kwargs,
            # matching the original "and ... or" defaulting.
            if len(call) > 1 and call[1]:
                callArgs = call[1]
            if len(call) > 2 and call[2]:
                callKw = call[2]
            r = func(*callArgs, **callKw)
        return r
class FileTransferTestAvatar(TestAvatar):
    """
    A L{TestAvatar} whose home directory is rooted at C{homeDir}.
    """

    def __init__(self, homeDir):
        TestAvatar.__init__(self)
        self.homeDir = homeDir

    def getHomeDir(self):
        # homeDir is a relative path; anchor it at the current working
        # directory so the SFTP server resolves paths under the test dir.
        return os.path.join(os.getcwd(), self.homeDir)
class ConchSessionForTestAvatar:
    """
    Minimal session object which only remembers the avatar it was built for.
    """

    def __init__(self, avatar):
        self.avatar = avatar
if unix:
    if not hasattr(unix, 'SFTPServerForUnixConchUser'):
        # unix should either be a fully working module, or None. I'm not sure
        # how this happens, but on win32 it does. Try to cope. --spiv.
        import warnings
        # Fixed: the message previously contained a stray trailing apostrophe
        # after the attribute name.
        warnings.warn(("twisted.conch.unix imported %r, "
                       "but doesn't define SFTPServerForUnixConchUser")
                      % (unix,))
        unix = None
    else:
        class FileTransferForTestAvatar(unix.SFTPServerForUnixConchUser):
            """
            SFTP server adapter used by the tests: reports one extension in
            the version exchange and answers one extended request.
            """

            def gotVersion(self, version, otherExt):
                return {'conchTest' : 'ext data'}

            def extendedRequest(self, extName, extData):
                if extName == 'testExtendedRequest':
                    return 'bar'
                raise NotImplementedError

        components.registerAdapter(FileTransferForTestAvatar,
                                   TestAvatar,
                                   filetransfer.ISFTPServer)
class SFTPTestBase(unittest.TestCase):
def setUp(self):
self.testDir = self.mktemp()
# Give the testDir another level so we can safely "cd .." from it in
# tests.
self.testDir = os.path.join(self.testDir, 'extra')
os.makedirs(os.path.join(self.testDir, 'testDirectory'))
f = file(os.path.join(self.testDir, 'testfile1'),'w')
f.write('a'*10+'b'*10)
f.write(file('/dev/urandom').read(1024*64)) # random data
os.chmod(os.path.join(self.testDir, 'testfile1'), 0644)
file(os.path.join(self.testDir, 'testRemoveFile'), 'w').write('a')
file(os.path.join(self.testDir, 'testRenameFile'), 'w').write('a')
file(os.path.join(self.testDir, '.testHiddenFile'), 'w').write('a')
class TestOurServerOurClient(SFTPTestBase):
    """
    Exercise L{filetransfer.FileTransferClient} against
    L{filetransfer.FileTransferServer} connected via loopback transports.
    """

    if not unix:
        skip = "can't run on non-posix computers"

    def setUp(self):
        """
        Wire a server and a client together over loopback transports and
        capture the negotiated server version and extension data.
        """
        SFTPTestBase.setUp(self)

        self.avatar = FileTransferTestAvatar(self.testDir)
        self.server = filetransfer.FileTransferServer(avatar=self.avatar)
        clientTransport = loopback.LoopbackRelay(self.server)

        self.client = filetransfer.FileTransferClient()
        self._serverVersion = None
        self._extData = None
        def _(serverVersion, extData):
            # Record what the server announced during version negotiation.
            self._serverVersion = serverVersion
            self._extData = extData
        self.client.gotServerVersion = _
        serverTransport = loopback.LoopbackRelay(self.client)
        self.client.makeConnection(clientTransport)
        self.server.makeConnection(serverTransport)

        self.clientTransport = clientTransport
        self.serverTransport = serverTransport

        self._emptyBuffers()

    def _emptyBuffers(self):
        # Pump the loopback transports until both directions are drained.
        while self.serverTransport.buffer or self.clientTransport.buffer:
            self.serverTransport.clearBuffer()
            self.clientTransport.clearBuffer()

    def tearDown(self):
        self.serverTransport.loseConnection()
        self.clientTransport.loseConnection()
        self.serverTransport.clearBuffer()
        self.clientTransport.clearBuffer()

    def testServerVersion(self):
        """
        The client records the server's protocol version and extension data.
        """
        self.failUnlessEqual(self._serverVersion, 3)
        self.failUnlessEqual(self._extData, {'conchTest' : 'ext data'})

    def test_openedFileClosedWithConnection(self):
        """
        A file opened with C{openFile} is close when the connection is lost.
        """
        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {})
        self._emptyBuffers()

        oldClose = os.close
        closed = []
        def close(fd):
            closed.append(fd)
            oldClose(fd)

        self.patch(os, "close", close)

        def _fileOpened(openFile):
            fd = self.server.openFiles[openFile.handle[4:]].fd
            self.serverTransport.loseConnection()
            self.clientTransport.loseConnection()
            self.serverTransport.clearBuffer()
            self.clientTransport.clearBuffer()
            self.assertEquals(self.server.openFiles, {})
            self.assertIn(fd, closed)

        d.addCallback(_fileOpened)
        return d

    def test_openedDirectoryClosedWithConnection(self):
        """
        A directory opened with C{openDirectory} is close when the connection
        is lost.
        """
        d = self.client.openDirectory('')
        self._emptyBuffers()

        def _getFiles(openDir):
            self.serverTransport.loseConnection()
            self.clientTransport.loseConnection()
            self.serverTransport.clearBuffer()
            self.clientTransport.clearBuffer()
            self.assertEquals(self.server.openDirs, {})

        d.addCallback(_getFiles)
        return d

    def testOpenFileIO(self):
        """
        Data written through C{writeChunk} can be read back via C{readChunk}.
        """
        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {})
        self._emptyBuffers()

        def _fileOpened(openFile):
            self.failUnlessEqual(openFile, filetransfer.ISFTPFile(openFile))
            d = _readChunk(openFile)
            d.addCallback(_writeChunk, openFile)
            return d

        def _readChunk(openFile):
            d = openFile.readChunk(0, 20)
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual, 'a'*10 + 'b'*10)
            return d

        def _writeChunk(_, openFile):
            d = openFile.writeChunk(20, 'c'*10)
            self._emptyBuffers()
            d.addCallback(_readChunk2, openFile)
            return d

        def _readChunk2(_, openFile):
            d = openFile.readChunk(0, 30)
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual, 'a'*10 + 'b'*10 + 'c'*10)
            return d

        d.addCallback(_fileOpened)
        return d

    def testClosedFileGetAttrs(self):
        """
        C{getAttrs} on a closed file handle fails with L{SFTPError}.
        """
        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {})
        self._emptyBuffers()

        def _getAttrs(_, openFile):
            d = openFile.getAttrs()
            self._emptyBuffers()
            return d

        def _err(f):
            self.flushLoggedErrors()
            return f

        def _close(openFile):
            d = openFile.close()
            self._emptyBuffers()
            d.addCallback(_getAttrs, openFile)
            d.addErrback(_err)
            return self.assertFailure(d, filetransfer.SFTPError)

        d.addCallback(_close)
        return d

    def testOpenFileAttributes(self):
        """
        Attributes fetched from an open handle match those fetched by path.
        """
        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {})
        self._emptyBuffers()

        def _getAttrs(openFile):
            d = openFile.getAttrs()
            self._emptyBuffers()
            d.addCallback(_getAttrs2)
            return d

        def _getAttrs2(attrs1):
            d = self.client.getAttrs('testfile1')
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual, attrs1)
            return d

        return d.addCallback(_getAttrs)

    def testOpenFileSetAttrs(self):
        # XXX test setAttrs
        # Ok, how about this for a start?  It caught a bug :) -- spiv.
        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {})
        self._emptyBuffers()

        def _getAttrs(openFile):
            d = openFile.getAttrs()
            self._emptyBuffers()
            d.addCallback(_setAttrs)
            return d

        def _setAttrs(attrs):
            attrs['atime'] = 0
            d = self.client.setAttrs('testfile1', attrs)
            self._emptyBuffers()
            d.addCallback(_getAttrs2)
            d.addCallback(self.failUnlessEqual, attrs)
            return d

        def _getAttrs2(_):
            d = self.client.getAttrs('testfile1')
            self._emptyBuffers()
            return d

        d.addCallback(_getAttrs)
        return d

    def test_openFileExtendedAttributes(self):
        """
        Check that L{filetransfer.FileTransferClient.openFile} can send
        extended attributes, that should be extracted server side. By default,
        they are ignored, so we just verify they are correctly parsed.
        """
        savedAttributes = {}
        oldOpenFile = self.server.client.openFile
        def openFile(filename, flags, attrs):
            savedAttributes.update(attrs)
            return oldOpenFile(filename, flags, attrs)
        self.server.client.openFile = openFile

        d = self.client.openFile("testfile1", filetransfer.FXF_READ |
                                 filetransfer.FXF_WRITE, {"ext_foo": "bar"})
        self._emptyBuffers()

        def check(ign):
            self.assertEquals(savedAttributes, {"ext_foo": "bar"})

        return d.addCallback(check)

    def testRemoveFile(self):
        """
        Removing a file twice fails the second time with L{SFTPError}.
        """
        d = self.client.getAttrs("testRemoveFile")
        self._emptyBuffers()
        def _removeFile(ignored):
            d = self.client.removeFile("testRemoveFile")
            self._emptyBuffers()
            return d
        d.addCallback(_removeFile)
        d.addCallback(_removeFile)
        return self.assertFailure(d, filetransfer.SFTPError)

    def testRenameFile(self):
        """
        A renamed file keeps its attributes under the new name.
        """
        d = self.client.getAttrs("testRenameFile")
        self._emptyBuffers()
        def _rename(attrs):
            d = self.client.renameFile("testRenameFile", "testRenamedFile")
            self._emptyBuffers()
            d.addCallback(_testRenamed, attrs)
            return d
        def _testRenamed(_, attrs):
            d = self.client.getAttrs("testRenamedFile")
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual, attrs)
            # Fixed: this Deferred was previously not returned, so a failed
            # attribute comparison could never propagate to the test result.
            return d
        return d.addCallback(_rename)

    def testDirectoryBad(self):
        """
        C{getAttrs} on a nonexistent path fails with L{SFTPError}.
        """
        d = self.client.getAttrs("testMakeDirectory")
        self._emptyBuffers()
        return self.assertFailure(d, filetransfer.SFTPError)

    def testDirectoryCreation(self):
        """
        A directory can be created and removed; afterwards it is gone.
        """
        d = self.client.makeDirectory("testMakeDirectory", {})
        self._emptyBuffers()

        def _getAttrs(_):
            d = self.client.getAttrs("testMakeDirectory")
            self._emptyBuffers()
            return d

        # XXX not until version 4/5
        # self.failUnlessEqual(filetransfer.FILEXFER_TYPE_DIRECTORY&attrs['type'],
        #                      filetransfer.FILEXFER_TYPE_DIRECTORY)

        def _removeDirectory(_):
            d = self.client.removeDirectory("testMakeDirectory")
            self._emptyBuffers()
            return d

        d.addCallback(_getAttrs)
        d.addCallback(_removeDirectory)
        d.addCallback(_getAttrs)
        return self.assertFailure(d, filetransfer.SFTPError)

    def testOpenDirectory(self):
        """
        Iterating an open directory yields exactly the fixture file names.
        """
        d = self.client.openDirectory('')
        self._emptyBuffers()
        files = []

        def _getFiles(openDir):
            def append(f):
                files.append(f)
                return openDir
            d = defer.maybeDeferred(openDir.next)
            self._emptyBuffers()
            d.addCallback(append)
            d.addCallback(_getFiles)
            # StopIteration (wrapped as a failure) signals end-of-directory.
            d.addErrback(_close, openDir)
            return d

        def _checkFiles(ignored):
            fs = list(zip(*files)[0])
            fs.sort()
            self.failUnlessEqual(fs,
                                 ['.testHiddenFile', 'testDirectory',
                                  'testRemoveFile', 'testRenameFile',
                                  'testfile1'])

        def _close(_, openDir):
            d = openDir.close()
            self._emptyBuffers()
            return d

        d.addCallback(_getFiles)
        d.addCallback(_checkFiles)
        return d

    def testLinkDoesntExist(self):
        """
        C{getAttrs} on a nonexistent link fails with L{SFTPError}.
        """
        d = self.client.getAttrs('testLink')
        self._emptyBuffers()
        return self.assertFailure(d, filetransfer.SFTPError)

    def testLinkSharesAttrs(self):
        """
        Following a symlink yields the attributes of its target.
        """
        d = self.client.makeLink('testLink', 'testfile1')
        self._emptyBuffers()
        def _getFirstAttrs(_):
            d = self.client.getAttrs('testLink', 1)
            self._emptyBuffers()
            return d
        def _getSecondAttrs(firstAttrs):
            d = self.client.getAttrs('testfile1')
            self._emptyBuffers()
            d.addCallback(self.assertEqual, firstAttrs)
            return d
        d.addCallback(_getFirstAttrs)
        return d.addCallback(_getSecondAttrs)

    def testLinkPath(self):
        """
        C{readLink} and C{realPath} both resolve a symlink to its target.
        """
        d = self.client.makeLink('testLink', 'testfile1')
        self._emptyBuffers()
        def _readLink(_):
            d = self.client.readLink('testLink')
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual,
                          os.path.join(os.getcwd(), self.testDir, 'testfile1'))
            return d
        def _realPath(_):
            d = self.client.realPath('testLink')
            self._emptyBuffers()
            d.addCallback(self.failUnlessEqual,
                          os.path.join(os.getcwd(), self.testDir, 'testfile1'))
            return d
        d.addCallback(_readLink)
        d.addCallback(_realPath)
        return d

    def testExtendedRequest(self):
        """
        A supported extended request is answered; an unknown one fails.
        """
        d = self.client.extendedRequest('testExtendedRequest', 'foo')
        self._emptyBuffers()
        d.addCallback(self.failUnlessEqual, 'bar')
        d.addCallback(self._cbTestExtendedRequest)
        return d

    def _cbTestExtendedRequest(self, ignored):
        d = self.client.extendedRequest('testBadRequest', '')
        self._emptyBuffers()
        return self.assertFailure(d, NotImplementedError)
class FakeConn:
    """
    Stub SSH connection which silently accepts channel-close notifications.
    """

    def sendClose(self, channel):
        pass
class TestFileTransferClose(unittest.TestCase):
    """
    An SFTP subsystem server is notified (via connectionLost) when its
    session or its whole SSH connection goes away.
    """

    if not unix:
        skip = "can't run on non-posix computers"

    def setUp(self):
        self.avatar = TestAvatar()

    def buildServerConnection(self):
        """
        Create an SSHConnection wired to a dummy transport carrying our
        avatar, as a server-side connection would be.
        """
        # make a server connection
        conn = connection.SSHConnection()
        # server connections have a 'self.transport.avatar'.
        class DummyTransport:
            def __init__(self):
                self.transport = self
            def sendPacket(self, kind, data):
                pass
            def logPrefix(self):
                return 'dummy transport'
        conn.transport = DummyTransport()
        conn.transport.avatar = self.avatar
        return conn

    def interceptConnectionLost(self, sftpServer):
        """
        Wrap sftpServer.connectionLost so its invocation can be asserted on.
        """
        self.connectionLostFired = False
        origConnectionLost = sftpServer.connectionLost
        def connectionLost(reason):
            self.connectionLostFired = True
            origConnectionLost(reason)
        sftpServer.connectionLost = connectionLost

    def assertSFTPConnectionLost(self):
        self.assertTrue(self.connectionLostFired,
                        "sftpServer's connectionLost was not called")

    def test_sessionClose(self):
        """
        Closing a session should notify an SFTP subsystem launched by that
        session.
        """
        # make a session
        testSession = session.SSHSession(conn=FakeConn(), avatar=self.avatar)

        # start an SFTP subsystem on the session
        testSession.request_subsystem(common.NS('sftp'))
        sftpServer = testSession.client.transport.proto

        # intercept connectionLost so we can check that it's called
        self.interceptConnectionLost(sftpServer)

        # close session
        testSession.closeReceived()

        self.assertSFTPConnectionLost()

    def test_clientClosesChannelOnConnnection(self):
        """
        A client sending CHANNEL_CLOSE should trigger closeReceived on the
        associated channel instance.
        """
        conn = self.buildServerConnection()

        # somehow get a session
        packet = common.NS('session') + struct.pack('>L', 0) * 3
        conn.ssh_CHANNEL_OPEN(packet)
        sessionChannel = conn.channels[0]

        sessionChannel.request_subsystem(common.NS('sftp'))
        sftpServer = sessionChannel.client.transport.proto
        # Fixed: interceptConnectionLost was previously called twice here,
        # the second call under a misleading "intercept closeReceived"
        # comment; one interception is all that is needed.
        self.interceptConnectionLost(sftpServer)

        # close the connection
        conn.ssh_CHANNEL_CLOSE(struct.pack('>L', 0))

        self.assertSFTPConnectionLost()

    def test_stopConnectionServiceClosesChannel(self):
        """
        Closing an SSH connection should close all sessions within it.
        """
        conn = self.buildServerConnection()

        # somehow get a session
        packet = common.NS('session') + struct.pack('>L', 0) * 3
        conn.ssh_CHANNEL_OPEN(packet)
        sessionChannel = conn.channels[0]

        sessionChannel.request_subsystem(common.NS('sftp'))
        sftpServer = sessionChannel.client.transport.proto
        self.interceptConnectionLost(sftpServer)

        # close the connection
        conn.serviceStopped()

        self.assertSFTPConnectionLost()
class TestConstants(unittest.TestCase):
    """
    Tests for the constants used by the SFTP protocol implementation.

    @ivar filexferSpecExcerpts: Excerpts from the
    draft-ietf-secsh-filexfer-02.txt (draft) specification of the SFTP
    protocol.  There are more recent drafts of the specification, but this
    one describes version 3, which is what conch (and OpenSSH) implements.
    """
    # Each excerpt is scanned for '#define SSH_<NAME> <value>' lines by
    # test_constantsAgainstSpec below.
    filexferSpecExcerpts = [
        """
        The following values are defined for packet types.
        #define SSH_FXP_INIT 1
        #define SSH_FXP_VERSION 2
        #define SSH_FXP_OPEN 3
        #define SSH_FXP_CLOSE 4
        #define SSH_FXP_READ 5
        #define SSH_FXP_WRITE 6
        #define SSH_FXP_LSTAT 7
        #define SSH_FXP_FSTAT 8
        #define SSH_FXP_SETSTAT 9
        #define SSH_FXP_FSETSTAT 10
        #define SSH_FXP_OPENDIR 11
        #define SSH_FXP_READDIR 12
        #define SSH_FXP_REMOVE 13
        #define SSH_FXP_MKDIR 14
        #define SSH_FXP_RMDIR 15
        #define SSH_FXP_REALPATH 16
        #define SSH_FXP_STAT 17
        #define SSH_FXP_RENAME 18
        #define SSH_FXP_READLINK 19
        #define SSH_FXP_SYMLINK 20
        #define SSH_FXP_STATUS 101
        #define SSH_FXP_HANDLE 102
        #define SSH_FXP_DATA 103
        #define SSH_FXP_NAME 104
        #define SSH_FXP_ATTRS 105
        #define SSH_FXP_EXTENDED 200
        #define SSH_FXP_EXTENDED_REPLY 201
        Additional packet types should only be defined if the protocol
        version number (see Section ``Protocol Initialization'') is
        incremented, and their use MUST be negotiated using the version
        number.  However, the SSH_FXP_EXTENDED and SSH_FXP_EXTENDED_REPLY
        packets can be used to implement vendor-specific extensions.  See
        Section ``Vendor-Specific-Extensions'' for more details.
        """,
        """
        The flags bits are defined to have the following values:
        #define SSH_FILEXFER_ATTR_SIZE 0x00000001
        #define SSH_FILEXFER_ATTR_UIDGID 0x00000002
        #define SSH_FILEXFER_ATTR_PERMISSIONS 0x00000004
        #define SSH_FILEXFER_ATTR_ACMODTIME 0x00000008
        #define SSH_FILEXFER_ATTR_EXTENDED 0x80000000
        """,
        """
        The `pflags' field is a bitmask.  The following bits have been
        defined.
        #define SSH_FXF_READ 0x00000001
        #define SSH_FXF_WRITE 0x00000002
        #define SSH_FXF_APPEND 0x00000004
        #define SSH_FXF_CREAT 0x00000008
        #define SSH_FXF_TRUNC 0x00000010
        #define SSH_FXF_EXCL 0x00000020
        """,
        """
        Currently, the following values are defined (other values may be
        defined by future versions of this protocol):
        #define SSH_FX_OK 0
        #define SSH_FX_EOF 1
        #define SSH_FX_NO_SUCH_FILE 2
        #define SSH_FX_PERMISSION_DENIED 3
        #define SSH_FX_FAILURE 4
        #define SSH_FX_BAD_MESSAGE 5
        #define SSH_FX_NO_CONNECTION 6
        #define SSH_FX_CONNECTION_LOST 7
        #define SSH_FX_OP_UNSUPPORTED 8
        """]

    def test_constantsAgainstSpec(self):
        """
        The constants used by the SFTP protocol implementation match those
        found by searching through the spec.
        """
        constants = {}
        for excerpt in self.filexferSpecExcerpts:
            for line in excerpt.splitlines():
                # Pull the constant name and (possibly hexadecimal) value out
                # of each '#define' line; long(..., 0) honours a 0x prefix.
                m = re.match('^\s*#define SSH_([A-Z_]+)\s+([0-9x]*)\s*$', line)
                if m:
                    constants[m.group(1)] = long(m.group(2), 0)
        self.assertTrue(
            len(constants) > 0, "No constants found (the test must be buggy).")
        for k, v in constants.items():
            self.assertEqual(v, getattr(filetransfer, k))
|
gtracy/madison-transit-api | refs/heads/master | gdata/docs/service.py | 53 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
# Labels identifying the kind of a Document List entry.
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'

# Category scheme and terms used for document state labels.
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'

FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'

# File extensions of documents that are permitted to be uploaded or downloaded.
# Maps upper-case extension -> MIME content type.
SUPPORTED_FILETYPES = {
  'CSV': 'text/csv',
  'TSV': 'text/tab-separated-values',
  'TAB': 'text/tab-separated-values',
  'DOC': 'application/msword',
  'DOCX': ('application/vnd.openxmlformats-officedocument.'
           'wordprocessingml.document'),
  'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
  'ODT': 'application/vnd.oasis.opendocument.text',
  'RTF': 'application/rtf',
  'SXW': 'application/vnd.sun.xml.writer',
  'TXT': 'text/plain',
  'XLS': 'application/vnd.ms-excel',
  'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
  'PDF': 'application/pdf',
  'PNG': 'image/png',
  'PPT': 'application/vnd.ms-powerpoint',
  'PPS': 'application/vnd.ms-powerpoint',
  'HTM': 'text/html',
  'HTML': 'text/html',
  'ZIP': 'application/zip',
  'SWF': 'application/x-shockwave-flash'
  }
class DocsService(gdata.service.GDataService):
  """Client extension for the Google Documents service Document List feed."""

  # Matches a trailing file extension of three or more letters,
  # e.g. 'report.docx' -> group(1) == 'docx'.
  __FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
  # Matches a resource id like 'document:12345' (':' may be URL-escaped as
  # '%3A'); group(1) is the kind label, group(3) the document id.
  __RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')
  def __init__(self, email=None, password=None, source=None,
               server='docs.google.com', additional_headers=None, **kwargs):
    """Creates a client for the Google Documents service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'docs.google.com'.
      additional_headers: dict (optional) Extra HTTP headers to send with
          every request.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='writely', source=source,
        server=server, additional_headers=additional_headers, **kwargs)
    # The Documents API requires HTTPS.
    self.ssl = True
def _MakeKindCategory(self, label):
if label is None:
return None
return atom.Category(scheme=DATA_KIND_SCHEME,
term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
def _MakeContentLinkFromId(self, resource_id):
match = self.__RESOURCE_ID_PATTERN.match(resource_id)
label = match.group(1)
doc_id = match.group(3)
if label == DOCUMENT_LABEL:
return '/feeds/download/documents/Export?docId=%s' % doc_id
if label == PRESENTATION_LABEL:
return '/feeds/download/presentations/Export?docId=%s' % doc_id
if label == SPREADSHEET_LABEL:
return ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
'Export?key=%s' % doc_id)
raise ValueError, 'Invalid resource id: %s' % resource_id
  def _UploadFile(self, media_source, title, category, folder_or_uri=None):
    """Uploads a file to the Document List feed.

    Args:
      media_source: A gdata.MediaSource object containing the file to be
          uploaded.
      title: string The title of the document on the server after being
          uploaded.
      category: An atom.Category object specifying the appropriate document
          type.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id

    Returns:
      A DocumentListEntry containing information about the document created on
      the Google Documents service.
    """
    if folder_or_uri:
      try:
        # A DocumentListEntry carries the folder's upload URI as content.src.
        uri = folder_or_uri.content.src
      except AttributeError:
        # Otherwise assume the caller passed the URI string directly.
        uri = folder_or_uri
    else:
      uri = '/feeds/documents/private/full'

    entry = gdata.docs.DocumentListEntry()
    entry.title = atom.Title(text=title)
    if category is not None:
      entry.category.append(category)

    # The Slug header carries the original file name for the upload.
    entry = self.Post(entry, uri, media_source=media_source,
                      extra_headers={'Slug': media_source.file_name},
                      converter=gdata.docs.DocumentListEntryFromString)
    return entry
def _DownloadFile(self, uri, file_path):
"""Downloads a file.
Args:
uri: string The full Export URL to download the file from.
file_path: string The full path to save the file to.
Raises:
RequestError: on error response from server.
"""
server_response = self.request('GET', uri)
response_body = server_response.read()
timeout = 5
while server_response.status == 302 and timeout > 0:
server_response = self.request('GET',
server_response.getheader('Location'))
response_body = server_response.read()
timeout -= 1
if server_response.status != 200:
raise gdata.service.RequestError, {'status': server_response.status,
'reason': server_response.reason,
'body': response_body}
f = open(file_path, 'wb')
f.write(response_body)
f.flush()
f.close()
  def MoveIntoFolder(self, source_entry, folder_entry):
    """Moves a document into a folder in the Document List Feed.

    Args:
      source_entry: DocumentListEntry An object representing the source
          document/folder.
      folder_entry: DocumentListEntry An object with a link to the destination
          folder.

    Returns:
      A DocumentListEntry containing information about the document created on
      the Google Documents service.
    """
    # Posting an entry bearing only the source's id to the folder's content
    # URI performs the move.
    entry = gdata.docs.DocumentListEntry()
    entry.id = source_entry.id
    entry = self.Post(entry, folder_entry.content.src,
                      converter=gdata.docs.DocumentListEntryFromString)
    return entry
  def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
    """Queries the Document List feed and returns the resulting feed of
       entries.

    Args:
      uri: string The full URI to be queried. This can contain query
          parameters, a hostname, or simply the relative path to a Document
          List feed. The DocumentQuery object is useful when constructing
          query parameters.
      converter: func (optional) A function which will be executed on the
          retrieved item, generally to render it into a Python object.
          By default the DocumentListFeedFromString function is used to
          return a DocumentListFeed object. This is because most feed
          queries will result in a feed and not a single entry.

    Returns:
      The result of applying converter to the server's response.
    """
    return self.Get(uri, converter=converter)
  def QueryDocumentListFeed(self, uri):
    """Retrieves a DocumentListFeed by retrieving a URI based off the Document
       List feed, including any query parameters. A DocumentQuery object can
       be used to construct these parameters.

    Args:
      uri: string The URI of the feed being retrieved possibly with query
          parameters.

    Returns:
      A DocumentListFeed object representing the feed returned by the server.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
  def GetDocumentListEntry(self, uri):
    """Retrieves a particular DocumentListEntry by its unique URI.

    Args:
      uri: string The unique URI of an entry in a Document List feed.

    Returns:
      A DocumentListEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
  def GetDocumentListFeed(self, uri=None):
    """Retrieves a feed containing all of a user's documents.

    Args:
      uri: string A full URI to query the Document List feed.

    Returns:
      A DocumentListFeed object representing the feed returned by the server.
    """
    if not uri:
      # Default to the standard private documents feed.
      uri = gdata.docs.service.DocumentQuery().ToUri()
    return self.QueryDocumentListFeed(uri)
  def GetDocumentListAclEntry(self, uri):
    """Retrieves a particular DocumentListAclEntry by its unique URI.

    Args:
      uri: string The unique URI of an entry in a Document List feed.

    Returns:
      A DocumentListAclEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
  def GetDocumentListAclFeed(self, uri):
    """Retrieves the ACL feed for a document.

    Args:
      uri: string The URI of a document's Acl feed to retrieve.

    Returns:
      A DocumentListAclFeed object representing the ACL feed
      returned by the server.
    """
    return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
  def Upload(self, media_source, title, folder_or_uri=None, label=None):
    """Uploads a document inside of a MediaSource object to the Document List
       feed with the given title.

    Args:
      media_source: MediaSource The gdata.MediaSource object containing a
          document file to be uploaded.
      title: string The title of the document on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id
      label: optional label describing the type of the document to be created.

    Returns:
      A DocumentListEntry containing information about the document created
      on the Google Documents service.
    """
    return self._UploadFile(media_source, title, self._MakeKindCategory(label),
                            folder_or_uri)
  def Download(self, entry_or_id_or_url, file_path, export_format=None,
               gid=None, extra_params=None):
    """Downloads a document from the Document List.

    Args:
      entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
          or a url to download from (such as the content src).
      file_path: string The full path to save the file to.
      export_format: the format to convert to, if conversion is required.
      gid: grid id, for downloading a single grid of a spreadsheet
      extra_params: a map of any further parameters to control how the document
          is downloaded

    Raises:
      RequestError if the service does not respond with success
    """
    if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
      url = entry_or_id_or_url.content.src
    else:
      if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
        url = self._MakeContentLinkFromId(entry_or_id_or_url)
      else:
        # Assume the caller passed a download URL directly.
        url = entry_or_id_or_url

    if export_format is not None:
      # Only Export-style URLs accept a conversion format.
      if url.find('/Export?') == -1:
        raise gdata.service.Error, ('This entry cannot be exported '
                                    'as a different format')
      url += '&exportFormat=%s' % export_format

    if gid is not None:
      # A grid id is only meaningful for spreadsheet downloads.
      if url.find('spreadsheets') == -1:
        raise gdata.service.Error, 'grid id param is not valid for this entry'
      url += '&gid=%s' % gid

    if extra_params:
      url += '&' + urllib.urlencode(extra_params)

    self._DownloadFile(url, file_path)
  def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
    """Downloads a document from the Document List in a different format.

    Args:
      entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
          or a url to download from (such as the content src).
      file_path: string The full path to save the file to.  The export
          format is inferred from the the file extension.
      gid: grid id, for downloading a single grid of a spreadsheet
      extra_params: a map of any further parameters to control how the document
          is downloaded

    Raises:
      RequestError if the service does not respond with success
    """
    ext = None
    # Infer the export format from the target file's extension, if any.
    match = self.__FILE_EXT_PATTERN.match(file_path)
    if match:
      ext = match.group(1)
    self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
  def CreateFolder(self, title, folder_or_uri=None):
    """Creates a folder in the Document List feed.

    Args:
      title: string The title of the folder on the server after being created.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id

    Returns:
      A DocumentListEntry containing information about the folder created on
      the Google Documents service.
    """
    if folder_or_uri:
      try:
        # A DocumentListEntry carries the parent folder's URI as content.src.
        uri = folder_or_uri.content.src
      except AttributeError:
        # Otherwise assume the caller passed the URI string directly.
        uri = folder_or_uri
    else:
      uri = '/feeds/documents/private/full'

    folder_entry = gdata.docs.DocumentListEntry()
    folder_entry.title = atom.Title(text=title)
    folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
    folder_entry = self.Post(folder_entry, uri,
                             converter=gdata.docs.DocumentListEntryFromString)

    return folder_entry
  def MoveOutOfFolder(self, source_entry):
    """Moves a document out of a folder in the Document List Feed.

    Args:
      source_entry: DocumentListEntry An object representing the source
          document/folder.

    Returns:
      True if the entry was moved out.
    """
    # Deleting the entry's edit link removes it from the folder without
    # deleting the underlying document.
    return self.Delete(source_entry.GetEditLink().href)
  # Deprecated methods

  #@atom.deprecated('Please use Upload instead')
  def UploadPresentation(self, media_source, title, folder_or_uri=None):
    """Uploads a presentation inside of a MediaSource object to the Document
       List feed with the given title.

    This method is deprecated, use Upload instead.

    Args:
      media_source: MediaSource The MediaSource object containing a
          presentation file to be uploaded.
      title: string The title of the presentation on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id

    Returns:
      A DocumentListEntry containing information about the presentation created
      on the Google Documents service.
    """
    return self._UploadFile(
        media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
        folder_or_uri=folder_or_uri)

  # Decorator applied manually for pre-2.4-style compatibility.
  UploadPresentation = atom.deprecated('Please use Upload instead')(
      UploadPresentation)
  #@atom.deprecated('Please use Upload instead')
  def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
    """Uploads a spreadsheet inside of a MediaSource object to the Document
       List feed with the given title.

    This method is deprecated, use Upload instead.

    Args:
      media_source: MediaSource The MediaSource object containing a spreadsheet
          file to be uploaded.
      title: string The title of the spreadsheet on the server after being
          uploaded.
      folder_or_uri: DocumentListEntry or string (optional) An object with a
          link to a folder or a uri to a folder to upload to.
          Note: A valid uri for a folder is of the form:
                /feeds/folders/private/full/folder%3Afolder_id

    Returns:
      A DocumentListEntry containing information about the spreadsheet created
      on the Google Documents service.
    """
    return self._UploadFile(
        media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
        folder_or_uri=folder_or_uri)

  # Decorator applied manually for pre-2.4-style compatibility.
  UploadSpreadsheet = atom.deprecated('Please use Upload instead')(
      UploadSpreadsheet)
#@atom.deprecated('Please use Upload instead')
def UploadDocument(self, media_source, title, folder_or_uri=None):
  """Uploads a document file to the Document List feed.

  This method is deprecated, use Upload instead.

  Args:
    media_source: MediaSource The gdata.MediaSource object containing a
        document file to be uploaded.
    title: string The title of the document on the server after being
        uploaded.
    folder_or_uri: DocumentListEntry or string (optional) An object with a
        link to a folder or a uri to a folder to upload to.
        Note: A valid uri for a folder is of the form:
              /feeds/folders/private/full/folder%3Afolder_id

  Returns:
    A DocumentListEntry containing information about the document created
    on the Google Documents service.
  """
  kind = self._MakeKindCategory(DOCUMENT_LABEL)
  return self._UploadFile(media_source, title, kind,
                          folder_or_uri=folder_or_uri)

UploadDocument = atom.deprecated('Please use Upload instead')(
    UploadDocument)
"""Calling any of these functions is the same as calling Export"""
DownloadDocument = atom.deprecated('Please use Export instead')(Export)
DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
"""Calling any of these functions is the same as calling MoveIntoFolder"""
MoveDocumentIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MovePresentationIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveSpreadsheetIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveFolderIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
  """Builds URIs for querying the Google Document List feed."""

  def __init__(self, feed='/feeds/documents', visibility='private',
               projection='full', text_query=None, params=None,
               categories=None):
    """Constructor for Document List Query.

    Args:
      feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
      visibility: string (optional) The visibility chosen for the current feed.
      projection: string (optional) The projection chosen for the current feed.
      text_query: string (optional) The contents of the q query parameter. This
          string is URL escaped upon conversion to a URI.
      params: dict (optional) Parameter value string pairs which become URL
          params when translated to a URI. These parameters are added to the
          query's items.
      categories: list (optional) List of category strings which should be
          included as query categories. See gdata.service.Query for additional
          documentation.

    Yields:
      A DocumentQuery object used to construct a URI based on the Document
      List feed.
    """
    self.visibility = visibility
    self.projection = projection
    gdata.service.Query.__init__(self, feed, text_query, params, categories)

  def ToUri(self):
    """Generates a URI from the query parameters set in the object.

    Returns:
      A string containing the URI used to retrieve entries from the Document
      List feed.
    """
    # Temporarily extend the feed path with visibility/projection, render the
    # URI via the base class, then restore the original feed path.
    saved_feed = self.feed
    self.feed = '/'.join((saved_feed, self.visibility, self.projection))
    uri = gdata.service.Query.ToUri(self)
    self.feed = saved_feed
    return uri

  def AddNamedFolder(self, email, folder_name):
    """Adds a named folder category, qualified by a schema.

    This function lets you query for documents that are contained inside a
    named folder without fear of collision with other categories.

    Args:
      email: string The email of the user who owns the folder.
      folder_name: string The name of the folder.

    Returns:
      The string of the category that was added to the object.
    """
    scheme = '{%s%s}' % (FOLDERS_SCHEME_PREFIX, email)
    category = scheme + folder_name
    self.categories.append(category)
    return category

  def RemoveNamedFolder(self, email, folder_name):
    """Removes a named folder category, qualified by a schema.

    Args:
      email: string The email of the user who owns the folder.
      folder_name: string The name of the folder.

    Returns:
      The string of the category that was removed from the object.
    """
    scheme = '{%s%s}' % (FOLDERS_SCHEME_PREFIX, email)
    category = scheme + folder_name
    self.categories.remove(category)
    return category
class DocumentAclQuery(gdata.service.Query):
  """Builds URIs for querying a Document's ACL feed."""

  def __init__(self, resource_id, feed='/feeds/acl/private/full'):
    """Constructor for Document ACL Query.

    Args:
      resource_id: string The resource id. (e.g. 'document%3Adocument_id',
          'spreadsheet%3Aspreadsheet_id', etc.)
      feed: string (optional) The path for the feed.
          (e.g. '/feeds/acl/private/full')

    Yields:
      A DocumentAclQuery object used to construct a URI based on the Document
      ACL feed.
    """
    self.resource_id = resource_id
    gdata.service.Query.__init__(self, feed)

  def ToUri(self):
    """Generates a URI from the query parameters set in the object.

    Returns:
      A string containing the URI used to retrieve entries from the Document
      ACL feed.
    """
    base_uri = gdata.service.Query.ToUri(self)
    return '/'.join((base_uri, self.resource_id))
|
brandond/ansible | refs/heads/devel | test/units/executor/module_common/test_recursive_finder.py | 13 | # (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import imp
import pytest
import zipfile
from collections import namedtuple
from functools import partial
from io import BytesIO, StringIO
import ansible.errors
from ansible.executor.module_common import recursive_finder
from ansible.module_utils.six import PY2
from ansible.module_utils.six.moves import builtins
# Keep a reference to the real imp.find_module so the find_*_foo helpers can
# delegate every lookup except the faked 'foo' module to the original.
original_find_module = imp.find_module

# These are the modules that are brought in by module_utils/basic.py This may need to be updated
# when basic.py gains new imports
# We will remove these when we modify AnsiBallZ to store its args in a separate file instead of in
# basic.py
# Each entry is a tuple of module-path components under ansible.module_utils.
MODULE_UTILS_BASIC_IMPORTS = frozenset((('_text',),
                                        ('basic',),
                                        ('common', '__init__'),
                                        ('common', '_collections_compat'),
                                        ('common', 'file'),
                                        ('common', 'process'),
                                        ('common', 'sys_info'),
                                        ('common', '_utils'),
                                        ('distro', '__init__'),
                                        ('distro', '_distro'),
                                        ('parsing', '__init__'),
                                        ('parsing', 'convert_bool'),
                                        ('pycompat24',),
                                        ('six', '__init__'),
                                        ))

# The zipfile member names that correspond to MODULE_UTILS_BASIC_IMPORTS.
MODULE_UTILS_BASIC_FILES = frozenset(('ansible/module_utils/_text.py',
                                      'ansible/module_utils/basic.py',
                                      'ansible/module_utils/common/__init__.py',
                                      'ansible/module_utils/common/_collections_compat.py',
                                      'ansible/module_utils/common/file.py',
                                      'ansible/module_utils/common/process.py',
                                      'ansible/module_utils/common/sys_info.py',
                                      'ansible/module_utils/common/_utils.py',
                                      'ansible/module_utils/distro/__init__.py',
                                      'ansible/module_utils/distro/_distro.py',
                                      'ansible/module_utils/parsing/__init__.py',
                                      'ansible/module_utils/parsing/convert_bool.py',
                                      'ansible/module_utils/pycompat24.py',
                                      'ansible/module_utils/six/__init__.py',
                                      ))

# Minimal expectations used by tests that mock out most of the import walk.
ONLY_BASIC_IMPORT = frozenset((('basic',),))
ONLY_BASIC_FILE = frozenset(('ansible/module_utils/basic.py',))
@pytest.fixture
def finder_containers():
    """Return fresh (py_module_names, py_module_cache, zf) containers for recursive_finder."""
    FinderContainers = namedtuple(
        'FinderContainers', ['py_module_names', 'py_module_cache', 'zf'])
    zip_buffer = BytesIO()
    archive = zipfile.ZipFile(zip_buffer, mode='w',
                              compression=zipfile.ZIP_STORED)
    return FinderContainers(set(), {}, archive)
def find_module_foo(module_utils_data, *args, **kwargs):
    """imp.find_module stand-in that fakes a source module named 'foo'."""
    if args[0] != 'foo':
        return original_find_module(*args, **kwargs)
    fake_path = '/usr/lib/python2.7/site-packages/ansible/module_utils/foo.py'
    return (module_utils_data, fake_path, ('.py', 'r', imp.PY_SOURCE))
def find_package_foo(module_utils_data, *args, **kwargs):
    """imp.find_module stand-in that fakes a package named 'foo'."""
    if args[0] != 'foo':
        return original_find_module(*args, **kwargs)
    fake_path = '/usr/lib/python2.7/site-packages/ansible/module_utils/foo'
    return (module_utils_data, fake_path, ('', '', imp.PKG_DIRECTORY))
class TestRecursiveFinder(object):
    """Tests for ansible.executor.module_common.recursive_finder."""

    def test_no_module_utils(self, finder_containers):
        # A module with no module_utils imports still pulls in everything
        # that basic.py requires.
        name = 'ping'
        data = b'#!/usr/bin/python\nreturn \'{\"changed\": false}\''
        recursive_finder(name, data, *finder_containers)
        assert finder_containers.py_module_names == set(()).union(MODULE_UTILS_BASIC_IMPORTS)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == MODULE_UTILS_BASIC_FILES

    def test_module_utils_with_syntax_error(self, finder_containers):
        # Invalid syntax in the module source is reported as an AnsibleError.
        name = 'fake_module'
        data = b'#!/usr/bin/python\ndef something(:\n pass\n'
        with pytest.raises(ansible.errors.AnsibleError) as exec_info:
            recursive_finder(name, data, *finder_containers)
        assert 'Unable to import fake_module due to invalid syntax' in str(exec_info)

    # NOTE(review): 'identation' in this test name is a typo for 'indentation';
    # kept as-is because renaming would change the collected test id.
    def test_module_utils_with_identation_error(self, finder_containers):
        name = 'fake_module'
        data = b'#!/usr/bin/python\n def something():\n pass\n'
        with pytest.raises(ansible.errors.AnsibleError) as exec_info:
            recursive_finder(name, data, *finder_containers)
        assert 'Unable to import fake_module due to unexpected indent' in str(exec_info)

    def test_from_import_toplevel_package(self, finder_containers, mocker):
        # imp.find_module returns a file object: bytes on PY2, text on PY3.
        if PY2:
            module_utils_data = BytesIO(b'# License\ndef do_something():\n    pass\n')
        else:
            module_utils_data = StringIO(u'# License\ndef do_something():\n    pass\n')
        mocker.patch('imp.find_module', side_effect=partial(find_package_foo, module_utils_data))
        mocker.patch('ansible.executor.module_common._slurp', side_effect=lambda x: b'# License\ndef do_something():\n    pass\n')

        name = 'ping'
        data = b'#!/usr/bin/python\nfrom ansible.module_utils import foo'
        recursive_finder(name, data, *finder_containers)
        mocker.stopall()

        # Packages are recorded via their __init__ module.
        assert finder_containers.py_module_names == set((('foo', '__init__'),)).union(ONLY_BASIC_IMPORT)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/foo/__init__.py',)).union(ONLY_BASIC_FILE)

    def test_from_import_toplevel_module(self, finder_containers, mocker):
        if PY2:
            module_utils_data = BytesIO(b'# License\ndef do_something():\n    pass\n')
        else:
            module_utils_data = StringIO(u'# License\ndef do_something():\n    pass\n')
        mocker.patch('imp.find_module', side_effect=partial(find_module_foo, module_utils_data))

        name = 'ping'
        data = b'#!/usr/bin/python\nfrom ansible.module_utils import foo'
        recursive_finder(name, data, *finder_containers)
        mocker.stopall()

        assert finder_containers.py_module_names == set((('foo',),)).union(MODULE_UTILS_BASIC_IMPORTS)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/foo.py',)).union(MODULE_UTILS_BASIC_FILES)

    #
    # Test importing six with many permutations because it is not a normal module
    #
    def test_from_import_six(self, finder_containers):
        name = 'ping'
        data = b'#!/usr/bin/python\nfrom ansible.module_utils import six'
        recursive_finder(name, data, *finder_containers)
        assert finder_containers.py_module_names == set((('six', '__init__'),)).union(MODULE_UTILS_BASIC_IMPORTS)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES)

    def test_import_six(self, finder_containers):
        name = 'ping'
        data = b'#!/usr/bin/python\nimport ansible.module_utils.six'
        recursive_finder(name, data, *finder_containers)
        assert finder_containers.py_module_names == set((('six', '__init__'),)).union(MODULE_UTILS_BASIC_IMPORTS)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES)

    def test_import_six_from_many_submodules(self, finder_containers):
        # Deep six.moves imports still resolve to the single six package.
        name = 'ping'
        data = b'#!/usr/bin/python\nfrom ansible.module_utils.six.moves.urllib.parse import urlparse'
        recursive_finder(name, data, *finder_containers)
        assert finder_containers.py_module_names == set((('six', '__init__'),)).union(MODULE_UTILS_BASIC_IMPORTS)
        assert finder_containers.py_module_cache == {}
        assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py',)).union(MODULE_UTILS_BASIC_FILES)
|
throwable-one/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/tests/modeltests/reserved_names/tests.py | 92 | import datetime
from django.test import TestCase
from models import Thing
class ReservedNameTests(TestCase):
    """Exercise a model whose field names are SQL reserved words."""

    def generate(self):
        # Populate two Thing rows used by the read-only tests below.
        Thing.objects.create(when='a', join='b', like='c', drop='d',
                             alter='e', having='f',
                             where=datetime.date(2005, 1, 1), has_hyphen='h')
        Thing.objects.create(when='h', join='i', like='j', drop='k',
                             alter='l', having='m',
                             where=datetime.date(2006, 2, 2))

    def test_simple(self):
        first = Thing.objects.create(when='a', join='b', like='c', drop='d',
                                     alter='e', having='f',
                                     where=datetime.date(2005, 1, 1),
                                     has_hyphen='h')
        self.assertEqual(first.when, 'a')

        second = Thing.objects.create(when='h', join='i', like='j', drop='k',
                                      alter='l', having='m',
                                      where=datetime.date(2006, 2, 2))
        self.assertEqual(second.when, 'h')

    def test_order_by(self):
        self.generate()
        ordered = [thing.when for thing in Thing.objects.order_by('when')]
        self.assertEqual(ordered, ['a', 'h'])

    def test_fields(self):
        self.generate()
        thing = Thing.objects.get(pk='a')
        self.assertEqual(thing.join, 'b')
        self.assertEqual(thing.where, datetime.date(2005, 1, 1))

    def test_dates(self):
        self.generate()
        resp = Thing.objects.dates('where', 'year')
        expected = [
            datetime.datetime(2005, 1, 1, 0, 0),
            datetime.datetime(2006, 1, 1, 0, 0),
        ]
        self.assertEqual(list(resp), expected)

    def test_month_filter(self):
        self.generate()
        january = Thing.objects.filter(where__month=1)
        self.assertEqual(january[0].when, 'a')
|
pusher/pusher-http-python | refs/heads/master | examples/trigger_tornado.py | 1 | import pusher
import pusher.tornado
import tornado.ioloop
# Grab the singleton Tornado IO loop that will drive the async HTTP request.
ioloop = tornado.ioloop.IOLoop.instance()

def show_response(response):
    """Print the trigger result and stop the IO loop once the call completes."""
    print(response.result())
    ioloop.stop()

# Build a Pusher client from environment variables, using the non-blocking
# Tornado backend so trigger() returns a future instead of blocking.
pusher_client = pusher.Pusher.from_env(
    backend=pusher.tornado.TornadoBackend,
    timeout=50
)

# Fire the event; the request only actually runs once the IO loop starts.
response = pusher_client.trigger("hello", "world", dict(foo='bar'))
response.add_done_callback(show_response)

print("Before start")
# Blocks here until show_response() calls ioloop.stop().
ioloop.start()
print("After start")
|
naziris/HomeSecPi | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py | 316 | # urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectionError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
LocationParseError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
get_host,
is_connection_dropped,
Timeout,
)
# Deliberately shadow the builtin name so `xrange` works on Python 2 and 3.
xrange = six.moves.xrange

# Module-level logger for connection and pool diagnostics.
log = logging.getLogger(__name__)

# Unique sentinel meaning "argument not supplied" (None is a valid timeout).
_Default = object()


## Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if host is None:
            raise LocationParseError(host)

        # httplib rejects bracketed IPv6 literals, so store the bare address.
        self.host = host.strip('[]')
        self.port = port

    def __str__(self):
        cls_name = type(self).__name__
        return '%s(host=%r, port=%r)' % (cls_name, self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values meaning "a non-blocking read would block" (EAGAIN/EWOULDBLOCK),
# used to translate those socket errors into read timeouts.
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.

    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.

        .. note::
           Only works in Python 2. This parameter is ignored in Python 3.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to false, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`"

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`"
    """

    scheme = 'http'
    ConnectionCls = HTTPConnection

    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, _proxy=None, _proxy_headers=None, **conn_kw):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # This is for backwards compatibility and can be removed once a timeout
        # can only be set to a Timeout object
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        self.timeout = timeout

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0

        if sys.version_info < (2, 7):  # Python 2.6 and older
            conn_kw.pop('source_address', None)
        # Remaining keyword args are forwarded verbatim to ConnectionCls.
        self.conn_kw = conn_kw

    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))

        conn = self.ConnectionCls(host=self.host, port=self.port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        if self.proxy is not None:
            # Enable Nagle's algorithm for proxies, to avoid packet
            # fragmentation.
            conn.tcp_nodelay = 0
        return conn

    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()

        # conn may be None here (empty pool slot); create a fresh one.
        return conn or self._new_conn()

    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s" %
                self.host)

        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()

    def _get_timeout(self, timeout):
        """ Helper that always returns a :class:`urllib3.util.Timeout` """
        if timeout is _Default:
            return self.timeout.clone()

        if isinstance(timeout, Timeout):
            return timeout.clone()
        else:
            # User passed us an int/float. This is for backwards compatibility,
            # can be removed later
            return Timeout.from_float(timeout)

    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)

        try:
            timeout_obj.start_connect()
            conn.timeout = timeout_obj.connect_timeout
            # conn.request() calls httplib.*.request, not the method in
            # urllib3.request. It also calls makefile (recv) on the socket.
            conn.request(method, url, **httplib_request_kw)
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, timeout_obj.connect_timeout))

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if hasattr(conn, 'sock'):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url,
                    "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:  # Python 2.7+, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older
                httplib_response = conn.getresponse()
        except SocketTimeout:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % read_timeout)

        except BaseSSLError as e:
            # Catch possible read timeouts thrown as SSL errors. If not the
            # case, rethrow the original. We need to do this because of:
            # http://bugs.python.org/issue10272
            if 'timed out' in str(e) or \
               'did not complete (read)' in str(e):  # Python 2.6
                raise ReadTimeoutError(self, url, "Read timed out.")

            raise

        except SocketError as e:  # Platform-specific: Python 2
            # See the above comment about EAGAIN in Python 3. In Python 2 we
            # have to specifically catch it and throw the timeout error
            if e.errno in _blocking_errnos:
                raise ReadTimeoutError(
                    self, url,
                    "Read timed out. (read timeout=%s)" % read_timeout)

            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        return httplib_response

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None

        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.close()

        except Empty:
            pass  # Done.

    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        if url.startswith('/'):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Number of retries to allow before raising a MaxRetryError exception.
            If `False`, then retries are disabled and any exception is raised
            immediately.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        # retries may be an int or the literal False (retries disabled).
        if retries < 0 and retries is not False:
            raise MaxRetryError(self, url)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries - 1)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue
            conn = self._get_conn(timeout=pool_timeout)

            # Make the request on the httplib connection object
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Release connection unconditionally because there is no way to
            # close it externally in case of exception.
            release_conn = True
            raise SSLError(e)

        except (TimeoutError, HTTPException, SocketError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # be replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if not retries:
                if isinstance(e, TimeoutError):
                    # TimeoutError is exempt from MaxRetryError-wrapping.
                    # FIXME: ... Not sure why. Add a reason here.
                    raise

                # Wrap unexpected exceptions with the most appropriate
                # module-level exception and re-raise.
                if isinstance(e, SocketError) and self.proxy:
                    raise ProxyError('Cannot connect to proxy.', e)

                if retries is False:
                    raise ConnectionError('Connection failed.', e)

                raise MaxRetryError(self, url, e)

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again: recurse with one fewer retry. conn is None here only
            # when the request failed and the connection was discarded above.
            log.warning("Retrying (%d attempts remain) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries - 1,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location and retries is not False:
            if response.status == 303:
                # See Other: the redirected request must be a GET.
                method = 'GET'
            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                                retries - 1, redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.

    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
    ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
    :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
    into an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 **conn_kw):

        # Python 2.6 httplib does not understand ``source_address``; drop it
        # quietly so the connection constructor does not choke.
        if sys.version_info < (2, 7):  # Python 2.6 or older
            conn_kw.pop('source_address', None)

        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, _proxy, _proxy_headers,
                                    **conn_kw)

        # TLS-specific settings; applied to each connection in _prepare_conn().
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.conn_kw = conn_kw

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
            conn.conn_kw = self.conn_kw

        if self.proxy is not None:
            # ``set_tunnel`` was a private method (``_set_tunnel``) before
            # Python 2.7.
            try:
                tunnel = conn.set_tunnel
            except AttributeError:  # Platform-specific: Python 2.6
                tunnel = conn._set_tunnel
            tunnel(self.host, self.port, self.proxy_headers)

            # Establish tunnel connection early, because otherwise httplib
            # would improperly set Host: header to proxy's IP:port.
            conn.connect()

        return conn

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            # Platform-specific: Python without ssl
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")

        # With a proxy, the TCP connection targets the proxy endpoint; the
        # real host is reached through the CONNECT tunnel set up above.
        if self.proxy is None:
            actual_host, actual_port = self.host, self.port
        else:
            actual_host, actual_port = self.proxy.host, self.proxy.port

        extra_params = {}
        if not six.PY3:  # Python 2
            extra_params['strict'] = self.strict
        extra_params.update(self.conn_kw)

        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  **extra_params)
        if self.proxy is not None:
            # Enable Nagle's algorithm for proxies, to avoid packet
            # fragmentation.
            conn.tcp_nodelay = 0

        return self._prepare_conn(conn)
def connection_from_url(url, **kw):
    r"""
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example: ::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    # NOTE: the docstring is now a raw string; the previous non-raw form
    # contained the invalid escape sequence ``\**`` which triggers a
    # Deprecation/SyntaxWarning on modern CPython.
    # get_host splits the URL into (scheme, host, port); path and query are
    # intentionally ignored here.
    scheme, host, port = get_host(url)
    if scheme == 'https':
        return HTTPSConnectionPool(host, port=port, **kw)
    else:
        return HTTPConnectionPool(host, port=port, **kw)
|
sbidoul/odoo | refs/heads/8.0 | addons/crm_claim/__openerp__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Claims Management',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
Manage Customer Claims.
=======================
This application allows you to track your customers/suppliers claims and grievances.
It is fully integrated with the email gateway so that you can create
automatically new claims based on incoming emails.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['crm'],
'data': [
'crm_claim_view.xml',
'crm_claim_menu.xml',
'security/ir.model.access.csv',
'report/crm_claim_report_view.xml',
'crm_claim_data.xml',
'res_partner_view.xml',
],
'demo': ['crm_claim_demo.xml'],
'test': [
'test/process/claim.yml',
'test/ui/claim_demo.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nimengyu2/dm3730-android-gingerbread-2.3-dk1.0-kernel | refs/heads/dm3730-android-gingerbread-2.3-dk1.0-first | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	"""Tree representation of a traced call stack.

	Each node is one traced function call. Calls whose parent never
	appears in the trace (interrupts, syscalls, kernel threads, ...)
	are attached to a shared virtual root kept in the class attribute
	ROOT.
	"""
	ROOT = None

	def __init__(self, func, time = None, parent = None):
		self._func = func
		self._time = time
		# Fall back to the shared virtual root when no parent is given.
		self._parent = CallTree.ROOT if parent is None else parent
		self._children = []

	def calls(self, func, calltime):
		"""Record that this function called *func* at *calltime*.

		@return: A reference to the newly created child node.
		"""
		node = CallTree(func, calltime, self)
		self._children.append(node)
		return node

	def getParent(self, func):
		"""Find the closest ancestor (including self) named *func*.

		If no ancestor matches, a fresh child of ROOT is created and
		returned instead.
		@return: A reference to the parent.
		"""
		node = self
		while node != CallTree.ROOT and node._func != func:
			node = node._parent
		if node == CallTree.ROOT:
			return CallTree.ROOT.calls(func, None)
		return node

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# Render this node, then recurse into the children drawing the
		# ASCII branch decorations.
		if self._time is None:
			rendered = "%s----%s\n" % (branch, self._func)
		else:
			rendered = "%s----%s (%s)\n" % (branch, self._func, self._time)
		if lastChild:
			branch = branch[:-1] + " "
		total = len(self._children)
		for index, child in enumerate(self._children):
			rendered += "%s" % child.__toString(branch + " |",
							index == total - 1)
		return rendered
class BrokenLineException(Exception):
	"""Raised for a line truncated by the pipe breakage.

	Processing stops at such a line and it is ignored.
	"""
	pass
class CommentLineException(Exception):
	"""Raised for a comment line (as at the top of the trace file).

	Such lines are simply skipped by the caller.
	"""
	pass
def parseLine(line):
	"""Split one trace line into (calltime, callee, caller).

	Raises CommentLineException for comment lines and
	BrokenLineException for lines that do not match the function
	tracer's output format.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.group(1), match.group(2), match.group(3)
def main():
	"""Read a raw function trace on stdin and print the drawn call tree."""
	# The virtual root adopts every call whose parent never appears in
	# the trace (interrupts, syscalls, kernel threads, ...).
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT

	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# The final line may be cut short by the broken pipe;
			# stop processing there.
			break
		except CommentLineException:
			continue
		# Re-root at the caller's node, then descend into the callee.
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)

	print CallTree.ROOT

if __name__ == "__main__":
	main()
|
SlimRemix/android_external_chromium_org | refs/heads/lp5.1 | tools/prepare-bisect-perf-regression.py | 84 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare Performance Test Bisect Tool
This script is used by a try bot to create a working directory and sync an
initial copy of the depot for use in bisecting performance regressions.
An example usage:
./tools/prepare-bisect-perf-regressions.py --working_directory "~/builds"
--output_buildbot_annotations
Would result in creating ~/builds/bisect and then populating it with a copy of
the depot.
"""
import optparse
import sys
from auto_bisect import bisect_utils
def main():
  """Does an initial checkout of Chromium then exits."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Prepares a temporary depot for use on a try bot.')

  parser = optparse.OptionParser(usage=usage)

  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='Path to the working directory where the script will '
                         'do an initial checkout of the chromium depot. The '
                         'files will be placed in a subdirectory "bisect" under '
                         'working_directory and that will be used to perform the '
                         'bisection.')
  parser.add_option('--output_buildbot_annotations',
                    action='store_true',
                    help='Add extra annotation output for buildbot.')
  parser.add_option('--target_platform',
                    type='choice',
                    choices=['chromium', 'cros', 'android'],
                    default='chromium',
                    help='The target platform. Choices are "chromium" (current '
                         'platform), "cros", or "android". If you specify something '
                         'other than "chromium", you must be properly set up to '
                         'build that platform.')
  opts, _ = parser.parse_args()

  # --working_directory is effectively required; bail out with usage help.
  if not opts.working_directory:
    print 'Error: missing required parameter: --working_directory'
    print
    parser.print_help()
    return 1

  # Only create and populate the bisect depot if it is not already there.
  if not bisect_utils.CheckIfBisectDepotExists(opts):
    try:
      bisect_utils.CreateBisectDirectoryAndSetupDepot(
          opts, bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS)
    except RuntimeError:
      # Setup failure is reported as a non-zero exit for the try bot.
      return 1
  return 0


if __name__ == '__main__':
  sys.exit(main())
|
TheWardoctor/Wardoctors-repo | refs/heads/master | script.module.covenant/lib/resources/lib/sources/pl/paczamy.py | 7 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
class source:
    """Covenant scraper source for paczamy.pl (a Polish streaming index)."""

    def __init__(self):
        self.priority = 1
        self.language = ['pl']
        self.domains = ['paczamy.pl']
        self.base_link = 'http://paczamy.pl'
        # Site endpoints; the %s placeholders are filled in search()/episode().
        self.search_link = '/szukaj?q=%s'
        self.episode_link = '/seasons/%s/episodes/%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Resolve a movie page URL by its localized (Polish) title."""
        return self.search(localtitle, year, 'movies')

    def findMatchByYear(self, year, urls):
        """From candidate title pages, return the one whose header year matches.

        Returns None implicitly when no candidate matches.
        """
        for url in urls:
            result = client.request(url)
            result = client.parseDOM(result, 'h1')[0]
            result = client.parseDOM(result, 'a')[0]
            # The release year is embedded in parentheses in the page heading.
            found_year = result[result.find("(") + 1:result.find(")")]
            if(found_year == year):
                return url

    def search(self, localtitle, year, search_type):
        """Search a site section ('movies' or 'series') for a title.

        Returns the matching page URL, or None; any scraping error is
        deliberately swallowed (best-effort provider contract).
        """
        try:
            simply_name = cleantitle.get(localtitle)
            query = self.search_link % urllib.quote_plus(cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result, 'div', attrs={'id':search_type})
            links = client.parseDOM(result, 'figcaption')
            names = client.parseDOM(result, 'figcaption', ret='title')
            urls = []
            for i in range(len(names)):
                name = cleantitle.get(names[i])
                url = client.parseDOM(links[i], 'a', ret='href')[0]
                if(name == simply_name):
                    urls.append(url)
            # A single hit is unambiguous; otherwise disambiguate by year.
            if len(urls) == 1:
                return urls[0]
            else:
                return self.findMatchByYear(year, urls)
        except :
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Resolve a TV show page URL by its localized title."""
        return self.search(localtvshowtitle, year, 'series')

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Append the season/episode path to a show URL; None on failure."""
        try:
            query = self.episode_link % (season, episode)
            return url + query
        except:
            return

    def get_lang_by_type(self, lang_type):
        """Map the site's audio-type label to a (language, info) pair."""
        if 'LEKTOR' in lang_type:
            return 'pl', 'Lektor'
        if 'DUBBING' in lang_type:
            return 'pl', 'Dubbing'
        if 'NAPIS' in lang_type :
            return 'pl', 'Napisy'
        return 'en', None

    def sources(self, url, hostDict, hostprDict):
        """Scrape the hoster table on a title page into source dicts."""
        sources = []
        try:
            if url == None: return sources
            result = client.request(url)
            rows = client.parseDOM(result, 'tr', attrs={'data-id':'.*?'})
            for row in rows:
                try:
                    # The outbound hoster link is tucked into a data-bind
                    # attribute as a quoted string.
                    link = client.parseDOM(row, 'td', attrs={'class':'name hover'}, ret='data-bind')[0]
                    link = re.findall(r"'(.*?)'", link, re.DOTALL)[0]
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue
                    found_quality = client.parseDOM(row, 'td')[1]
                    q = 'SD'
                    # 'Wysoka' (Polish: "high") marks HD quality.
                    if 'Wysoka' in found_quality: q = 'HD'
                    type_desc= client.parseDOM(row, 'font')[0]
                    lang, info = self.get_lang_by_type(type_desc)
                    sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    # Skip malformed rows; one bad row must not abort the scrape.
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        """Links are already direct hoster URLs; nothing to resolve."""
        return url
|
HackerTool/vivisect | refs/heads/master | vstruct/defs/windows/win_6_2_amd64/win32k.py | 12 | # Version: 6.2
# Architecture: amd64
import vstruct
from vstruct.primitives import *
|
jaddison/ansible-modules-core | refs/heads/devel | cloud/digital_ocean/digital_ocean_sshkey.py | 133 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey: >
state=present
name=my_ssh_key
ssh_pub_key='ssh-rsa AAAA...'
client_id=XXX
api_key=XXX
'''
import os
import time
try:
    from dopy.manager import DoError, DoManager
    HAS_DOPY = True
except ImportError:
    HAS_DOPY = False

    # Fallback base class so the TimeoutError definition below does not
    # raise a NameError when dopy is missing. Previously a missing dopy
    # crashed the module at import time instead of letting main() report
    # the missing dependency via module.fail_json().
    class DoError(Exception):
        pass


class TimeoutError(DoError):
    """DoError raised when an operation exceeds its deadline.

    Carries the id of the resource (e.g. droplet) that timed out so the
    caller can report it back to the user.
    """

    def __init__(self, msg, id):
        super(TimeoutError, self).__init__(msg)
        # Identifier of the object the timeout relates to.
        self.id = id
class JsonfyMixIn(object):
    """Mix-in exposing an object's attributes as its JSON-ready form."""

    def to_json(self):
        # Note: this intentionally returns the live attribute dictionary,
        # not a copy.
        return self.__dict__
class SSH(JsonfyMixIn):
    # Shared DoManager instance, initialised once via setup().
    manager = None

    def __init__(self, ssh_key_json):
        # Mirror the API's JSON payload directly onto the instance, so
        # attributes like .id and .name come straight from DigitalOcean.
        self.__dict__.update(ssh_key_json)
    # Alias: refreshing an existing wrapper re-uses __init__ in place.
    update_attr = __init__

    def destroy(self):
        """Delete this key on DigitalOcean; always returns True."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        """Bind the shared DoManager used by all class-level operations."""
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the first key whose name matches exactly, else False."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        """All account SSH keys wrapped as SSH instances.

        Note: on Python 2 ``map`` returns a list, which callers iterate.
        """
        json = cls.manager.all_ssh_keys()
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        """Register a new public key and return its SSH wrapper."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Apply the requested state (present/absent) for the named SSH key."""
    def getkeyordie(k):
        # Fetch a required module parameter or abort the run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError, e:
        module.fail_json(msg='Unable to load %s' % e.message)

    changed = True
    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    # NOTE(review): ``state in ('present')`` is a substring test against the
    # string 'present', not tuple membership; it happens to work because the
    # argument spec restricts the choices, but ('present',) was likely meant.
    if state in ('present'):
        key = SSH.find(name)
        if key:
            # Key already exists: idempotent success, changed=False.
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state in ('absent'):
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)

        key.destroy()
        module.exit_json(changed=True)
def main():
    """Module entry point: build the AnsibleModule and dispatch to core()."""
    argument_spec = dict(
        state = dict(choices=['present', 'absent'], default='present'),
        client_id = dict(aliases=['CLIENT_ID'], no_log=True),
        api_key = dict(aliases=['API_KEY'], no_log=True),
        name = dict(type='str'),
        id = dict(aliases=['droplet_id'], type='int'),
        ssh_pub_key = dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec = argument_spec,
        required_one_of = (
            ['id', 'name'],
        ),
    )

    # Fail early and cleanly when the dopy client library is absent.
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')

    try:
        core(module)
    except TimeoutError as e:
        # Timeouts carry the id of the resource that stalled.
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))
# import module snippets
# Ansible inlines the module_utils boilerplate in place of this wildcard
# import when the module is shipped to the target host; it must stay here.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
|
fentas/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/bindings/main.py | 117 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
from webkitpy.common.checkout.scm.detection import detect_scm_system
from webkitpy.common.system.executive import ScriptError
class BindingsTests:
    """Driver for WebKit's run-bindings-tests.

    Generates bindings from the test IDL files with each requested code
    generator and diffs the output against checked-in reference files,
    or overwrites the references when ``reset_results`` is set.
    """

    def __init__(self, reset_results, generators, executive):
        # When True, regenerate the reference files instead of diffing.
        self.reset_results = reset_results
        # Code generator names (e.g. 'JS', 'ObjC', 'GObject', 'CPP').
        self.generators = generators
        # Executive used to run the external perl/diff commands.
        self.executive = executive

    def generate_from_idl(self, generator, idl_file, output_directory, supplemental_dependency_file):
        """Run generate-bindings.pl for one IDL file; return its exit code."""
        cmd = ['perl', '-w',
               '-IWebCore/bindings/scripts',
               'WebCore/bindings/scripts/generate-bindings.pl',
               # idl include directories (path relative to generate-bindings.pl)
               '--include', '.',
               '--defines', 'TESTING_%s' % generator,
               '--generator', generator,
               '--outputDir', output_directory,
               '--supplementalDependencyFile', supplemental_dependency_file,
               idl_file]

        exit_code = 0
        try:
            output = self.executive.run_command(cmd)
            if output:
                print output
        except ScriptError, e:
            print e.output
            exit_code = e.exit_code
        return exit_code

    def generate_supplemental_dependency(self, input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file):
        """Run preprocess-idls.pl over all IDLs; return the exit code."""
        # mkstemp() returns an (fd, path) tuple; the fd side is filled with
        # one IDL path per line, the path side is passed to the perl script.
        idl_files_list = tempfile.mkstemp()
        for input_file in os.listdir(input_directory):
            (name, extension) = os.path.splitext(input_file)
            if extension != '.idl':
                continue
            os.write(idl_files_list[0], os.path.join(input_directory, input_file) + "\n")
        os.close(idl_files_list[0])

        cmd = ['perl', '-w',
               '-IWebCore/bindings/scripts',
               'WebCore/bindings/scripts/preprocess-idls.pl',
               '--idlFilesList', idl_files_list[1],
               '--defines', '',
               '--supplementalDependencyFile', supplemental_dependency_file,
               '--windowConstructorsFile', window_constructors_file,
               '--workerGlobalScopeConstructorsFile', workerglobalscope_constructors_file,
               '--sharedWorkerGlobalScopeConstructorsFile', sharedworkerglobalscope_constructors_file,
               '--dedicatedWorkerGlobalScopeConstructorsFile', dedicatedworkerglobalscope_constructors_file]

        exit_code = 0
        try:
            output = self.executive.run_command(cmd)
            if output:
                print output
        except ScriptError, e:
            print e.output
            exit_code = e.exit_code
        os.remove(idl_files_list[1])
        return exit_code

    def detect_changes(self, generator, work_directory, reference_directory):
        """Diff generated output against references; True if any file differs."""
        changes_found = False
        for output_file in os.listdir(work_directory):
            cmd = ['diff',
                   '-u',
                   '-N',
                   os.path.join(reference_directory, output_file),
                   os.path.join(work_directory, output_file)]

            exit_code = 0
            try:
                output = self.executive.run_command(cmd)
            except ScriptError, e:
                output = e.output
                exit_code = e.exit_code

            # diff exits non-zero (or prints a hunk) when the files differ.
            if exit_code or output:
                print 'FAIL: (%s) %s' % (generator, output_file)
                print output
                changes_found = True
            else:
                print 'PASS: (%s) %s' % (generator, output_file)
        return changes_found

    def run_tests(self, generator, input_directory, reference_directory, supplemental_dependency_file):
        """Generate bindings for every test IDL and compare to references."""
        work_directory = reference_directory
        passed = True
        for input_file in os.listdir(input_directory):
            (name, extension) = os.path.splitext(input_file)
            if extension != '.idl':
                continue
            # Generate output into the work directory (either the given one or a
            # temp one if not reset_results is performed)
            if not self.reset_results:
                work_directory = tempfile.mkdtemp()
            if self.generate_from_idl(generator,
                                      os.path.join(input_directory, input_file),
                                      work_directory,
                                      supplemental_dependency_file):
                passed = False
            if self.reset_results:
                print "Reset results: (%s) %s" % (generator, input_file)
                continue
            # Detect changes
            if self.detect_changes(generator, work_directory, reference_directory):
                passed = False
            shutil.rmtree(work_directory)
        return passed

    def main(self):
        """Entry point: run all generators; 0 on success, -1 on any failure."""
        current_scm = detect_scm_system(os.curdir)
        os.chdir(os.path.join(current_scm.checkout_root, 'Source'))

        all_tests_passed = True

        input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test')
        # mkstemp() returns (fd, path); only the temp-file paths are kept.
        supplemental_dependency_file = tempfile.mkstemp()[1]
        window_constructors_file = tempfile.mkstemp()[1]
        workerglobalscope_constructors_file = tempfile.mkstemp()[1]
        sharedworkerglobalscope_constructors_file = tempfile.mkstemp()[1]
        dedicatedworkerglobalscope_constructors_file = tempfile.mkstemp()[1]
        if self.generate_supplemental_dependency(input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file):
            print 'Failed to generate a supplemental dependency file.'
            os.remove(supplemental_dependency_file)
            os.remove(window_constructors_file)
            os.remove(workerglobalscope_constructors_file)
            os.remove(sharedworkerglobalscope_constructors_file)
            os.remove(dedicatedworkerglobalscope_constructors_file)
            return -1

        for generator in self.generators:
            input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test')
            reference_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test', generator)
            if not self.run_tests(generator, input_directory, reference_directory, supplemental_dependency_file):
                all_tests_passed = False

        os.remove(supplemental_dependency_file)
        os.remove(window_constructors_file)
        os.remove(workerglobalscope_constructors_file)
        os.remove(sharedworkerglobalscope_constructors_file)
        os.remove(dedicatedworkerglobalscope_constructors_file)

        print ''
        if all_tests_passed:
            print 'All tests PASS!'
            return 0
        else:
            print 'Some tests FAIL! (To update the reference files, execute "run-bindings-tests --reset-results")'
            return -1
|
uw-it-aca/nagios_registration | refs/heads/master | nagios_registration/migrations/0008_host_contact_groups.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the optional ``contact_groups`` character field to the Host model.

    dependencies = [
        ('nagios_registration', '0007_contact_contactgroup'),
    ]

    operations = [
        migrations.AddField(
            model_name='host',
            name='contact_groups',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
|
duqiao/django | refs/heads/master | django/conf/locale/uk/formats.py | 565 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Ukrainian (uk) locale formats; unset values fall back to Django defaults.
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y р.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j E Y р. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
# NUMBER_GROUPING =
|
fangxingli/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/forms/models.py | 45 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import absolute_import, unicode_literals
import warnings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, FieldError
from django.forms.fields import Field, ChoiceField
from django.forms.forms import BaseForm, get_declared_fields
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.util import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput, media_property, CheckboxSelectMultiple)
from django.utils.encoding import smart_text, force_text
from django.utils.datastructures import SortedDict
from django.utils import six
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext, string_concat
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    from django.db import models
    opts = instance._meta

    cleaned_data = form.cleaned_data
    deferred_files = []
    for field in opts.fields:
        if (not field.editable or isinstance(field, models.AutoField)
                or field.name not in cleaned_data):
            continue
        if fields is not None and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        # File-type fields are saved after all others, so that a callable
        # upload_to can read the values already assigned above.
        if isinstance(field, models.FileField):
            deferred_files.append(field)
        else:
            field.save_form_data(instance, cleaned_data[field.name])

    for field in deferred_files:
        field.save_form_data(instance, cleaned_data[field.name])

    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """
    Saves bound Form ``form``'s cleaned_data into model instance ``instance``.

    If commit=True, then the changes to ``instance`` will be saved to the
    database. Returns ``instance``.

    If construct=False, assume ``instance`` has already been constructed and
    just needs to be saved.
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)
    opts = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (opts.object_name, fail_message))

    def save_m2m():
        # Deferred saving of many-to-many data. For historical reasons
        # virtual fields are included too (GenericRelation was previously
        # a fake m2m field).
        cleaned_data = form.cleaned_data
        for f in opts.many_to_many + opts.virtual_fields:
            if not hasattr(f, 'save_form_data'):
                continue
            if fields and f.name not in fields:
                continue
            if exclude and f.name in exclude:
                continue
            if f.name in cleaned_data:
                f.save_form_data(instance, cleaned_data[f.name])

    if not commit:
        # Not committing: expose save_m2m so the caller can invoke it once
        # the instance has been saved and has a primary key.
        form.save_m2m = save_m2m
        return instance

    # Committing: persist the instance and the m2m data immediately.
    instance.save()
    save_m2m()
    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    opts = instance._meta
    data = {}
    for f in opts.concrete_fields + opts.virtual_fields + opts.many_to_many:
        if not getattr(f, 'editable', False):
            continue
        if fields and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if not isinstance(f, ManyToManyField):
            data[f.name] = f.value_from_object(instance)
        elif instance.pk is None:
            # Unsaved instance: no m2m rows can exist yet, and calling
            # f.value_from_object would raise an exception.
            data[f.name] = []
        else:
            # MultipleChoiceWidget needs a list of pks, not object instances.
            data[f.name] = list(
                f.value_from_object(instance).values_list('pk', flat=True))
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
                     formfield_callback=None, localized_fields=None,
                     labels=None, help_texts=None, error_messages=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``localized_fields`` is a list of names of fields which should be localized.

    ``labels`` is a dictionary of model field names mapped to a label.

    ``help_texts`` is a dictionary of model field names mapped to a help text.

    ``error_messages`` is a dictionary of model field names mapped to a
    dictionary of error messages.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.
    """
    field_list = []
    ignored = []
    opts = model._meta
    # Avoid circular import
    from django.db.models.fields import Field as ModelField
    # Only virtual fields that are real model fields can participate in the
    # sort below.
    sortable_virtual_fields = [field for field in opts.virtual_fields
                               if isinstance(field, ModelField)]
    candidates = sorted(opts.concrete_fields + sortable_virtual_fields +
                        opts.many_to_many)
    for model_field in candidates:
        if not getattr(model_field, 'editable', False):
            continue
        name = model_field.name
        if fields is not None and name not in fields:
            continue
        if exclude and name in exclude:
            continue
        # Collect per-field customizations supplied by the caller.
        kwargs = {}
        for kwarg, source in (('widget', widgets), ('label', labels),
                              ('help_text', help_texts),
                              ('error_messages', error_messages)):
            if source and name in source:
                kwargs[kwarg] = source[name]
        if localized_fields == ALL_FIELDS or (localized_fields and name in localized_fields):
            kwargs['localize'] = True
        if formfield_callback is None:
            formfield = model_field.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(model_field, **kwargs)
        if formfield:
            field_list.append((name, formfield))
        else:
            # Fields whose formfield() returned None (e.g. non-editable
            # AutoFields) are remembered so an explicit ``fields`` entry for
            # them can be dropped below.
            ignored.append(name)
    field_dict = SortedDict(field_list)
    if fields:
        # Re-order to match the caller-supplied ``fields`` list.
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
class ModelFormOptions(object):
    """Normalized view of a ModelForm's inner ``Meta`` options class.

    Every supported option is read off ``options`` with getattr and
    defaults to None when absent.
    """
    def __init__(self, options=None):
        for name in ('model', 'fields', 'exclude', 'widgets',
                     'localized_fields', 'labels', 'help_texts',
                     'error_messages'):
            setattr(self, name, getattr(options, name, None))
class ModelFormMetaclass(type):
    """
    Metaclass for ModelForm: merges form fields generated from the Meta.model
    with any explicitly declared fields, storing the result on the new class
    as ``base_fields``.
    """
    def __new__(cls, name, bases, attrs):
        # Pop the callback so it doesn't become an attribute of the class.
        formfield_callback = attrs.pop('formfield_callback', None)
        try:
            parents = [b for b in bases if issubclass(b, ModelForm)]
        except NameError:
            # We are defining ModelForm itself.
            parents = None
        declared_fields = get_declared_fields(bases, attrs, False)
        new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
                attrs)
        # ModelForm itself (no ModelForm parents) gets no further processing.
        if not parents:
            return new_class
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
        # We check if a string was passed to `fields` or `exclude`,
        # which is likely to be a mistake where the user typed ('foo') instead
        # of ('foo',)
        for opt in ['fields', 'exclude', 'localized_fields']:
            value = getattr(opts, opt)
            if isinstance(value, six.string_types) and value != ALL_FIELDS:
                msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
                       "Did you mean to type: ('%(value)s',)?" % {
                           'model': new_class.__name__,
                           'opt': opt,
                           'value': value,
                       })
                raise TypeError(msg)
        if opts.model:
            # If a model is defined, extract form fields from it.
            if opts.fields is None and opts.exclude is None:
                # This should be some kind of assertion error once deprecation
                # cycle is complete.
                warnings.warn("Creating a ModelForm without either the 'fields' attribute "
                              "or the 'exclude' attribute is deprecated - form %s "
                              "needs updating" % name,
                              PendingDeprecationWarning, stacklevel=2)
            if opts.fields == ALL_FIELDS:
                # sentinel for fields_for_model to indicate "get the list of
                # fields from the model"
                opts.fields = None
            fields = fields_for_model(opts.model, opts.fields, opts.exclude,
                                      opts.widgets, formfield_callback,
                                      opts.localized_fields, opts.labels,
                                      opts.help_texts, opts.error_messages)
            # make sure opts.fields doesn't specify an invalid field
            # (fields_for_model maps ignored names to None; any such name not
            # covered by a declared field is unknown).
            none_model_fields = [k for k, v in six.iteritems(fields) if not v]
            missing_fields = set(none_model_fields) - \
                             set(declared_fields.keys())
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(declared_fields)
        else:
            fields = declared_fields
        new_class.declared_fields = declared_fields
        new_class.base_fields = fields
        return new_class
class BaseModelForm(BaseForm):
    """
    The runtime behavior of ModelForm: builds initial data from a model
    instance, runs model-level validation during form cleaning, and saves
    cleaned data back onto ``self.instance``.
    """
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False, instance=None):
        opts = self._meta
        if opts.model is None:
            raise ValueError('ModelForm has no model class specified.')
        if instance is None:
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
                                            error_class, label_suffix, empty_permitted)

    def _update_errors(self, errors):
        """
        Fold a model ValidationError (``errors``) into this form's _errors,
        preferring the form field's own error messages when the error code
        matches one of them.
        """
        for field, messages in errors.error_dict.items():
            if field not in self.fields:
                continue
            field = self.fields[field]
            for message in messages:
                if isinstance(message, ValidationError):
                    # Replace the model's message with the form field's
                    # message for the same code, if one is defined.
                    if message.code in field.error_messages:
                        message.message = field.error_messages[message.code]
        message_dict = errors.message_dict
        for k, v in message_dict.items():
            if k != NON_FIELD_ERRORS:
                self._errors.setdefault(k, self.error_class()).extend(v)
                # Remove the data from the cleaned_data dict since it was invalid
                if k in self.cleaned_data:
                    del self.cleaned_data[k]
        if NON_FIELD_ERRORS in message_dict:
            messages = message_dict[NON_FIELD_ERRORS]
            self._errors.setdefault(NON_FIELD_ERRORS, self.error_class()).extend(messages)

    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._meta.fields:
            field = f.name
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if field not in self.fields:
                exclude.append(f.name)

            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and field not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and field in self._meta.exclude:
                exclude.append(f.name)

            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif field in self._errors.keys():
                exclude.append(f.name)

            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                form_field = self.fields[field]
                field_value = self.cleaned_data.get(field, None)
                if not f.blank and not form_field.required and field_value in form_field.empty_values:
                    exclude.append(f.name)
        return exclude

    def clean(self):
        # Enable validate_unique(); see the note in __init__.
        self._validate_unique = True
        return self.cleaned_data

    def _post_clean(self):
        """Runs model-level validation after the form's own field cleaning."""
        opts = self._meta
        # Update the model instance with self.cleaned_data.
        self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)

        exclude = self._get_validation_exclusions()

        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for f_name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(f_name)

        try:
            self.instance.full_clean(exclude=exclude,
                                     validate_unique=False)
        except ValidationError as e:
            self._update_errors(e)

        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def validate_unique(self):
        """
        Calls the instance's validate_unique() method and updates the form's
        validation errors if any were raised.
        """
        exclude = self._get_validation_exclusions()
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError as e:
            self._update_errors(e)

    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to the
        database. Returns ``instance``.
        """
        # fail_message only affects the error text raised for an invalid form.
        if self.instance.pk is None:
            fail_message = 'created'
        else:
            fail_message = 'changed'
        return save_instance(self, self.instance, self._meta.fields,
                             fail_message, commit, self._meta.exclude,
                             construct=False)

    save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
    """Concrete ModelForm base class; its metaclass builds ``base_fields``
    from the ``Meta`` options declared on each subclass."""
    pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=None, widgets=None, localized_fields=None,
                      labels=None, help_texts=None, error_messages=None):
    """
    Returns a ModelForm containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields. If omitted or '__all__',
    all fields will be used.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``localized_fields`` is a list of names of fields which should be localized.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.

    ``labels`` is a dictionary of model field names mapped to a label.

    ``help_texts`` is a dictionary of model field names mapped to a help text.

    ``error_messages`` is a dictionary of model field names mapped to a
    dictionary of error messages.
    """
    # Build the attributes of a temporary inner Meta class. FIXME: ideally,
    # we should be able to construct a ModelForm without creating and passing
    # in a temporary inner class. Only the options the caller actually
    # supplied are set, so the parent Meta's values shine through otherwise.
    attrs = {'model': model}
    optional = (('fields', fields), ('exclude', exclude), ('widgets', widgets),
                ('localized_fields', localized_fields), ('labels', labels),
                ('help_texts', help_texts), ('error_messages', error_messages))
    for key, value in optional:
        if value is not None:
            attrs[key] = value

    # If parent form class already has an inner Meta, the Meta we're
    # creating needs to inherit from the parent's inner meta.
    bases = (form.Meta, object) if hasattr(form, 'Meta') else (object,)
    Meta = type(str('Meta'), bases, attrs)

    # Give this new form class a reasonable name.
    class_name = model.__name__ + str('Form')

    # Class attributes for the new form class.
    form_class_attrs = {
        'Meta': Meta,
        'formfield_callback': formfield_callback
    }

    # The ModelFormMetaclass will trigger a similar warning/error, but this will
    # be difficult to debug for code that needs updating, so we produce the
    # warning here too.
    if (getattr(Meta, 'fields', None) is None and
            getattr(Meta, 'exclude', None) is None):
        warnings.warn("Calling modelform_factory without defining 'fields' or "
                      "'exclude' explicitly is deprecated",
                      PendingDeprecationWarning, stacklevel=2)

    # Instantiate type(form) so the new class uses the same metaclass as form.
    return type(form)(class_name, (form,), form_class_attrs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    model = None

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        # ``queryset`` selects the existing objects this formset edits;
        # ``initial`` (popped here) supplies data for the extra forms only.
        self.queryset = queryset
        self.initial_extra = kwargs.pop('initial', None)
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            # Unbound: one initial form per existing object.
            return len(self.get_queryset())
        return super(BaseModelFormSet, self).initial_form_count()

    def _existing_object(self, pk):
        # Lazily build and cache a pk -> object map over the queryset.
        if not hasattr(self, '_object_dict'):
            self._object_dict = dict([(o.pk, o) for o in self.get_queryset()])
        return self._object_dict.get(pk)

    def _construct_form(self, i, **kwargs):
        """
        Builds form ``i``, attaching the matching model instance (looked up by
        the submitted pk, falling back to queryset position) and, for extra
        forms, any caller-supplied initial data.
        """
        if self.is_bound and i < self.initial_form_count():
            # Import goes here instead of module-level because importing
            # django.db has side effects.
            from django.db import connections
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            pk_field = self.model._meta.pk
            # Coerce the raw submitted pk into the db-level value so it can
            # be matched against object pks.
            pk = pk_field.get_db_prep_lookup('exact', pk,
                connection=connections[self.get_queryset().db])
            if isinstance(pk, list):
                pk = pk[0]
            kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and not kwargs.get('instance'):
            kwargs['instance'] = self.get_queryset()[i]
        if i >= self.initial_form_count() and self.initial_extra:
            # Set initial values for extra forms
            try:
                kwargs['initial'] = self.initial_extra[i-self.initial_form_count()]
            except IndexError:
                pass
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)

    def get_queryset(self):
        """Returns the cached queryset of existing objects, with a stable order."""
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_queryset()

            # If the queryset isn't already ordered we need to add an
            # artificial ordering here to make sure that all formsets
            # constructed from this queryset have the same form order.
            if not qs.ordered:
                qs = qs.order_by(self.model._meta.pk.name)

            # Removed queryset limiting here. As per discussion re: #13023
            # on django-dev, max_num should not prevent existing
            # related objects/inlines from being displayed.
            self._queryset = qs
        return self._queryset

    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return form.save(commit=commit)

    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return form.save(commit=commit)

    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            # Deferred save: collect the saved forms so their m2m data can be
            # persisted later via the save_m2m closure below.
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)

    save.alters_data = True

    def clean(self):
        self.validate_unique()

    def validate_unique(self):
        """
        Checks unique and unique-for-date constraints across all valid forms
        in the formset, marking duplicates invalid.
        """
        # Collect unique_checks and date_checks to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        forms_to_delete = self.deleted_forms
        valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
        for form in valid_forms:
            exclude = form._get_validation_exclusions()
            unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
            all_unique_checks = all_unique_checks.union(set(unique_checks))
            all_date_checks = all_date_checks.union(set(date_checks))

        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in valid_forms:
                # get data for each field of each of unique_check
                row_data = tuple([form.cleaned_data[field] for field in unique_check if field in form.cleaned_data])
                if row_data and not None in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        for field in unique_check:
                            if field in form.cleaned_data:
                                del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(row_data)
        # iterate over each of the date checks now
        for date_check in all_date_checks:
            seen_data = set()
            uclass, lookup, field, unique_for = date_check
            for form in valid_forms:
                # see if we have data for both fields
                if (form.cleaned_data and form.cleaned_data[field] is not None
                        and form.cleaned_data[unique_for] is not None):
                    # if it's a date lookup we need to get the data for all the fields
                    if lookup == 'date':
                        date = form.cleaned_data[unique_for]
                        date_data = (date.year, date.month, date.day)
                    # otherwise it's just the attribute on the date/datetime
                    # object
                    else:
                        date_data = (getattr(form.cleaned_data[unique_for], lookup),)
                    data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
                    if data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_date_error_message(date_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(data)
        if errors:
            raise ValidationError(errors)

    def get_unique_error_message(self, unique_check):
        # ``unique_check`` is a tuple of field names making up the constraint.
        if len(unique_check) == 1:
            return ugettext("Please correct the duplicate data for %(field)s.") % {
                "field": unique_check[0],
            }
        else:
            return ugettext("Please correct the duplicate data for %(field)s, "
                "which must be unique.") % {
                    "field": get_text_list(unique_check, six.text_type(_("and"))),
                }

    def get_date_error_message(self, date_check):
        # date_check is (uclass, lookup, field, unique_for); see validate_unique.
        return ugettext("Please correct the duplicate data for %(field_name)s "
            "which must be unique for the %(lookup)s in %(date_field)s.") % {
                'field_name': date_check[2],
                'date_field': date_check[3],
                'lookup': six.text_type(date_check[1]),
            }

    def get_form_error(self):
        return ugettext("Please correct the duplicate values below.")

    def save_existing_objects(self, commit=True):
        """
        Saves changed objects, deletes objects marked for deletion, and
        returns the list of saved instances. Also populates
        ``changed_objects`` and ``deleted_objects``.
        """
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []

        saved_instances = []
        forms_to_delete = self.deleted_forms
        for form in self.initial_forms:
            pk_name = self._pk_field.name
            raw_pk_value = form._raw_value(pk_name)

            # clean() for different types of PK fields can sometimes return
            # the model instance, and sometimes the PK. Handle either.
            pk_value = form.fields[pk_name].clean(raw_pk_value)
            pk_value = getattr(pk_value, 'pk', pk_value)

            obj = self._existing_object(pk_value)
            if form in forms_to_delete:
                self.deleted_objects.append(obj)
                obj.delete()
                continue
            if form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances

    def save_new_objects(self, commit=True):
        """Saves new objects from changed extra forms; returns the new instances."""
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField, OneToOneField, ForeignKey
        self._pk_field = pk = self.model._meta.pk
        # If a pk isn't editable, then it won't be on the form, so we need to
        # add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoField's editable attribute is
        # True, so check for that as well.

        def pk_is_not_editable(pk):
            return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
                or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
        if pk_is_not_editable(pk) or pk.name not in form.fields:
            if form.is_bound:
                pk_value = form.instance.pk
            else:
                try:
                    if index is not None:
                        pk_value = self.get_queryset()[index].pk
                    else:
                        pk_value = None
                except IndexError:
                    pk_value = None
            # The pk widget validates against the related model's objects when
            # the pk is itself a relation; otherwise against this model's.
            if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
                qs = pk.rel.to._default_manager.get_queryset()
            else:
                qs = self.model._default_manager.get_queryset()
            qs = qs.using(form.instance._state.db)
            if form._meta.widgets:
                widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
            else:
                widget = HiddenInput
            form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
                         formset=BaseModelFormSet, extra=1, can_delete=False,
                         can_order=False, max_num=None, fields=None, exclude=None,
                         widgets=None, validate_max=False, localized_fields=None,
                         labels=None, help_texts=None, error_messages=None):
    """
    Returns a FormSet class for the given Django model class.
    """
    # modelform_factory will produce the same warning/error, but that will be
    # difficult to debug for code that needs upgrading, so we produce the
    # warning here too. This reproduces logic inside modelform_factory, but it
    # can be removed once the deprecation cycle is complete, since the
    # validation exception will produce a helpful stacktrace.
    meta = getattr(form, 'Meta', None)
    if meta is None:
        meta = type(str('Meta'), (object,), {})
    no_fields = getattr(meta, 'fields', fields) is None
    no_exclude = getattr(meta, 'exclude', exclude) is None
    if no_fields and no_exclude:
        warnings.warn("Calling modelformset_factory without defining 'fields' or "
                      "'exclude' explicitly is deprecated",
                      PendingDeprecationWarning, stacklevel=2)

    # Build the per-object ModelForm first, then wrap it in a formset class.
    model_form = modelform_factory(
        model, form=form, fields=fields, exclude=exclude,
        formfield_callback=formfield_callback, widgets=widgets,
        localized_fields=localized_fields, labels=labels,
        help_texts=help_texts, error_messages=error_messages)
    formset_class = formset_factory(
        model_form, formset, extra=extra, max_num=max_num,
        can_order=can_order, can_delete=can_delete, validate_max=validate_max)
    formset_class.model = model
    return formset_class
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""
    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        # ``self.fk`` is the ForeignKey from self.model to the parent; it is
        # attached to the class by inlineformset_factory.
        if instance is None:
            self.instance = self.fk.rel.to()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        if queryset is None:
            queryset = self.model._default_manager
        # Only existing parents can have existing children; an unsaved parent
        # yields an empty queryset.
        if self.instance.pk is not None:
            qs = queryset.filter(**{self.fk.name: self.instance})
        else:
            qs = queryset.none()
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
                                                queryset=qs, **kwargs)

    def initial_form_count(self):
        if self.save_as_new:
            # All forms create new objects, so there are no initial forms.
            return 0
        return super(BaseInlineFormSet, self).initial_form_count()

    def _construct_form(self, i, **kwargs):
        """Builds form ``i`` and wires its instance's fk to the parent."""
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None

            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None

        # Set the fk value here so that the form can do its validation.
        fk_value = self.instance.pk
        # The fk may target a non-pk field on the parent (to_field).
        if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
            fk_value = getattr(self.instance, self.fk.rel.field_name)
            fk_value = getattr(fk_value, 'pk', fk_value)
        setattr(form.instance, self.fk.get_attname(), fk_value)
        return form

    @classmethod
    def get_default_prefix(cls):
        # Use the parent's accessor name (e.g. "book_set") as the prefix.
        from django.db.models.fields.related import RelatedObject
        return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+','')

    def save_new(self, form, commit=True):
        # Use commit=False so we can assign the parent key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        pk_value = getattr(self.instance, self.fk.rel.field_name)
        setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj

    def add_fields(self, form, index):
        """Replaces the fk (or pk-as-fk) field with an InlineForeignKeyField
        bound to the parent instance."""
        super(BaseInlineFormSet, self).add_fields(form, index)
        if self._pk_field == self.fk:
            name = self._pk_field.name
            kwargs = {'pk_field': True}
        else:
            # The foreign key field might not be on the form, so we poke at the
            # Model field to get the label, since we need that for error messages.
            name = self.fk.name
            kwargs = {
                'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
            }
            if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
                kwargs['to_field'] = self.fk.rel.field_name

        form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)

        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            form._meta.fields.append(self.fk.name)

    def get_unique_error_message(self, unique_check):
        # The fk is constant across the formset, so drop it from the message.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
    """
    Finds and returns the ForeignKey from model to parent if there is one
    (returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
    True, an exception is raised if there is no ForeignKey from model to
    parent_model.
    """
    # avoid circular import
    from django.db.models import ForeignKey
    opts = model._meta

    def points_to_parent(field):
        # A valid link is a ForeignKey targeting parent_model itself or one
        # of its ancestors (multi-table inheritance).
        return (isinstance(field, ForeignKey)
                and (field.rel.to == parent_model
                     or field.rel.to in parent_model._meta.get_parent_list()))

    if fk_name:
        named = [field for field in opts.fields if field.name == fk_name]
        if len(named) == 0:
            raise Exception("%s has no field named '%s'" % (model, fk_name))
        # Field names are unique on a model, so at most one match exists.
        fk = named[0]
        if not points_to_parent(fk):
            raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
        return fk

    # Try to discover what the ForeignKey from model to parent_model is
    candidates = [field for field in opts.fields if points_to_parent(field)]
    if len(candidates) == 1:
        return candidates[0]
    if len(candidates) == 0:
        if can_fail:
            return
        raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
    raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None, extra=3, can_order=False,
                          can_delete=True, max_num=None, formfield_callback=None,
                          widgets=None, validate_max=False, localized_fields=None,
                          labels=None, help_texts=None, error_messages=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.

    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    if fk.unique:
        # A unique FK means at most one child per parent; enforce max_num=1.
        max_num = 1
    formset_class = modelformset_factory(
        model,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
        widgets=widgets,
        validate_max=validate_max,
        localized_fields=localized_fields,
        labels=labels,
        help_texts=help_texts,
        error_messages=error_messages,
    )
    # BaseInlineFormSet reads ``fk`` off the class to link children to the parent.
    formset_class.fk = fk
    return formset_class
# Fields #####################################################################
class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    widget = HiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
    }

    def __init__(self, parent_instance, *args, **kwargs):
        self.parent_instance = parent_instance
        self.pk_field = kwargs.pop("pk_field", False)
        self.to_field = kwargs.pop("to_field", None)
        if self.parent_instance is not None:
            # Pre-populate the (hidden) widget with the parent's identifying
            # value: an arbitrary to_field, or the primary key.
            kwargs["initial"] = self._expected_value()
        kwargs["required"] = False
        super(InlineForeignKeyField, self).__init__(*args, **kwargs)

    def _expected_value(self):
        # The parent-side value submitted data must match.
        if self.to_field:
            return getattr(self.parent_instance, self.to_field)
        return self.parent_instance.pk

    def clean(self, value):
        if value in self.empty_values:
            # No value submitted: a pk field cleans to None; any other inline
            # fk falls back to the parent instance itself (pre-existing behavior).
            return None if self.pk_field else self.parent_instance
        # Compare as text so differing types (e.g. int vs str) still match.
        if force_text(value) != force_text(self._expected_value()):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return self.parent_instance

    def _has_changed(self, initial, data):
        # The parent link is fixed; it never counts as changed data.
        return False
class ModelChoiceIterator(object):
    """Iterates a ModelChoiceField's queryset as (value, label) choice pairs,
    optionally preceded by the field's empty-label choice."""

    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset

    def __iter__(self):
        empty_label = self.field.empty_label
        if empty_label is not None:
            yield ("", empty_label)
        if not self.field.cache_choices:
            for obj in self.queryset.all():
                yield self.choice(obj)
            return
        # Materialize the choices once and stash them on the field so later
        # iterations skip the queryset entirely.
        if self.field.choice_cache is None:
            self.field.choice_cache = [self.choice(obj)
                                       for obj in self.queryset.all()]
        for cached_choice in self.field.choice_cache:
            yield cached_choice

    def __len__(self):
        # One extra slot when the empty-label choice is shown first.
        extra = 0 if self.field.empty_label is None else 1
        return len(self.queryset) + extra

    def choice(self, obj):
        return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    def __init__(self, queryset, empty_label="---------", cache_choices=False,
                 required=True, widget=None, label=None, initial=None,
                 help_text='', to_field_name=None, *args, **kwargs):
        # A required field with an initial value never needs the empty
        # choice, so it is suppressed in that case.
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        self.cache_choices = cache_choices
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.choice_cache = None
        self.to_field_name = to_field_name
    def __deepcopy__(self, memo):
        # Skip ChoiceField.__deepcopy__ (hence super(ChoiceField, ...)):
        # it would copy self._choices, which this class manages itself.
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        result.queryset = result.queryset
        return result
    def _get_queryset(self):
        return self._queryset
    def _set_queryset(self, queryset):
        # Re-sync the widget's choices whenever the queryset is replaced.
        self._queryset = queryset
        self.widget.choices = self.choices
    queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return smart_text(obj)
    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices
        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return ModelChoiceIterator(self)
    choices = property(_get_choices, ChoiceField._set_choices)
    def prepare_value(self, value):
        # Model instances (anything with _meta) are rendered via their
        # to_field or primary-key value; everything else is delegated.
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super(ModelChoiceField, self).prepare_value(value)
    def to_python(self, value):
        """Convert the submitted value to a model instance, or None if empty.

        Raises ValidationError ('invalid_choice') when the value is
        malformed or matches no object in the queryset.
        """
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except (ValueError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
    def validate(self, value):
        # Bypass ChoiceField.validate(): membership in the queryset was
        # already enforced by to_python().
        return Field.validate(self, value)
    def _has_changed(self, initial, data):
        initial_value = initial if initial is not None else ''
        data_value = data if data is not None else ''
        # Compare text forms, the same normalization used when rendering.
        return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
    }
    def __init__(self, queryset, cache_choices=False, required=True,
                 widget=None, label=None, initial=None,
                 help_text='', *args, **kwargs):
        # empty_label is forced to None: a multi-select has no empty choice.
        super(ModelMultipleChoiceField, self).__init__(queryset, None,
            cache_choices, required, widget, label, initial, help_text,
            *args, **kwargs)
        # Remove this in Django 1.8
        if isinstance(self.widget, SelectMultiple) and not isinstance(self.widget, CheckboxSelectMultiple):
            msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
            self.help_text = string_concat(self.help_text, ' ', msg)
    def to_python(self, value):
        # Normalize to a list of model instances; empty input becomes [].
        if not value:
            return []
        return list(self._check_values(value))
    def clean(self, value):
        """Validate the submitted list and return the matching QuerySet."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs
    def _check_values(self, value):
        """
        Given a list of possible PK values, returns a QuerySet of the
        corresponding objects. Raises a ValidationError if a given value is
        invalid (not a valid PK, not in the queryset, etc.)
        """
        key = self.to_field_name or 'pk'
        # deduplicate given values to avoid creating many querysets or
        # requiring the database backend deduplicate efficiently.
        try:
            value = frozenset(value)
        except TypeError:
            # list of lists isn't hashable, for example
            raise ValidationError(
                self.error_messages['list'],
                code='list',
            )
        for pk in value:
            try:
                # The queryset itself is discarded; filter() raises
                # ValueError when pk cannot be coerced to the field type.
                self.queryset.filter(**{key: pk})
            except ValueError:
                raise ValidationError(
                    self.error_messages['invalid_pk_value'],
                    code='invalid_pk_value',
                    params={'pk': pk},
                )
        qs = self.queryset.filter(**{'%s__in' % key: value})
        # Verify every submitted value actually matched a row.
        pks = set([force_text(getattr(o, key)) for o in qs])
        for val in value:
            if force_text(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )
        return qs
    def prepare_value(self, value):
        # Prepare each element when given an iterable of values (but not a
        # text string or a single model instance, which has _meta).
        if (hasattr(value, '__iter__') and
                not isinstance(value, six.text_type) and
                not hasattr(value, '_meta')):
            return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
        return super(ModelMultipleChoiceField, self).prepare_value(value)
    def _has_changed(self, initial, data):
        # Two selections are equal when they contain the same set of
        # text-normalized values, regardless of order.
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        initial_set = set([force_text(value) for value in self.prepare_value(initial)])
        data_set = set([force_text(value) for value in data])
        return data_set != initial_set
def modelform_defines_fields(form_class):
    """Return True when *form_class* explicitly declares which model fields
    it covers, i.e. its ``_meta`` sets ``fields`` or ``exclude``."""
    if form_class is None or not hasattr(form_class, '_meta'):
        return False
    opts = form_class._meta
    return opts.fields is not None or opts.exclude is not None
|
myerpengine/odoo | refs/heads/master | openerp/tools/yaml_tag.py | 105 | import yaml
import logging
class YamlTag(object):
    """
    Common base class for the custom YAML tag node objects.
    Subclasses override __str__, which the module recorder uses when
    serializing nodes back to yaml.
    """
    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
    def __getitem__(self, key):
        # Dict-style access is just an alias for attribute access.
        return getattr(self, key)
    def __getattr__(self, attr):
        # Missing attributes read as None instead of raising AttributeError.
        return None
    def __repr__(self):
        pairs = sorted(self.__dict__.items())
        return "<{0} {1}>".format(type(self).__name__, pairs)
class Assert(YamlTag):
    # Node for the ``!assert`` tag: an assertion on a model record.
    def __init__(self, model, id=None, severity=logging.WARNING, string="NONAME", **kwargs):
        # Named attributes are set before super(), so duplicate keys in
        # **kwargs would overwrite them via YamlTag.__init__.
        self.model = model
        self.id = id
        self.severity = severity
        self.string = string
        super(Assert, self).__init__(**kwargs)
class Record(YamlTag):
    # Node for the ``!record`` tag: creation/update of a model record.
    def __init__(self, model, id, use='id', view=True, **kwargs):
        # NOTE(review): the ``use`` argument is accepted but never stored;
        # confirm whether any caller relies on it before removing it.
        self.model = model
        self.id = id
        self.view = view
        super(Record, self).__init__(**kwargs)
    def __str__(self):
        return '!record {model: %s, id: %s}:' % (str(self.model,), str(self.id,))
class Python(YamlTag):
    # Node for the ``!python`` tag: a block of python code to execute.
    def __init__(self, model, severity=logging.ERROR, name="", **kwargs):
        self.model= model
        self.severity = severity
        self.name = name
        super(Python, self).__init__(**kwargs)
    def __str__(self):
        return '!python {model: %s}: |' % (str(self.model), )
class Menuitem(YamlTag):
    # Node for the ``!menuitem`` tag.
    def __init__(self, id, name, **kwargs):
        self.id = id
        self.name = name
        super(Menuitem, self).__init__(**kwargs)
class Workflow(YamlTag):
    # Node for the ``!workflow`` tag: send a workflow signal to a record.
    def __init__(self, model, action, ref=None, **kwargs):
        self.model = model
        self.action = action
        self.ref = ref
        super(Workflow, self).__init__(**kwargs)
    def __str__(self):
        return '!workflow {model: %s, action: %s, ref: %s}' % (str(self.model,), str(self.action,), str(self.ref,))
class ActWindow(YamlTag):
    # Node for the ``!act_window`` tag; all data comes from the mapping.
    def __init__(self, **kwargs):
        super(ActWindow, self).__init__(**kwargs)
class Function(YamlTag):
    # Node for the ``!function`` tag: a model method invocation.
    def __init__(self, model, name, **kwargs):
        self.model = model
        self.name = name
        super(Function, self).__init__(**kwargs)
class Report(YamlTag):
    # Node for the ``!report`` tag.
    def __init__(self, model, name, string, **kwargs):
        self.model = model
        self.name = name
        self.string = string
        super(Report, self).__init__(**kwargs)
class Delete(YamlTag):
    # Node for the ``!delete`` tag; all data comes from the mapping.
    def __init__(self, **kwargs):
        super(Delete, self).__init__(**kwargs)
class Context(YamlTag):
    # Node for the ``!context`` tag; all data comes from the mapping.
    def __init__(self, **kwargs):
        super(Context, self).__init__(**kwargs)
class Url(YamlTag):
    # Node for the ``!url`` tag; all data comes from the mapping.
    def __init__(self, **kwargs):
        super(Url, self).__init__(**kwargs)
class Eval(YamlTag):
    # Node for the ``!eval`` tag: a python expression kept as text.
    def __init__(self, expression):
        self.expression = expression
        super(Eval, self).__init__()
    def __str__(self):
        return '!eval %s' % str(self.expression)
class Ref(YamlTag):
    # Node for the ``!ref`` tag family (see ref_constructor for suffixes).
    def __init__(self, expr="False", *args, **kwargs):
        self.expr = expr
        super(Ref, self).__init__(*args, **kwargs)
    def __str__(self):
        return 'ref(%s)' % repr(self.expr)
class IrSet(YamlTag):
    """Node for the ``!ir_set`` tag.

    Accepts arbitrary keyword arguments: ir_set_constructor() forwards the
    whole YAML mapping as **kwargs, so the previous no-argument __init__
    raised TypeError for any non-empty !ir_set node. Accepting **kwargs is
    backward compatible (IrSet() still works) and matches the other tags.
    """
    def __init__(self, **kwargs):
        super(IrSet, self).__init__(**kwargs)
# PyYAML constructor callbacks: each one converts a tagged YAML node into
# the matching YamlTag instance by unpacking the node's mapping as kwargs.
def assert_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Assert(**kwargs)
def record_constructor(loader, node):
    # "model" and "id" are mandatory for !record; fail fast if missing.
    kwargs = loader.construct_mapping(node)
    assert "model" in kwargs, "'model' argument is required for !record"
    assert "id" in kwargs, "'id' argument is required for !record"
    return Record(**kwargs)
def python_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Python(**kwargs)
def menuitem_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Menuitem(**kwargs)
def workflow_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Workflow(**kwargs)
def act_window_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return ActWindow(**kwargs)
def function_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Function(**kwargs)
def report_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Report(**kwargs)
def delete_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Delete(**kwargs)
def context_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Context(**kwargs)
def url_constructor(loader, node):
    kwargs = loader.construct_mapping(node)
    return Url(**kwargs)
def eval_constructor(loader, node):
    # !eval carries a scalar expression, not a mapping.
    expression = loader.construct_scalar(node)
    return Eval(expression)
def ref_constructor(loader, tag_suffix, node):
    # Multi-constructor: "!ref:id <scalar>" wraps the scalar as {"id": ...};
    # a bare "!ref" node is expected to be a mapping.
    if tag_suffix == "id":
        kwargs = {"id": loader.construct_scalar(node)}
    else:
        kwargs = loader.construct_mapping(node)
    return Ref(**kwargs)
def ir_set_constructor(loader, node):
    # Forwards the full YAML mapping to IrSet; IrSet.__init__ must accept
    # these keywords.
    kwargs = loader.construct_mapping(node)
    return IrSet(**kwargs)
# Register the constructors for the custom YAML tags with PyYAML.
# The constructors are installed globally: do not redefine them in another
# class/file/package. The module recorder therefore only needs to import
# this file.
def add_constructors():
    """Install every custom-tag constructor on the global yaml loader."""
    tag_constructors = (
        (u"!assert", assert_constructor),
        (u"!record", record_constructor),
        (u"!python", python_constructor),
        (u"!menuitem", menuitem_constructor),
        (u"!workflow", workflow_constructor),
        (u"!act_window", act_window_constructor),
        (u"!function", function_constructor),
        (u"!report", report_constructor),
        (u"!context", context_constructor),
        (u"!delete", delete_constructor),
        (u"!url", url_constructor),
        (u"!eval", eval_constructor),
    )
    for tag, constructor in tag_constructors:
        yaml.add_constructor(tag, constructor)
    # "!ref" supports tag suffixes (e.g. "!ref:id"), so it needs a
    # multi-constructor rather than a plain one.
    yaml.add_multi_constructor(u"!ref", ref_constructor)
    yaml.add_constructor(u"!ir_set", ir_set_constructor)
add_constructors()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rigetticomputing/pyquil | refs/heads/master | pyquil/api/_wavefunction_simulator.py | 1 | ##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Dict, List, Union, Optional, Any, Set, cast
from warnings import warn
import numpy as np
from pyquil.api._base_connection import ForestConnection
from pyquil.api._error_reporting import _record_call
from pyquil.paulis import PauliSum, PauliTerm
from pyquil.quil import Program, percolate_declares
from pyquil.quilatom import MemoryReference
from pyquil.gates import MOVE
from pyquil.wavefunction import Wavefunction
class WavefunctionSimulator:
    """Client-side simulator that propagates a full wavefunction
    representation of a quantum state via the Forest web API.

    All heavy lifting happens on the server: each public method forwards the
    program (optionally augmented with classical-memory initialization) over
    ``self.connection`` and post-processes the reply.
    """
    @_record_call
    def __init__(
        self, connection: Optional[ForestConnection] = None, random_seed: Optional[int] = None
    ) -> None:
        """
        A simulator that propagates a wavefunction representation of a quantum state.
        :param connection: A connection to the Forest web API.
        :param random_seed: A seed for the simulator's random number generators. Either None (for
            an automatically generated seed) or a non-negative integer.
        """
        if connection is None:
            connection = ForestConnection()
        self.connection = connection
        # Validate eagerly so a bad seed fails at construction time rather
        # than on the first simulation call.
        if random_seed is None:
            self.random_seed = None
        elif isinstance(random_seed, int) and random_seed >= 0:
            self.random_seed = random_seed
        else:
            raise TypeError("random_seed should be None or a non-negative int")
    @_record_call
    def wavefunction(self, quil_program: Program, memory_map: Any = None) -> Wavefunction:
        """
        Simulate a Quil program and return the wavefunction.
        .. note:: If your program contains measurements or noisy gates, this method may not do what
            you want. If the execution of ``quil_program`` is **non-deterministic** then the
            final wavefunction only represents a stochastically generated sample and the
            wavefunctions returned by *different* ``wavefunction`` calls *will generally be
            different*.
        :param quil_program: A Quil program.
        :param memory_map: An assignment of classical registers to values, representing an initial
            state for the QAM's classical memory.
            This is expected to be of type Dict[str, List[Union[int, float]]],
            where the keys are memory region names and the values are arrays of
            initialization data.
            For now, we also support input of type Dict[MemoryReference, Any],
            but this is deprecated and will be removed in a future release.
        :return: A Wavefunction object representing the state of the QVM.
        """
        if memory_map is not None:
            quil_program = self.augment_program_with_memory_values(quil_program, memory_map)
        return cast(
            Wavefunction,
            self.connection._wavefunction(quil_program=quil_program, random_seed=self.random_seed),
        )
    @_record_call
    def expectation(
        self,
        prep_prog: Program,
        pauli_terms: Union[PauliSum, List[PauliTerm]],
        memory_map: Any = None,
    ) -> Union[float, np.ndarray]:
        """
        Calculate the expectation value of Pauli operators given a state prepared by prep_program.
        If ``pauli_terms`` is a ``PauliSum`` then the returned value is a single ``float``,
        otherwise the returned value is an array of values, one for each ``PauliTerm`` in the
        list.
        .. note:: If your program contains measurements or noisy gates, this method may not do what
            you want. If the execution of ``quil_program`` is **non-deterministic** then the
            final wavefunction from which the expectation value is calculated only represents
            a stochastically generated sample and the wavefunctions returned by *different*
            ``wavefunction`` calls *will generally be different*.
        :param prep_prog: A program that prepares the state on which we measure the expectation.
        :param pauli_terms: A Pauli representation of a quantum operator.
        :param memory_map: An assignment of classical registers to values, representing an initial
            state for the QAM's classical memory.
            This is expected to be of type Dict[str, List[Union[int, float]]],
            where the keys are memory region names and the values are arrays of
            initialization data.
            For now, we also support input of type Dict[MemoryReference, Any],
            but this is deprecated and will be removed in a future release.
        :return: Either a float or array floats depending on ``pauli_terms``.
        """
        # Normalize both input forms to parallel lists of programs and
        # coefficients; remember which form was given for the return type.
        is_pauli_sum = False
        if isinstance(pauli_terms, PauliSum):
            progs, coeffs = pauli_terms.get_programs()
            is_pauli_sum = True
        else:
            coeffs = np.array([pt.coefficient for pt in pauli_terms])
            progs = [pt.program for pt in pauli_terms]
        if memory_map is not None:
            prep_prog = self.augment_program_with_memory_values(prep_prog, memory_map)
        # The server returns bare per-term expectations; scale by the
        # coefficients client-side.
        bare_results = self.connection._expectation(prep_prog, progs, random_seed=self.random_seed)
        results = coeffs * bare_results
        if is_pauli_sum:
            return np.sum(results)
        return results
    @_record_call
    def run_and_measure(
        self,
        quil_program: Program,
        qubits: Optional[List[int]] = None,
        trials: int = 1,
        memory_map: Optional[
            Union[Dict[str, List[Union[int, float]]], Dict[MemoryReference, Any]]
        ] = None,
    ) -> np.ndarray:
        """
        Run a Quil program once to determine the final wavefunction, and measure multiple times.
        Alternatively, consider using ``wavefunction`` and calling ``sample_bitstrings`` on the
        resulting object.
        For a large wavefunction and a low-medium number of trials, use this function.
        On the other hand, if you're sampling a small system many times you might want to
        use ``Wavefunction.sample_bitstrings``.
        .. note:: If your program contains measurements or noisy gates, this method may not do what
            you want. If the execution of ``quil_program`` is **non-deterministic** then the
            final wavefunction from which the returned bitstrings are sampled itself only
            represents a stochastically generated sample and the outcomes sampled from
            *different* ``run_and_measure`` calls *generally sample different bitstring
            distributions*.
        :param quil_program: The program to run and measure
        :param qubits: An optional list of qubits to measure. The order of this list is
            respected in the returned bitstrings. If not provided, all qubits used in
            the program will be measured and returned in their sorted order.
        :param int trials: Number of times to sample from the prepared wavefunction.
        :param memory_map: An assignment of classical registers to values, representing an initial
            state for the QAM's classical memory.
            This is expected to be of type Dict[str, List[Union[int, float]]],
            where the keys are memory region names and the values are arrays of
            initialization data.
            For now, we also support input of type Dict[MemoryReference, Any],
            but this is deprecated and will be removed in a future release.
        :return: An array of measurement results (0 or 1) of shape (trials, len(qubits))
        """
        # Default to every qubit the program touches, in sorted order.
        if qubits is None:
            qubits = sorted(cast(Set[int], quil_program.get_qubits(indices=True)))
        if memory_map is not None:
            quil_program = self.augment_program_with_memory_values(quil_program, memory_map)
        return self.connection._run_and_measure(
            quil_program=quil_program, qubits=qubits, trials=trials, random_seed=self.random_seed
        )
    @staticmethod
    def augment_program_with_memory_values(
        quil_program: Program,
        memory_map: Union[Dict[str, List[Union[int, float]]], Dict[MemoryReference, Any]],
    ) -> Program:
        """Prepend MOVE instructions that initialize classical memory from
        *memory_map*, then return the combined program (with declares
        percolated to the top)."""
        p = Program()
        # we stupidly allowed memory_map to be of type Dict[MemoryReference, Any], whereas qc.run
        # takes a memory initialization argument of type Dict[str, List[Union[int, float]]. until
        # we are in a position to remove this, we support both styles of input.
        if len(memory_map.keys()) == 0:
            return quil_program
        elif isinstance(list(memory_map.keys())[0], MemoryReference):
            warn(
                "Use of memory_map values of type Dict[MemoryReference, Any] have been "
                "deprecated. Please use Dict[str, List[Union[int, float]]], as with "
                "QuantumComputer.run ."
            )
            for k, v in memory_map.items():
                p += MOVE(k, v)
        elif isinstance(list(memory_map.keys())[0], str):
            # One MOVE per array element, addressed by region name + offset.
            for name, arr in memory_map.items():
                for index, value in enumerate(arr):
                    p += MOVE(MemoryReference(cast(str, name), offset=index), value)
        else:
            raise TypeError("Bad memory_map type; expected Dict[str, List[Union[int, float]]].")
        p += quil_program
        return percolate_declares(p)
|
hajuuk/asuswrt | refs/heads/master | release/src/router/asusnatnl/pjproject-1.12/pjsip-apps/src/py_pjsua/pjsua_app.py | 32 | # $Id: pjsua_app.py 1438 2007-09-17 15:44:47Z bennylp $
#
# Sample and simple Python script to make and receive calls, and do
# presence and instant messaging/IM using PJSUA-API binding for Python.
#
# Copyright (C) 2003-2007 Benny Prijono <benny@prijono.org>
#
import py_pjsua
import sys
import thread
#
# Configurations
#
# Name used as the log/error "sender" for this script.
THIS_FILE = "pjsua_app.py"
# Quit flag shared with the worker thread: 0 = run, 1 = quit requested,
# 2 = worker thread has exited.
C_QUIT = 0
# Only log messages at this level or below are printed.
C_LOG_LEVEL = 4
# STUN config.
# Set C_STUN_HOST to the address:port of the STUN server to enable STUN
#
C_STUN_HOST = ""
#C_STUN_HOST = "192.168.0.2"
#C_STUN_HOST = "stun.iptel.org:3478"
# SIP port
C_SIP_PORT = 5060
# Globals
#
g_ua_cfg = None
g_acc_id = py_pjsua.PJSUA_INVALID_ID
g_current_call = py_pjsua.PJSUA_INVALID_ID
g_wav_files = []
g_wav_id = 0
g_wav_port = 0
g_rec_file = ""
g_rec_id = 0
g_rec_port = 0
# Utility: display PJ error and exit
#
def err_exit(title, rc):
    """Log the pjsua error described by rc, shut pjsua down and exit."""
    py_pjsua.perror(THIS_FILE, title, rc)
    py_pjsua.destroy()
    # Bug fix: use sys.exit() instead of the site-injected exit(); exit()
    # is intended for interactive use only and is absent under python -S.
    sys.exit(1)
# Logging function (also callback, called by pjsua-lib)
#
def log_cb(level, str, len):
    # Print the message when it passes the level filter. The parameters
    # ``str`` and ``len`` shadow builtins, but the names are kept: this is
    # the callback signature pjsua-lib invokes.
    if level <= C_LOG_LEVEL:
        print str,
def write_log(level, str):
    # Convenience wrapper: log a message with a trailing newline.
    log_cb(level, str + "\n", 0)
# Utility to get call info
#
def call_name(call_id):
    # Human-readable tag for a call, e.g. "[Call 0 <sip:...>]".
    ci = py_pjsua.call_get_info(call_id)
    return "[Call " + `call_id` + " " + ci.remote_info + "]"
# Callback when call state has changed.
#
def on_call_state(call_id, e):
    global g_current_call
    ci = py_pjsua.call_get_info(call_id)
    write_log(3, call_name(call_id) + " state = " + `ci.state_text`)
    # Forget the current call once it is fully disconnected.
    if ci.state == py_pjsua.PJSIP_INV_STATE_DISCONNECTED:
        g_current_call = py_pjsua.PJSUA_INVALID_ID
# Callback for incoming call
#
def on_incoming_call(acc_id, call_id, rdata):
    global g_current_call
    if g_current_call != py_pjsua.PJSUA_INVALID_ID:
        # There's call in progress - answer Busy
        py_pjsua.call_answer(call_id, 486, None, None)
        return
    g_current_call = call_id
    ci = py_pjsua.call_get_info(call_id)
    write_log(3, "*** Incoming call: " + call_name(call_id) + "***")
    write_log(3, "*** Press a to answer or h to hangup  ***")
# Callback when media state has changed (e.g. established or terminated)
#
def on_call_media_state(call_id):
    ci = py_pjsua.call_get_info(call_id)
    if ci.media_status == py_pjsua.PJSUA_CALL_MEDIA_ACTIVE:
        # Connect the call's conference slot to the sound device (slot 0)
        # in both directions so audio flows.
        py_pjsua.conf_connect(ci.conf_slot, 0)
        py_pjsua.conf_connect(0, ci.conf_slot)
        write_log(3, call_name(call_id) + ": media is active")
    else:
        write_log(3, call_name(call_id) + ": media is inactive")
# Callback when account registration state has changed
#
def on_reg_state(acc_id):
    acc_info = py_pjsua.acc_get_info(acc_id)
    if acc_info.has_registration != 0:
        cmd = "registration"
    else:
        cmd = "unregistration"
    # Status 0 or 200 means success; anything else is reported as failure.
    if acc_info.status != 0 and acc_info.status != 200:
        write_log(3, "Account " + cmd + " failed: rc=" + `acc_info.status` + " " + acc_info.status_text)
    else:
        write_log(3, "Account " + cmd + " success")
# Callback when buddy's presence state has changed
#
def on_buddy_state(buddy_id):
    write_log(3, "On Buddy state called")
    buddy_info = py_pjsua.buddy_get_info(buddy_id)
    if buddy_info.status != 0 and buddy_info.status != 200:
        write_log(3, "Status of " + `buddy_info.uri` + " is " + `buddy_info.status_text`)
    else:
        write_log(3, "Status : " + `buddy_info.status`)
# Callback on incoming pager (MESSAGE)
#
def on_pager(call_id, strfrom, strto, contact, mime_type, text):
    write_log(3, "MESSAGE from " + `strfrom` + " : " + `text`)
# Callback on the delivery status of outgoing pager (MESSAGE)
#
def on_pager_status(call_id, strto, body, user_data, status, reason):
    write_log(3, "MESSAGE to " + `strto` + " status " + `status` + " reason " + `reason`)
# Received typing indication
#
def on_typing(call_id, strfrom, to, contact, is_typing):
    str_t = ""
    if is_typing:
        str_t = "is typing.."
    else:
        str_t = "has stopped typing"
    write_log(3, "IM indication: " + strfrom + " " + str_t)
# Received the status of previous call transfer request
#
def on_call_transfer_status(call_id,status_code,status_text,final,p_cont):
    strfinal = ""
    if final == 1:
        strfinal = "[final]"
    write_log(3, "Call " + `call_id` + ": transfer status= " + `status_code` + " " + status_text+ " " + strfinal)
    # A 2xx status means the transfer succeeded; hang up our leg.
    if status_code/100 == 2:
        write_log(3, "Call " + `call_id` + " : call transfered successfully, disconnecting call")
        status = py_pjsua.call_hangup(call_id, 410, None, None)
        # NOTE(review): rebinding the local name p_cont cannot reach the
        # caller; verify whether the binding expects an output parameter
        # to be set some other way.
        p_cont = 0
# Callback on incoming call transfer request
#
def on_call_transfer_request(call_id, dst, code):
    write_log(3, "Call transfer request from " + `call_id` + " to " + dst + " with code " + `code`)
#
# Initialize pjsua.
#
def app_init():
    # Creates the pjsua instance, installs all callbacks, creates the UDP
    # transport and a default local account. Exits the process on failure.
    global g_acc_id, g_ua_cfg
    # Create pjsua before anything else
    status = py_pjsua.create()
    if status != 0:
        err_exit("pjsua create() error", status)
    # Create and initialize logging config
    log_cfg = py_pjsua.logging_config_default()
    log_cfg.level = C_LOG_LEVEL
    log_cfg.cb = log_cb
    # Create and initialize pjsua config
    # Note: for this Python module, thread_cnt must be 0 since Python
    #       doesn't like to be called from alien thread (pjsua's thread
    #       in this case)
    ua_cfg = py_pjsua.config_default()
    ua_cfg.thread_cnt = 0
    ua_cfg.user_agent = "PJSUA/Python 0.1"
    ua_cfg.cb.on_incoming_call = on_incoming_call
    ua_cfg.cb.on_call_media_state = on_call_media_state
    ua_cfg.cb.on_reg_state = on_reg_state
    ua_cfg.cb.on_call_state = on_call_state
    ua_cfg.cb.on_buddy_state = on_buddy_state
    ua_cfg.cb.on_pager = on_pager
    ua_cfg.cb.on_pager_status = on_pager_status
    ua_cfg.cb.on_typing = on_typing
    ua_cfg.cb.on_call_transfer_status = on_call_transfer_status
    ua_cfg.cb.on_call_transfer_request = on_call_transfer_request
    # Configure STUN setting
    if C_STUN_HOST != "":
        ua_cfg.stun_host = C_STUN_HOST;
    # Create and initialize media config
    med_cfg = py_pjsua.media_config_default()
    med_cfg.ec_tail_len = 0
    #
    # Initialize pjsua!!
    #
    status = py_pjsua.init(ua_cfg, log_cfg, med_cfg)
    if status != 0:
        err_exit("pjsua init() error", status)
    # Configure UDP transport config
    transport_cfg = py_pjsua.transport_config_default()
    transport_cfg.port = C_SIP_PORT
    # Create UDP transport
    status, transport_id = \
        py_pjsua.transport_create(py_pjsua.PJSIP_TRANSPORT_UDP, transport_cfg)
    if status != 0:
        err_exit("Error creating UDP transport", status)
    # Create initial default account
    status, acc_id = py_pjsua.acc_add_local(transport_id, 1)
    if status != 0:
        err_exit("Error creating account", status)
    g_acc_id = acc_id
    g_ua_cfg = ua_cfg
# Add SIP account interractively
#
def add_account():
    # Prompt for domain/username/password on stdin and register the new
    # SIP account; an empty line at any prompt cancels.
    global g_acc_id
    acc_domain = ""
    acc_username = ""
    acc_passwd =""
    confirm = ""
    # Input account configs
    print "Your SIP domain (e.g. myprovider.com): ",
    acc_domain = sys.stdin.readline()
    if acc_domain == "\n":
        return
    acc_domain = acc_domain.replace("\n", "")
    print "Your username (e.g. alice): ",
    acc_username = sys.stdin.readline()
    if acc_username == "\n":
        return
    acc_username = acc_username.replace("\n", "")
    print "Your password (e.g. secret): ",
    acc_passwd = sys.stdin.readline()
    if acc_passwd == "\n":
        return
    acc_passwd = acc_passwd.replace("\n", "")
    # Configure account configuration
    acc_cfg = py_pjsua.acc_config_default()
    acc_cfg.id = "sip:" + acc_username + "@" + acc_domain
    acc_cfg.reg_uri = "sip:" + acc_domain
    cred_info = py_pjsua.Pjsip_Cred_Info()
    cred_info.realm = "*"
    cred_info.scheme = "digest"
    cred_info.username = acc_username
    cred_info.data_type = 0
    cred_info.data = acc_passwd
    acc_cfg.cred_info.append(1)
    acc_cfg.cred_info[0] = cred_info
    # Add new SIP account
    status, acc_id = py_pjsua.acc_add(acc_cfg, 1)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error adding SIP account", status)
    else:
        g_acc_id = acc_id
        write_log(3, "Account " + acc_cfg.id + " added")
def add_player():
    # Create a WAV file player and remember the first one created as the
    # "default" player (g_wav_id/g_wav_port).
    global g_wav_files
    global g_wav_id
    global g_wav_port
    file_name = ""
    status = -1
    wav_id = 0
    print "Enter the path of the file player(e.g. /tmp/audio.wav): ",
    file_name = sys.stdin.readline()
    if file_name == "\n":
        return
    file_name = file_name.replace("\n", "")
    status, wav_id = py_pjsua.player_create(file_name, 0)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error adding file player ", status)
    else:
        g_wav_files.append(file_name)
        if g_wav_id == 0:
            g_wav_id = wav_id
            g_wav_port = py_pjsua.player_get_conf_port(wav_id)
        write_log(3, "File player " + file_name + " added")
def add_recorder():
    # Create a WAV file recorder; only one recorder is tracked at a time.
    global g_rec_file
    global g_rec_id
    global g_rec_port
    file_name = ""
    status = -1
    rec_id = 0
    print "Enter the path of the file recorder(e.g. /tmp/audio.wav): ",
    file_name = sys.stdin.readline()
    if file_name == "\n":
        return
    file_name = file_name.replace("\n", "")
    status, rec_id = py_pjsua.recorder_create(file_name, 0, None, 0, 0)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error adding file recorder ", status)
    else:
        g_rec_file = file_name
        g_rec_id = rec_id
        g_rec_port = py_pjsua.recorder_get_conf_port(rec_id)
        write_log(3, "File recorder " + file_name + " added")
def conf_list():
    # Print every conference bridge port with its slot, clock rate, frame
    # time and the list of ports it transmits to.
    ports = None
    print "Conference ports : "
    ports = py_pjsua.enum_conf_ports()
    for port in ports:
        info = None
        info = py_pjsua.conf_get_port_info(port)
        txlist = ""
        for listener in info.listeners:
            txlist = txlist + "#" + `listener` + " "
        print "Port #" + `info.slot_id` + "[" + `(info.clock_rate/1000)` + "KHz/" + `(info.samples_per_frame * 1000 / info.clock_rate)` + "ms] " + info.name + " transmitting to: " + txlist
def connect_port():
    # Interactively connect one conference port to another (one-way).
    src_port = 0
    dst_port = 0
    print "Connect src port # (empty to cancel): "
    src_port = sys.stdin.readline()
    if src_port == "\n":
        return
    src_port = src_port.replace("\n", "")
    src_port = int(src_port)
    print "To dst port # (empty to cancel): "
    dst_port = sys.stdin.readline()
    if dst_port == "\n":
        return
    dst_port = dst_port.replace("\n", "")
    dst_port = int(dst_port)
    status = py_pjsua.conf_connect(src_port, dst_port)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error connecting port ", status)
    else:
        write_log(3, "Port connected from " + `src_port` + " to " + `dst_port`)
def disconnect_port():
    # Interactively break a previously made conference port connection.
    src_port = 0
    dst_port = 0
    print "Disconnect src port # (empty to cancel): "
    src_port = sys.stdin.readline()
    if src_port == "\n":
        return
    src_port = src_port.replace("\n", "")
    src_port = int(src_port)
    print "From dst port # (empty to cancel): "
    dst_port = sys.stdin.readline()
    if dst_port == "\n":
        return
    dst_port = dst_port.replace("\n", "")
    dst_port = int(dst_port)
    status = py_pjsua.conf_disconnect(src_port, dst_port)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error disconnecting port ", status)
    else:
        write_log(3, "Port disconnected " + `src_port` + " from " + `dst_port`)
def dump_call_quality():
    # Log the detailed statistics dump for the current call, if any.
    global g_current_call
    buf = ""
    if g_current_call != -1:
        buf = py_pjsua.call_dump(g_current_call, 1, 1024, "  ")
        write_log(3, "\n" + buf)
    else:
        write_log(3, "No current call")
def xfer_call():
global g_current_call
if g_current_call == -1:
write_log(3, "No current call")
else:
call = g_current_call
ci = py_pjsua.call_get_info(g_current_call)
print "Transfering current call ["+ `g_current_call` + "] " + ci.remote_info
print "Enter sip url : "
url = sys.stdin.readline()
if url == "\n":
return
url = url.replace("\n", "")
if call != g_current_call:
print "Call has been disconnected"
return
msg_data = py_pjsua.msg_data_init()
status = py_pjsua.call_xfer(g_current_call, url, msg_data);
if status != 0:
py_pjsua.perror(THIS_FILE, "Error transfering call ", status)
else:
write_log(3, "Call transfered to " + url)
def xfer_call_replaces():
    """Attended transfer: replace another active call with the current one.

    Lists the other active calls, reads the destination call id from
    stdin, validates it, then calls py_pjsua.call_xfer_replaces().
    """
    if g_current_call == -1:
        write_log(3, "No current call")
    else:
        call = g_current_call
        ids = py_pjsua.enum_calls()
        if len(ids) <= 1:
            print "There are no other calls"
            return
        ci = py_pjsua.call_get_info(g_current_call)
        print "Transfer call [" + `g_current_call` + "] " + ci.remote_info + " to one of the following:"
        for i in range(0, len(ids)):
            if ids[i] == call:
                continue
            call_info = py_pjsua.call_get_info(ids[i])
            print `ids[i]` + " " + call_info.remote_info + " [" + call_info.state_text + "]"
        print "Enter call number to be replaced : "
        buf = sys.stdin.readline()
        buf = buf.replace("\n","")
        if buf == "":
            return
        dst_call = int(buf)
        # the current call may have been hung up while we waited for input
        if call != g_current_call:
            print "Call has been disconnected"
            return
        if dst_call == call:
            print "Destination call number must not be the same as the call being transfered"
            return
        if dst_call >= py_pjsua.PJSUA_MAX_CALLS:
            print "Invalid destination call number"
            return
        if py_pjsua.call_is_active(dst_call) == 0:
            print "Invalid destination call number"
            return
        py_pjsua.call_xfer_replaces(call, dst_call, 0, None)
#
# Worker thread function.
# Python doesn't like it when it's called from an alien thread
# (pjsua's worker thread, in this case), so for Python we must
# disable worker thread in pjsua and poll pjsua from Python instead.
#
def worker_thread_main(arg):
    """Poll pjsua events until C_QUIT is set by the main thread.

    Registers this Python thread with pjsua first; sets C_QUIT to 2 on
    exit so the main thread knows the worker has finished.
    """
    global C_QUIT
    thread_desc = 0;
    status = py_pjsua.thread_register("python worker", thread_desc)
    if status != 0:
        py_pjsua.perror(THIS_FILE, "Error registering thread", status)
    else:
        while C_QUIT == 0:
            py_pjsua.handle_events(50)
        print "Worker thread quitting.."
        C_QUIT = 2
# Start pjsua
#
def app_start():
    """Start pjsua and spawn the event-polling worker thread."""
    # Done with initialization, start pjsua!!
    #
    status = py_pjsua.start()
    if status != 0:
        err_exit("Error starting pjsua!", status)
    # Start worker thread
    thr = thread.start_new(worker_thread_main, (0,))
    print "PJSUA Started!!"
# Print account and buddy list
def print_acc_buddy_list():
    """Print the account list (marking the default account with '*')
    followed by the buddy list, if any buddies are configured."""
    global g_acc_id
    acc_ids = py_pjsua.enum_accs()
    print "Account list:"
    for acc_id in acc_ids:
        acc_info = py_pjsua.acc_get_info(acc_id)
        if acc_info.has_registration == 0:
            acc_status = acc_info.status_text
        else:
            acc_status = `acc_info.status` + "/" + acc_info.status_text + " (expires=" + `acc_info.expires` + ")"
        # mark the currently-selected account
        if acc_id == g_acc_id:
            print " *",
        else:
            print " ",
        print "[" + `acc_id` + "] " + acc_info.acc_uri + ": " + acc_status
        print " Presence status: ",
        if acc_info.online_status != 0:
            print "Online"
        else:
            print "Invisible"
    if py_pjsua.get_buddy_count() > 0:
        print ""
        print "Buddy list:"
        buddy_ids = py_pjsua.enum_buddies()
        for buddy_id in buddy_ids:
            bi = py_pjsua.buddy_get_info(buddy_id)
            print " [" + `buddy_id` + "] " + bi.status_text + " " + bi.uri
# Print application menu
#
def print_menu():
    """Print the account/buddy summary and the command menu."""
    print ""
    print ">>>"
    print_acc_buddy_list()
    print """
+============================================================================+
| Call Commands : | Buddy, IM & Presence: | Account: |
| | | |
| m Make call | +b Add buddy | +a Add account |
| a Answer current call | -b Delete buddy | -a Delete accnt |
| h Hangup current call | | |
| H Hold call | i Send instant message | rr register |
| v re-inVite (release Hold) | s Subscribe presence | ru Unregister |
| # Send DTMF string | u Unsubscribe presence | |
| dq Dump curr. call quality | t ToGgle Online status | |
| +--------------------------+------------------+
| x Xfer call | Media Commands: | Status: |
| X Xfer with Replaces | | |
| | cl List ports | d Dump status |
| | cc Connect port | dd Dump detail |
| | cd Disconnect port | |
| | +p Add file player | |
|------------------------------+ +r Add file recorder | |
| q Quit application | | |
+============================================================================+"""
    print "You have " + `py_pjsua.call_get_count()` + " active call(s)"
    print ">>>",
# Menu
#
def app_menu():
    """Main interactive loop: print the menu, read a command line from
    stdin and dispatch on its first one or two characters.  Loops until
    the user enters 'q'."""
    global g_acc_id
    global g_current_call
    quit = 0
    while quit == 0:
        print_menu()
        choice = sys.stdin.readline()
        if choice[0] == "q":
            quit = 1
        elif choice[0] == "i":
            # Sending IM
            print "Send IM to SIP URL: ",
            url = sys.stdin.readline()
            if url == "\n":
                continue
            # Send typing indication
            py_pjsua.im_typing(g_acc_id, url, 1, None)
            print "The content: ",
            message = sys.stdin.readline()
            if message == "\n":
                # cancelled: clear the typing indication again
                py_pjsua.im_typing(g_acc_id, url, 0, None)
                continue
            # Send the IM!
            py_pjsua.im_send(g_acc_id, url, None, message, None, 0)
        elif choice[0] == "m":
            # Make call
            print "Using account ", g_acc_id
            print "Make call to SIP URL: ",
            url = sys.stdin.readline()
            url = url.replace("\n", "")
            if url == "":
                continue
            # Initiate the call!
            status, call_id = py_pjsua.call_make_call(g_acc_id, url, 0, 0, None)
            if status != 0:
                py_pjsua.perror(THIS_FILE, "Error making call", status)
            else:
                g_current_call = call_id
        elif choice[0] == "+" and choice[1] == "b":
            # Add new buddy
            bc = py_pjsua.Buddy_Config()
            print "Buddy URL: ",
            bc.uri = sys.stdin.readline()
            if bc.uri == "\n":
                continue
            bc.uri = bc.uri.replace("\n", "")
            bc.subscribe = 1
            status, buddy_id = py_pjsua.buddy_add(bc)
            if status != 0:
                py_pjsua.perror(THIS_FILE, "Error adding buddy", status)
        elif choice[0] == "-" and choice[1] == "b":
            print "Enter buddy ID to delete : "
            buf = sys.stdin.readline()
            buf = buf.replace("\n","")
            if buf == "":
                continue
            i = int(buf)
            if py_pjsua.buddy_is_valid(i) == 0:
                print "Invalid buddy id " + `i`
            else:
                py_pjsua.buddy_del(i)
                print "Buddy " + `i` + " deleted"
        elif choice[0] == "+" and choice[1] == "a":
            # Add account
            add_account()
        elif choice[0] == "-" and choice[1] == "a":
            print "Enter account ID to delete : "
            buf = sys.stdin.readline()
            buf = buf.replace("\n","")
            if buf == "":
                continue
            i = int(buf)
            if py_pjsua.acc_is_valid(i) == 0:
                print "Invalid account id " + `i`
            else:
                py_pjsua.acc_del(i)
                print "Account " + `i` + " deleted"
        elif choice[0] == "+" and choice[1] == "p":
            add_player()
        elif choice[0] == "+" and choice[1] == "r":
            add_recorder()
        elif choice[0] == "c" and choice[1] == "l":
            conf_list()
        elif choice[0] == "c" and choice[1] == "c":
            connect_port()
        elif choice[0] == "c" and choice[1] == "d":
            disconnect_port()
        elif choice[0] == "d" and choice[1] == "q":
            dump_call_quality()
        elif choice[0] == "x":
            xfer_call()
        elif choice[0] == "X":
            xfer_call_replaces()
        elif choice[0] == "h":
            if g_current_call != py_pjsua.PJSUA_INVALID_ID:
                # 603 = Decline
                py_pjsua.call_hangup(g_current_call, 603, None, None)
            else:
                print "No current call"
        elif choice[0] == "H":
            if g_current_call != py_pjsua.PJSUA_INVALID_ID:
                py_pjsua.call_set_hold(g_current_call, None)
            else:
                print "No current call"
        elif choice[0] == "v":
            if g_current_call != py_pjsua.PJSUA_INVALID_ID:
                py_pjsua.call_reinvite(g_current_call, 1, None);
            else:
                print "No current call"
        elif choice[0] == "#":
            if g_current_call == py_pjsua.PJSUA_INVALID_ID:
                print "No current call"
            elif py_pjsua.call_has_media(g_current_call) == 0:
                print "Media is not established yet!"
            else:
                call = g_current_call
                print "DTMF strings to send (0-9*#A-B)"
                buf = sys.stdin.readline()
                buf = buf.replace("\n", "")
                if buf == "":
                    continue
                # call may have dropped while we waited for input
                if call != g_current_call:
                    print "Call has been disconnected"
                    continue
                status = py_pjsua.call_dial_dtmf(g_current_call, buf)
                if status != 0:
                    py_pjsua.perror(THIS_FILE, "Unable to send DTMF", status);
                else:
                    print "DTMF digits enqueued for transmission"
        elif choice[0] == "s":
            print "Subscribe presence of (buddy id) : "
            buf = sys.stdin.readline()
            buf = buf.replace("\n","")
            if buf == "":
                continue
            i = int(buf)
            py_pjsua.buddy_subscribe_pres(i, 1)
        elif choice[0] == "u":
            print "Unsubscribe presence of (buddy id) : "
            buf = sys.stdin.readline()
            buf = buf.replace("\n","")
            if buf == "":
                continue
            i = int(buf)
            py_pjsua.buddy_subscribe_pres(i, 0)
        elif choice[0] == "t":
            # toggle our own online/invisible presence status
            acc_info = py_pjsua.acc_get_info(g_acc_id)
            if acc_info.online_status == 0:
                acc_info.online_status = 1
            else:
                acc_info.online_status = 0
            py_pjsua.acc_set_online_status(g_acc_id, acc_info.online_status)
            st = ""
            if acc_info.online_status == 0:
                st = "offline"
            else:
                st = "online"
            print "Setting " + acc_info.acc_uri + " online status to " + st
        elif choice[0] == "r":
            if choice[1] == "r":
                py_pjsua.acc_set_registration(g_acc_id, 1)
            elif choice[1] == "u":
                py_pjsua.acc_set_registration(g_acc_id, 0)
        elif choice[0] == "d":
            # "dd" requests a detailed dump
            py_pjsua.dump(choice[1] == "d")
        elif choice[0] == "a":
            if g_current_call != py_pjsua.PJSUA_INVALID_ID:
                py_pjsua.call_answer(g_current_call, 200, None, None)
            else:
                print "No current call"
#
# main
#
# Program entry: initialise, start, run the interactive menu.
app_init()
app_start()
app_menu()
#
# Done, quitting..
#
print "PJSUA shutting down.."
# signal the worker thread to exit its polling loop
C_QUIT = 1
# Give the worker thread chance to quit itself
while C_QUIT != 2:
    py_pjsua.handle_events(50)
print "PJSUA destroying.."
py_pjsua.destroy()
|
AnhellO/DAS_Sistemas | refs/heads/development | Ago-Dic-2017/Ivan Carreon/practica-1/Vendedor.py | 1 | from Persona import Persona
class Vendedor(Persona):
    """A salesperson: a Persona extended with a vendor id."""

    def __init__(self,nombre,apellidoPaterno,apellidoMaterno,sexo,edad,domicilio,telefono,idVendedor):
        """Forward the personal data to Persona and store the vendor id."""
        super().__init__(nombre,apellidoPaterno,apellidoMaterno,sexo,edad,domicilio,telefono)
        self.idVendedor = idVendedor

    def getIDVendedor(self):
        """Return the vendor id."""
        return self.idVendedor

    def setIDVendedor(self,idVendedor):
        """Replace the vendor id."""
        self.idVendedor = idVendedor
# Sample vendor instances, created at module import time.
Vendedor1 = Vendedor('Carlos','Guerrero','Jaramillo','Masculino','31','saltillo 2000','8446795248','01')
Vendedor2 = Vendedor('Eduardo','Carranza','Lopez','Masculino','29','oceania','8447532159','02')
Vendedor3 = Vendedor('Alejandra','Montoya','Silva','Femenino','26','landin','8447845153','03')
|
SpectraLogic/samba | refs/heads/master | wintest/wintest.py | 36 | #!/usr/bin/env python
'''automated testing library for testing Samba against windows'''
import pexpect, subprocess
import optparse
import sys, os, time, re
class wintest():
'''testing of Samba against windows VMs'''
    def __init__(self):
        # substitution variables used by setvar()/getvar()/substitute()
        self.vars = {}
        # when True, steps are listed instead of executed (see skip())
        self.list_mode = False
        # optional upper-cased list of VM names to restrict testing to
        self.vms = None
        # make child process output appear immediately
        os.environ['PYTHONUNBUFFERED'] = '1'
        self.parser = optparse.OptionParser("wintest")
    def check_prerequesites(self):
        '''check that we run as root and bring up the test interfaces'''
        self.info("Checking prerequesites")
        self.setvar('HOSTNAME', self.cmd_output("hostname -s").strip())
        if os.getuid() != 0:
            raise Exception("You must run this script as root")
        self.run_cmd('ifconfig ${INTERFACE} ${INTERFACE_NET} up')
        if self.getvar('INTERFACE_IPV6'):
            # remove a possibly stale address first; failure is not fatal
            self.run_cmd('ifconfig ${INTERFACE} inet6 del ${INTERFACE_IPV6}/64', checkfail=False)
            self.run_cmd('ifconfig ${INTERFACE} inet6 add ${INTERFACE_IPV6}/64 up')
        self.run_cmd('ifconfig ${NAMED_INTERFACE} ${NAMED_INTERFACE_NET} up')
        if self.getvar('NAMED_INTERFACE_IPV6'):
            self.run_cmd('ifconfig ${NAMED_INTERFACE} inet6 del ${NAMED_INTERFACE_IPV6}/64', checkfail=False)
            self.run_cmd('ifconfig ${NAMED_INTERFACE} inet6 add ${NAMED_INTERFACE_IPV6}/64 up')
    def stop_vms(self):
        '''Shut down any existing alive VMs, so they do not collide with what we are doing'''
        self.info('Shutting down any of our VMs already running')
        vms = self.get_vms()
        for v in vms:
            # a VM that is already off makes poweroff fail; ignore that
            self.vm_poweroff(v, checkfail=False)
    def setvar(self, varname, value):
        '''set a substitution variable (used by substitute())'''
        self.vars[varname] = value
def getvar(self, varname):
'''return a substitution variable'''
if not varname in self.vars:
return None
return self.vars[varname]
    def setwinvars(self, vm, prefix='WIN'):
        '''setup WIN_XX vars based on a vm name

        Copies <vm>_VM, <vm>_HOSTNAME, ... into <prefix>_VM etc, and
        derives WIN_LCREALM/WIN_BASEDN from WIN_REALM.  Variables the
        VM does not define are removed from the prefix namespace.
        '''
        for v in ['VM', 'HOSTNAME', 'USER', 'PASS', 'SNAPSHOT', 'REALM', 'DOMAIN', 'IP']:
            vname = '%s_%s' % (vm, v)
            if vname in self.vars:
                self.setvar("%s_%s" % (prefix,v), self.substitute("${%s}" % vname))
            else:
                # stale value from a previous VM must not leak through
                self.vars.pop("%s_%s" % (prefix,v), None)
        if self.getvar("WIN_REALM"):
            self.setvar("WIN_REALM", self.getvar("WIN_REALM").upper())
            self.setvar("WIN_LCREALM", self.getvar("WIN_REALM").lower())
            dnsdomain = self.getvar("WIN_REALM")
            self.setvar("WIN_BASEDN", "DC=" + dnsdomain.replace(".", ",DC="))
        if self.getvar("WIN_USER") is None:
            self.setvar("WIN_USER", "administrator")
    def info(self, msg):
        '''print some information (suppressed in list mode), after
        applying ${VAR} substitution'''
        if not self.list_mode:
            print(self.substitute(msg))
def load_config(self, fname):
'''load the config file'''
f = open(fname)
for line in f:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
colon = line.find(':')
if colon == -1:
raise RuntimeError("Invalid config line '%s'" % line)
varname = line[0:colon].strip()
value = line[colon+1:].strip()
self.setvar(varname, value)
    def list_steps_mode(self):
        '''put wintest in step listing mode (skip() then lists and skips
        every step instead of running it)'''
        self.list_mode = True
    def set_skip(self, skiplist):
        '''set a list of tests to skip, given as a comma-separated string'''
        self.skiplist = skiplist.split(',')
def set_vms(self, vms):
'''set a list of VMs to test'''
if vms is not None:
self.vms = []
for vm in vms.split(','):
vm = vm.upper()
self.vms.append(vm)
    def skip(self, step):
        '''return True if we should skip a step

        In list mode every step is printed and skipped; otherwise the
        step is skipped when it appears in the configured skiplist.
        '''
        if self.list_mode:
            print("\t%s" % step)
            return True
        return step in self.skiplist
def substitute(self, text):
"""Substitute strings of the form ${NAME} in text, replacing
with substitutions from vars.
"""
if isinstance(text, list):
ret = text[:]
for i in range(len(ret)):
ret[i] = self.substitute(ret[i])
return ret
"""We may have objects such as pexpect.EOF that are not strings"""
if not isinstance(text, str):
return text
while True:
var_start = text.find("${")
if var_start == -1:
return text
var_end = text.find("}", var_start)
if var_end == -1:
return text
var_name = text[var_start+2:var_end]
if not var_name in self.vars:
raise RuntimeError("Unknown substitution variable ${%s}" % var_name)
text = text.replace("${%s}" % var_name, self.vars[var_name])
return text
    def have_var(self, varname):
        '''see if a substitution variable has been set'''
        return varname in self.vars
    def have_vm(self, vmname):
        '''see if a VM should be used: it must be configured (have a
        <name>_VM variable) and, when a VM list was given, be in it'''
        if not self.have_var(vmname + '_VM'):
            return False
        if self.vms is None:
            # no restriction list: all configured VMs are used
            return True
        return vmname in self.vms
    def putenv(self, key, value):
        '''putenv with ${VAR} substitution applied to the value'''
        os.environ[key] = self.substitute(value)
    def chdir(self, dir):
        '''chdir with ${VAR} substitution applied to the path'''
        os.chdir(self.substitute(dir))
    def del_files(self, dirs):
        '''delete all regular files under each of the given directories'''
        for d in dirs:
            self.run_cmd("find %s -type f | xargs rm -f" % d)
def write_file(self, filename, text, mode='w'):
'''write to a file'''
f = open(self.substitute(filename), mode=mode)
f.write(self.substitute(text))
f.close()
    def run_cmd(self, cmd, dir=".", show=None, output=False, checkfail=True):
        '''run a command

        cmd may be a string (run through the shell) or a list (run
        directly).  With output=True the combined stdout+stderr is
        returned; with checkfail=True a non-zero exit status raises.
        NOTE(review): the 'show' parameter is accepted but unused.
        '''
        cmd = self.substitute(cmd)
        if isinstance(cmd, list):
            self.info('$ ' + " ".join(cmd))
        else:
            self.info('$ ' + cmd)
        if output:
            return subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=dir).communicate()[0]
        if isinstance(cmd, list):
            shell=False
        else:
            shell=True
        if checkfail:
            return subprocess.check_call(cmd, shell=shell, cwd=dir)
        else:
            return subprocess.call(cmd, shell=shell, cwd=dir)
    def run_child(self, cmd, dir="."):
        '''create a child and return the Popen handle to it'''
        cwd = os.getcwd()
        cmd = self.substitute(cmd)
        if isinstance(cmd, list):
            self.info('$ ' + " ".join(cmd))
        else:
            self.info('$ ' + cmd)
        if isinstance(cmd, list):
            shell=False
        else:
            shell=True
        # start the child from 'dir', then restore our own cwd
        os.chdir(dir)
        ret = subprocess.Popen(cmd, shell=shell, stderr=subprocess.STDOUT)
        os.chdir(cwd)
        return ret
    def cmd_output(self, cmd):
        '''return combined stdout+stderr output from a command'''
        cmd = self.substitute(cmd)
        return self.run_cmd(cmd, output=True)
    def cmd_contains(self, cmd, contains, nomatch=False, ordered=False, regex=False,
                     casefold=True):
        '''check that command output contains the listed strings

        contains may be one string or a list.  regex=True treats each
        entry as a regular expression; ordered=True requires the
        matches to appear in order; nomatch=True inverts the check.
        Raises RuntimeError on failure.
        '''
        if isinstance(contains, str):
            contains = [contains]
        out = self.cmd_output(cmd)
        self.info(out)
        for c in self.substitute(contains):
            if regex:
                if casefold:
                    # NOTE(review): upper-cases 'out' inside the loop; with
                    # 'ordered' truncation below the original-case text is lost
                    c = c.upper()
                    out = out.upper()
                m = re.search(c, out)
                if m is None:
                    start = -1
                    end = -1
                else:
                    start = m.start()
                    end = m.end()
            elif casefold:
                start = out.upper().find(c.upper())
                end = start + len(c)
            else:
                start = out.find(c)
                end = start + len(c)
            if nomatch:
                if start != -1:
                    raise RuntimeError("Expected to not see %s in %s" % (c, cmd))
            else:
                if start == -1:
                    raise RuntimeError("Expected to see %s in %s" % (c, cmd))
            if ordered and start != -1:
                # consume up to this match so later strings must follow it
                out = out[end:]
    def retry_cmd(self, cmd, contains, retries=30, delay=2, wait_for_fail=False,
                  ordered=False, regex=False, casefold=True):
        '''retry a command a number of times until its output matches
        (see cmd_contains for the matching options)'''
        while retries > 0:
            try:
                self.cmd_contains(cmd, contains, nomatch=wait_for_fail,
                                  ordered=ordered, regex=regex, casefold=casefold)
                return
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                time.sleep(delay)
                retries -= 1
                self.info("retrying (retries=%u delay=%u)" % (retries, delay))
        raise RuntimeError("Failed to find %s" % contains)
    def pexpect_spawn(self, cmd, timeout=60, crlf=True, casefold=True):
        '''wrapper around pexpect spawn

        Returns a pexpect child whose sendline()/expect() are patched to
        run their arguments through substitute(); with crlf=True lines
        are sent with CRLF endings (for telnet to Windows), and with
        casefold=True expect() patterns are made case-insensitive.
        '''
        cmd = self.substitute(cmd)
        self.info("$ " + cmd)
        ret = pexpect.spawn(cmd, logfile=sys.stdout, timeout=timeout)

        def sendline_sub(line):
            # substitute, then convert to CRLF line endings if requested
            line = self.substitute(line)
            if crlf:
                line = line.replace('\n', '\r\n') + '\r'
            return ret.old_sendline(line)

        def expect_sub(line, timeout=ret.timeout, casefold=casefold):
            line = self.substitute(line)
            if casefold:
                # prefix (?i) to make string patterns case-insensitive
                if isinstance(line, list):
                    for i in range(len(line)):
                        if isinstance(line[i], str):
                            line[i] = '(?i)' + line[i]
                elif isinstance(line, str):
                    line = '(?i)' + line
            return ret.old_expect(line, timeout=timeout)

        ret.old_sendline = ret.sendline
        ret.sendline = sendline_sub
        ret.old_expect = ret.expect
        ret.expect = expect_sub
        return ret
    def get_nameserver(self):
        '''Get the current nameserver from /etc/resolv.conf'''
        child = self.pexpect_spawn('cat /etc/resolv.conf', crlf=False)
        i = child.expect(['Generated by wintest', 'nameserver'])
        if i == 0:
            # it is our own generated file: skip to the preserved original
            child.expect('your original resolv.conf')
            child.expect('nameserver')
        child.expect('\d+.\d+.\d+.\d+')
        return child.after
    def rndc_cmd(self, cmd, checkfail=True):
        '''run a rndc command against our private BIND instance'''
        self.run_cmd("${RNDC} -c ${PREFIX}/etc/rndc.conf %s" % cmd, checkfail=checkfail)
    def named_supports_gssapi_keytab(self):
        '''see if named supports tkey-gssapi-keytab, by asking
        named-checkconf to validate a config that uses the option'''
        self.write_file("${PREFIX}/named.conf.test",
                        'options { tkey-gssapi-keytab "test"; };')
        try:
            self.run_cmd("${NAMED_CHECKCONF} ${PREFIX}/named.conf.test")
        except subprocess.CalledProcessError:
            return False
        return True
    def set_nameserver(self, nameserver):
        '''set the nameserver in resolv.conf, preserving the original
        contents below a marker and keeping a backup for restore'''
        self.write_file("/etc/resolv.conf.wintest", '''
# Generated by wintest, the Samba v Windows automated testing system
nameserver %s
# your original resolv.conf appears below:
''' % self.substitute(nameserver))
        child = self.pexpect_spawn("cat /etc/resolv.conf", crlf=False)
        i = child.expect(['your original resolv.conf appears below:', pexpect.EOF])
        if i == 0:
            # already our file: keep only the original part that follows
            child.expect(pexpect.EOF)
            contents = child.before.lstrip().replace('\r', '')
            self.write_file('/etc/resolv.conf.wintest', contents, mode='a')
            self.write_file('/etc/resolv.conf.wintest-bak', contents)
        self.run_cmd("mv -f /etc/resolv.conf.wintest /etc/resolv.conf")
        # remembered so restore_resolv_conf() can put things back
        self.resolv_conf_backup = '/etc/resolv.conf.wintest-bak';
    def configure_bind(self, kerberos_support=False, include=None):
        '''generate etc/named.conf and etc/rndc.conf for our private
        BIND instance, optionally with GSSAPI (kerberos) update support
        and an extra include file'''
        self.chdir('${PREFIX}')
        if self.getvar('NAMED_INTERFACE_IPV6'):
            ipv6_listen = 'listen-on-v6 port 53 { ${NAMED_INTERFACE_IPV6}; };'
        else:
            ipv6_listen = ''
        self.setvar('BIND_LISTEN_IPV6', ipv6_listen)
        if not kerberos_support:
            self.setvar("NAMED_TKEY_OPTION", "")
        elif self.getvar('NAMESERVER_BACKEND') != 'SAMBA_INTERNAL':
            if self.named_supports_gssapi_keytab():
                self.setvar("NAMED_TKEY_OPTION",
                            'tkey-gssapi-keytab "${PREFIX}/private/dns.keytab";')
            else:
                # older named: fall back to tkey-gssapi-credential
                self.info("LCREALM=${LCREALM}")
                self.setvar("NAMED_TKEY_OPTION",
                            '''tkey-gssapi-credential "DNS/${LCREALM}";
tkey-domain "${LCREALM}";
''')
            self.putenv('KEYTAB_FILE', '${PREFIX}/private/dns.keytab')
            self.putenv('KRB5_KTNAME', '${PREFIX}/private/dns.keytab')
        else:
            self.setvar("NAMED_TKEY_OPTION", "")
        if include and self.getvar('NAMESERVER_BACKEND') != 'SAMBA_INTERNAL':
            self.setvar("NAMED_INCLUDE", 'include "%s";' % include)
        else:
            self.setvar("NAMED_INCLUDE", '')
        self.run_cmd("mkdir -p ${PREFIX}/etc")
        self.write_file("etc/named.conf", '''
options {
listen-on port 53 { ${NAMED_INTERFACE_IP}; };
${BIND_LISTEN_IPV6}
directory "${PREFIX}/var/named";
dump-file "${PREFIX}/var/named/data/cache_dump.db";
pid-file "${PREFIX}/var/named/named.pid";
statistics-file "${PREFIX}/var/named/data/named_stats.txt";
memstatistics-file "${PREFIX}/var/named/data/named_mem_stats.txt";
allow-query { any; };
recursion yes;
${NAMED_TKEY_OPTION}
max-cache-ttl 10;
max-ncache-ttl 10;
forward only;
forwarders {
${DNSSERVER};
};
};
key "rndc-key" {
algorithm hmac-md5;
secret "lA/cTrno03mt5Ju17ybEYw==";
};
controls {
inet ${NAMED_INTERFACE_IP} port 953
allow { any; } keys { "rndc-key"; };
};
${NAMED_INCLUDE}
''')
        if self.getvar('NAMESERVER_BACKEND') == 'SAMBA_INTERNAL':
            # forward our own realm to the samba internal DNS server
            self.write_file('etc/named.conf',
                            '''
zone "%s" IN {
type forward;
forward only;
forwarders {
%s;
};
};
''' % (self.getvar('LCREALM'), self.getvar('INTERFACE_IP')),
                            mode='a')
        # add forwarding for the windows domains
        domains = self.get_domains()
        for d in domains:
            self.write_file('etc/named.conf',
                            '''
zone "%s" IN {
type forward;
forward only;
forwarders {
%s;
};
};
''' % (d, domains[d]),
                            mode='a')
        self.write_file("etc/rndc.conf", '''
# Start of rndc.conf
key "rndc-key" {
algorithm hmac-md5;
secret "lA/cTrno03mt5Ju17ybEYw==";
};
options {
default-key "rndc-key";
default-server ${NAMED_INTERFACE_IP};
default-port 953;
};
''')
    def stop_bind(self):
        '''Stop our private BIND from listening and operating'''
        self.rndc_cmd("stop", checkfail=False)
        # wait until port 53 actually stops answering
        self.port_wait("${NAMED_INTERFACE_IP}", 53, wait_for_fail=True)
        self.run_cmd("rm -rf var/named")
    def start_bind(self):
        '''restart the test environment version of bind'''
        self.info("Restarting bind9")
        self.chdir('${PREFIX}')
        self.set_nameserver(self.getvar('NAMED_INTERFACE_IP'))
        self.run_cmd("mkdir -p var/named/data")
        self.run_cmd("chown -R ${BIND_USER} var/named")
        # -g keeps named in the foreground so we hold the Popen handle
        self.bind_child = self.run_child("${BIND9} -u ${BIND_USER} -n 1 -c ${PREFIX}/etc/named.conf -g")
        self.port_wait("${NAMED_INTERFACE_IP}", 53)
        self.rndc_cmd("flush")
    def restart_bind(self, kerberos_support=False, include=None):
        '''regenerate the BIND configuration and bounce the daemon'''
        self.configure_bind(kerberos_support=kerberos_support, include=include)
        self.stop_bind()
        self.start_bind()
    def restore_resolv_conf(self):
        '''restore the /etc/resolv.conf after testing is complete'''
        # only if set_nameserver() made a backup earlier
        if getattr(self, 'resolv_conf_backup', False):
            self.info("restoring /etc/resolv.conf")
            self.run_cmd("mv -f %s /etc/resolv.conf" % self.resolv_conf_backup)
    def vm_poweroff(self, vmname, checkfail=True):
        '''power off a VM via the configured ${VM_POWEROFF} command'''
        self.setvar('VMNAME', vmname)
        self.run_cmd("${VM_POWEROFF}", checkfail=checkfail)
    def vm_reset(self, vmname):
        '''hard-reset a VM via the configured ${VM_RESET} command'''
        self.setvar('VMNAME', vmname)
        self.run_cmd("${VM_RESET}")
    def vm_restore(self, vmname, snapshot):
        '''restore a VM snapshot via the configured ${VM_RESTORE} command'''
        self.setvar('VMNAME', vmname)
        self.setvar('SNAPSHOT', snapshot)
        self.run_cmd("${VM_RESTORE}")
    def ping_wait(self, hostname):
        '''wait for a hostname to come up on the network (up to 10 pings)'''
        hostname = self.substitute(hostname)
        loops=10
        while loops > 0:
            try:
                self.run_cmd("ping -c 1 -w 10 %s" % hostname)
                break
            except:
                loops = loops - 1
        if loops == 0:
            raise RuntimeError("Failed to ping %s" % hostname)
        self.info("Host %s is up" % hostname)
def port_wait(self, hostname, port, retries=200, delay=3, wait_for_fail=False):
'''wait for a host to come up on the network'''
while retries > 0:
child = self.pexpect_spawn("nc -v -z -w 1 %s %u" % (hostname, port), crlf=False, timeout=1)
child.expect([pexpect.EOF, pexpect.TIMEOUT])
child.close()
i = child.exitstatus
if wait_for_fail:
#wait for timeout or fail
if i == None or i > 0:
return
else:
if i == 0:
return
time.sleep(delay)
retries -= 1
self.info("retrying (retries=%u delay=%u)" % (retries, delay))
raise RuntimeError("gave up waiting for %s:%d" % (hostname, port))
    def run_net_time(self, child):
        '''run net time on windows to sync the clock from our host'''
        child.sendline("net time \\\\${HOSTNAME} /set")
        child.expect("Do you want to set the local computer")
        child.sendline("Y")
        child.expect("The command completed successfully")
    def run_date_time(self, child, time_tuple=None):
        '''run date and time on windows, setting them from time_tuple
        (defaults to the local time on this host)'''
        if time_tuple is None:
            time_tuple = time.localtime()
        child.sendline("date")
        child.expect("Enter the new date:")
        # the prompt format tells us which date ordering windows expects
        i = child.expect(["dd-mm-yy", "mm-dd-yy"])
        if i == 0:
            child.sendline(time.strftime("%d-%m-%y", time_tuple))
        else:
            child.sendline(time.strftime("%m-%d-%y", time_tuple))
        child.expect("C:")
        child.sendline("time")
        child.expect("Enter the new time:")
        child.sendline(time.strftime("%H:%M:%S", time_tuple))
        child.expect("C:")
    def get_ipconfig(self, child):
        '''get the IP configuration of the child, storing the NIC name,
        IPv4 address, netmask and default gateway as WIN_* variables'''
        child.sendline("ipconfig /all")
        child.expect('Ethernet adapter ')
        child.expect("[\w\s]+")
        self.setvar("WIN_NIC", child.after)
        child.expect(['IPv4 Address', 'IP Address'])
        child.expect('\d+.\d+.\d+.\d+')
        self.setvar('WIN_IPV4_ADDRESS', child.after)
        child.expect('Subnet Mask')
        child.expect('\d+.\d+.\d+.\d+')
        self.setvar('WIN_SUBNET_MASK', child.after)
        child.expect('Default Gateway')
        # the gateway may be absent, in which case we hit the prompt first
        i = child.expect(['\d+.\d+.\d+.\d+', "C:"])
        if i == 0:
            self.setvar('WIN_DEFAULT_GATEWAY', child.after)
            child.expect("C:")
    def get_is_dc(self, child):
        '''check if a windows machine is a domain controller

        Runs dcdiag and, depending on its output, falls back to
        "net config Workstation" to compare the domain/hostname against
        WIN_DOMAIN/WIN_HOSTNAME.  Returns True/False (falls off the end
        returning None, which callers treat as false).
        '''
        child.sendline("dcdiag")
        i = child.expect(["is not a [Directory Server|DC]",
                          "is not recognized as an internal or external command",
                          "Home Server = ",
                          "passed test Replications"])
        if i == 0:
            return False
        if i == 1 or i == 3:
            child.expect("C:")
            child.sendline("net config Workstation")
            child.expect("Workstation domain")
            child.expect('[\S]+')
            domain = child.after
            i = child.expect(["Workstation Domain DNS Name", "Logon domain"])
            '''If we get the Logon domain first, we are not in an AD domain'''
            if i == 1:
                return False
            if domain.upper() == self.getvar("WIN_DOMAIN").upper():
                return True
        # dcdiag printed "Home Server = ": the next token is the hostname
        child.expect('[\S]+')
        hostname = child.after
        if hostname.upper() == self.getvar("WIN_HOSTNAME").upper():
            return True
    def set_noexpire(self, child, username):
        """Ensure this user's password does not expire"""
        child.sendline('wmic useraccount where name="%s" set PasswordExpires=FALSE' % username)
        child.expect("update successful")
        child.expect("C:")
    def run_tlntadmn(self, child):
        '''remove the annoying telnet connection-count restrictions'''
        child.sendline('tlntadmn config maxconn=1024')
        # "Access is denied" is tolerated: the setting is best-effort
        child.expect(["The settings were successfully updated", "Access is denied"])
        child.expect("C:")
    def disable_firewall(self, child):
        '''remove the annoying firewall (best effort, failures ignored)'''
        child.sendline('netsh advfirewall set allprofiles state off')
        i = child.expect(["Ok", "The following command was not found: advfirewall set allprofiles state off", "The requested operation requires elevation", "Access is denied"])
        child.expect("C:")
        if i == 1:
            # older windows: fall back to the pre-Vista netsh syntax
            child.sendline('netsh firewall set opmode mode = DISABLE profile = ALL')
            i = child.expect(["Ok", "The following command was not found", "Access is denied"])
            if i != 0:
                self.info("Firewall disable failed - ignoring")
            child.expect("C:")
def set_dns(self, child):
child.sendline('netsh interface ip set dns "${WIN_NIC}" static ${NAMED_INTERFACE_IP} primary')
i = child.expect(['C:', pexpect.EOF, pexpect.TIMEOUT], timeout=5)
if i > 0:
return True
else:
return False
    def set_ip(self, child):
        """fix the IP address to the same value it had when we
        connected, but don't use DHCP, and force the DNS server to our
        DNS server.  This allows DNS updates to run"""
        self.get_ipconfig(child)
        # sanity check: the address windows reports must match the one
        # we resolved via nmblookup when connecting
        if self.getvar("WIN_IPV4_ADDRESS") != self.getvar("WIN_IP"):
            raise RuntimeError("ipconfig address %s != nmblookup address %s" % (self.getvar("WIN_IPV4_ADDRESS"),
                                                                               self.getvar("WIN_IP")))
        child.sendline('netsh')
        child.expect('netsh>')
        child.sendline('offline')
        child.expect('netsh>')
        child.sendline('routing ip add persistentroute dest=0.0.0.0 mask=0.0.0.0 name="${WIN_NIC}" nhop=${WIN_DEFAULT_GATEWAY}')
        child.expect('netsh>')
        child.sendline('interface ip set address "${WIN_NIC}" static ${WIN_IPV4_ADDRESS} ${WIN_SUBNET_MASK} ${WIN_DEFAULT_GATEWAY} 1 store=persistent')
        i = child.expect(['The syntax supplied for this command is not valid. Check help for the correct syntax', 'netsh>', pexpect.EOF, pexpect.TIMEOUT], timeout=5)
        if i == 0:
            # older netsh does not understand store=persistent
            child.sendline('interface ip set address "${WIN_NIC}" static ${WIN_IPV4_ADDRESS} ${WIN_SUBNET_MASK} ${WIN_DEFAULT_GATEWAY} 1')
            child.expect('netsh>')
        child.sendline('commit')
        child.sendline('online')
        child.sendline('exit')
        child.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=5)
        return True
    def resolve_ip(self, hostname, retries=60, delay=5):
        '''resolve an IP given a hostname, assuming NBT (nmblookup)'''
        while retries > 0:
            child = self.pexpect_spawn("bin/nmblookup %s" % hostname)
            i = 0
            while i == 0:
                # skip over any number of "querying ..." progress lines
                i = child.expect(["querying", '\d+.\d+.\d+.\d+', hostname, "Lookup failed"])
                if i == 0:
                    child.expect("\r")
            if i == 1:
                return child.after
            retries -= 1
            time.sleep(delay)
            self.info("retrying (retries=%u delay=%u)" % (retries, delay))
        raise RuntimeError("Failed to resolve IP of %s" % hostname)
    def open_telnet(self, hostname, username, password, retries=60, delay=5, set_time=False, set_ip=False,
                    disable_firewall=True, run_tlntadmn=True, set_noexpire=False):
        '''open a telnet connection to a windows server, return the pexpect child

        Retries on connection problems, fixes up TelnetClients group
        membership and the telnet service as needed, and performs the
        requested one-time setup (time, IP, firewall, ...) on the
        session before returning it.
        '''
        set_route = False
        set_dns = False
        set_telnetclients = True
        # NOTE(review): start_telnet is set below but never read
        start_telnet = True
        if self.getvar('WIN_IP'):
            ip = self.getvar('WIN_IP')
        else:
            ip = self.resolve_ip(hostname)
            self.setvar('WIN_IP', ip)
        while retries > 0:
            child = self.pexpect_spawn("telnet " + ip + " -l '" + username + "'")
            i = child.expect(["Welcome to Microsoft Telnet Service",
                              "Denying new connections due to the limit on number of connections",
                              "No more connections are allowed to telnet server",
                              "Unable to connect to remote host",
                              "No route to host",
                              "Connection refused",
                              pexpect.EOF])
            if i != 0:
                child.close()
                time.sleep(delay)
                retries -= 1
                self.info("retrying (retries=%u delay=%u)" % (retries, delay))
                continue
            child.expect("password:")
            child.sendline(password)
            i = child.expect(["C:",
                              "TelnetClients",
                              "Denying new connections due to the limit on number of connections",
                              "No more connections are allowed to telnet server",
                              "Unable to connect to remote host",
                              "No route to host",
                              "Connection refused",
                              pexpect.EOF])
            if i == 1:
                # user lacks TelnetClients membership: add it once via RPC
                if set_telnetclients:
                    self.run_cmd('bin/net rpc group add TelnetClients -S $WIN_IP -U$WIN_USER%$WIN_PASS')
                    self.run_cmd('bin/net rpc group addmem TelnetClients "authenticated users" -S $WIN_IP -U$WIN_USER%$WIN_PASS')
                    child.close()
                    retries -= 1
                    set_telnetclients = False
                    self.info("retrying (retries=%u delay=%u)" % (retries, delay))
                    continue
                else:
                    raise RuntimeError("Failed to connect with telnet due to missing TelnetClients membership")
            if i == 6:
                # This only works if it is installed and enabled, but not started. Not entirely likely, but possible
                self.run_cmd('bin/net rpc service start TlntSvr -S $WIN_IP -U$WIN_USER%$WIN_PASS')
                child.close()
                start_telnet = False
                retries -= 1
                self.info("retrying (retries=%u delay=%u)" % (retries, delay))
                continue
            if i != 0:
                child.close()
                time.sleep(delay)
                retries -= 1
                self.info("retrying (retries=%u delay=%u)" % (retries, delay))
                continue
            # one-time session setup; each flag is cleared once done
            if set_dns:
                set_dns = False
                if self.set_dns(child):
                    continue;
            if set_route:
                child.sendline('route add 0.0.0.0 mask 0.0.0.0 ${WIN_DEFAULT_GATEWAY}')
                child.expect("C:")
                set_route = False
            if set_time:
                self.run_date_time(child, None)
                set_time = False
            if run_tlntadmn:
                self.run_tlntadmn(child)
                run_tlntadmn = False
            if set_noexpire:
                self.set_noexpire(child, username)
                set_noexpire = False
            if disable_firewall:
                self.disable_firewall(child)
                disable_firewall = False
            if set_ip:
                set_ip = False
                if self.set_ip(child):
                    # session dropped: schedule route+dns fixups and reconnect
                    set_route = True
                    set_dns = True
                continue
            return child
        raise RuntimeError("Failed to connect with telnet")
def kinit(self, username, password):
'''use kinit to setup a credentials cache'''
self.run_cmd("kdestroy")
self.putenv('KRB5CCNAME', "${PREFIX}/ccache.test")
username = self.substitute(username)
s = username.split('@')
if len(s) > 0:
s[1] = s[1].upper()
username = '@'.join(s)
child = self.pexpect_spawn('kinit ' + username)
child.expect("Password")
child.sendline(password)
child.expect(pexpect.EOF)
child.close()
if child.exitstatus != 0:
raise RuntimeError("kinit failed with status %d" % child.exitstatus)
def get_domains(self):
'''return a dictionary of DNS domains and IPs for named.conf'''
ret = {}
for v in self.vars:
if v[-6:] == "_REALM":
base = v[:-6]
if base + '_IP' in self.vars:
ret[self.vars[base + '_REALM']] = self.vars[base + '_IP']
return ret
def wait_reboot(self, retries=3):
'''wait for a VM to reboot'''
# first wait for it to shutdown
self.port_wait("${WIN_IP}", 139, wait_for_fail=True, delay=6)
# now wait for it to come back. If it fails to come back
# then try resetting it
while retries > 0:
try:
self.port_wait("${WIN_IP}", 139)
return
except:
retries -= 1
self.vm_reset("${WIN_VM}")
self.info("retrying reboot (retries=%u)" % retries)
raise RuntimeError(self.substitute("VM ${WIN_VM} failed to reboot"))
def get_vms(self):
'''return a dictionary of all the configured VM names'''
ret = []
for v in self.vars:
if v[-3:] == "_VM":
ret.append(self.vars[v])
return ret
    def run_dcpromo_as_first_dc(self, vm, func_level=None):
        '''Promote the Windows VM to be the first DC of a new forest using
        an unattended dcpromo run driven over telnet.

        func_level may be '2008r2', '2003' or None (the default level 0).
        '''
        self.setwinvars(vm)
        self.info("Configuring a windows VM ${WIN_VM} at the first DC in the domain using dcpromo")
        child = self.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}", set_time=True)
        # nothing to do if the VM is already a DC
        if self.get_is_dc(child):
            return
        # map the requested functional level to the integer dcpromo expects
        if func_level == '2008r2':
            self.setvar("FUNCTION_LEVEL_INT", str(4))
        elif func_level == '2003':
            self.setvar("FUNCTION_LEVEL_INT", str(1))
        else:
            self.setvar("FUNCTION_LEVEL_INT", str(0))
        child = self.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}", set_ip=True, set_noexpire=True)
        """This server must therefore not yet be a directory server, so we must promote it"""
        # write the unattended answer file on the Windows side via "copy con"
        child.sendline("copy /Y con answers.txt")
        child.sendline('''
[DCInstall]
; New forest promotion
ReplicaOrNewDomain=Domain
NewDomain=Forest
NewDomainDNSName=${WIN_REALM}
ForestLevel=${FUNCTION_LEVEL_INT}
DomainNetbiosName=${WIN_DOMAIN}
DomainLevel=${FUNCTION_LEVEL_INT}
InstallDNS=Yes
ConfirmGc=Yes
CreateDNSDelegation=No
DatabasePath="C:\Windows\NTDS"
LogPath="C:\Windows\NTDS"
SYSVOLPath="C:\Windows\SYSVOL"
; Set SafeModeAdminPassword to the correct value prior to using the unattend file
SafeModeAdminPassword=${WIN_PASS}
; Run-time flags (optional)
RebootOnCompletion=No
''')
        child.expect("copied.")
        child.expect("C:")
        child.expect("C:")
        child.sendline("dcpromo /answer:answers.txt")
        i = child.expect(["You must restart this computer", "failed", "Active Directory Domain Services was not installed", "C:", pexpect.TIMEOUT], timeout=240)
        if i == 1 or i == 2:
            raise Exception("dcpromo failed")
        if i == 4: # timeout
            # NOTE(review): on timeout we just reconnect and fall through to
            # the reboot below -- presumably dcpromo completed without
            # matching a pattern; confirm this is intended, not an error path.
            child = self.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}")
        # reboot to complete the promotion and wait for SMB to come back
        child.sendline("shutdown -r -t 0")
        self.port_wait("${WIN_IP}", 139, wait_for_fail=True)
        self.port_wait("${WIN_IP}", 139)
        child = self.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}")
        # Check if we became a DC by now
        if not self.get_is_dc(child):
            raise Exception("dcpromo failed (and wasn't a DC even after rebooting)")
        # Give DNS registration a kick
        child.sendline("ipconfig /registerdns")
        # wait until the new DC's LDAP SRV record is resolvable
        self.retry_cmd("host -t SRV _ldap._tcp.${WIN_REALM} ${WIN_IP}", ['has SRV record'], retries=60, delay=5 )
    def start_winvm(self, vm):
        '''start a Windows VM from its known-good snapshot

        Powers the VM off first (ignoring failure when it is already off),
        then restores the configured snapshot.
        '''
        self.setwinvars(vm)
        self.info("Joining a windows box to the domain")
        self.vm_poweroff("${WIN_VM}", checkfail=False)
        self.vm_restore("${WIN_VM}", "${WIN_SNAPSHOT}")
    def run_winjoin(self, vm, domain, username="administrator", password="${PASSWORD1}"):
        '''join a windows box to a domain

        NOTE(review): the *vm* argument is accepted but never used here --
        presumably callers run setwinvars()/start_winvm() first; confirm.
        '''
        child = self.open_telnet("${WIN_HOSTNAME}", "${WIN_USER}", "${WIN_PASS}", set_time=True, set_ip=True, set_noexpire=True)
        retries = 5
        while retries > 0:
            # flush the DNS cache so the domain lookup below is fresh
            child.sendline("ipconfig /flushdns")
            child.expect("C:")
            child.sendline("netdom join ${WIN_HOSTNAME} /Domain:%s /UserD:%s /PasswordD:%s" % (domain, username, password))
            i = child.expect(["The command completed successfully",
                              "The specified domain either does not exist or could not be contacted."], timeout=120)
            if i == 0:
                break
            # NOTE(review): when all retries are exhausted the loop falls
            # through and the reboot below happens anyway -- confirm whether
            # this should raise instead.
            time.sleep(10)
            retries -= 1
        child.expect("C:")
        # reboot so the join takes effect, then re-register DNS records
        child.sendline("shutdown /r -t 0")
        self.wait_reboot()
        child = self.open_telnet("${WIN_HOSTNAME}", "${WIN_USER}", "${WIN_PASS}", set_time=True, set_ip=True)
        child.sendline("ipconfig /registerdns")
        child.expect("Registration of the DNS resource records for all adapters of this computer has been initiated. Any errors will be reported in the Event Viewer")
        child.expect("C:")
def test_remote_smbclient(self, vm, username="${WIN_USER}", password="${WIN_PASS}", args=""):
'''test smbclient against remote server'''
self.setwinvars(vm)
self.info('Testing smbclient')
self.chdir('${PREFIX}')
smbclient = self.getvar("smbclient")
self.cmd_contains("%s --version" % (smbclient), ["${SAMBA_VERSION}"])
self.retry_cmd('%s -L ${WIN_HOSTNAME} -U%s%%%s %s' % (smbclient, username, password, args), ["IPC"], retries=60, delay=5)
def test_net_use(self, vm, realm, domain, username, password):
self.setwinvars(vm)
self.info('Testing net use against Samba3 member')
child = self.open_telnet("${WIN_HOSTNAME}", "%s\\%s" % (domain, username), password)
child.sendline("net use t: \\\\${HOSTNAME}.%s\\test" % realm)
child.expect("The command completed successfully")
    def setup(self, testname, subdir):
        '''setup for main tests, parsing command line

        Parses the common command-line options, loads the configuration
        file and applies the option overrides to the test variables.

        NOTE(review): *testname* is accepted but not used in this method --
        confirm whether callers rely on it.
        '''
        self.parser.add_option("--conf", type='string', default='', help='config file')
        self.parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
        self.parser.add_option("--vms", type='string', default=None, help='list of VMs to use (comma separated)')
        self.parser.add_option("--list", action='store_true', default=False, help='list the available steps')
        self.parser.add_option("--rebase", action='store_true', default=False, help='do a git pull --rebase')
        self.parser.add_option("--clean", action='store_true', default=False, help='clean the tree')
        self.parser.add_option("--prefix", type='string', default=None, help='override install prefix')
        self.parser.add_option("--sourcetree", type='string', default=None, help='override sourcetree location')
        self.parser.add_option("--nocleanup", action='store_true', default=False, help='disable cleanup code')
        self.parser.add_option("--use-ntvfs", action='store_true', default=False, help='use NTVFS for the fileserver')
        self.parser.add_option("--dns-backend", type="choice",
            choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"],
            help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), " \
                 "BIND9_FLATFILE uses bind9 text database to store zone information, " \
                 "BIND9_DLZ uses samba4 AD to store zone information, " \
                 "NONE skips the DNS setup entirely (not recommended)",
            default="SAMBA_INTERNAL")
        self.opts, self.args = self.parser.parse_args()
        # a config file is mandatory
        if not self.opts.conf:
            print("Please specify a config file with --conf")
            sys.exit(1)
        # we don't need fsync safety in these tests
        self.putenv('TDB_NO_FSYNC', '1')
        self.load_config(self.opts.conf)
        # the host must not use the generated name server as its own
        # resolver, or lookups would loop back into the test DNS setup
        nameserver = self.get_nameserver()
        if nameserver == self.getvar('NAMED_INTERFACE_IP'):
            raise RuntimeError("old /etc/resolv.conf must not contain %s as a nameserver, this will create loops with the generated dns configuration" % nameserver)
        self.setvar('DNSSERVER', nameserver)
        self.set_skip(self.opts.skip)
        self.set_vms(self.opts.vms)
        if self.opts.list:
            self.list_steps_mode()
        # command-line overrides for configuration variables
        if self.opts.prefix:
            self.setvar('PREFIX', self.opts.prefix)
        if self.opts.sourcetree:
            self.setvar('SOURCETREE', self.opts.sourcetree)
        if self.opts.rebase:
            self.info('rebasing')
            self.chdir('${SOURCETREE}')
            self.run_cmd('git pull --rebase')
        if self.opts.clean:
            self.info('cleaning')
            self.chdir('${SOURCETREE}/' + subdir)
            self.run_cmd('make clean')
        if self.opts.use_ntvfs:
            self.setvar('USE_NTVFS', "--use-ntvfs")
        else:
            self.setvar('USE_NTVFS', "")
        self.setvar('NAMESERVER_BACKEND', self.opts.dns_backend)
        # forward unknown names to the host's original resolver
        self.setvar('DNS_FORWARDER', "--option=dns forwarder=%s" % nameserver)
|
dacjames/docker-graphite-statsd | refs/heads/master | conf/opt/graphite/webapp/graphite/local_settings.py | 1 | ## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'
# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True
# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'
# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
STORAGE_DIR = '/var/lib/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
WHISPER_DIR = '/var/lib/graphite/storage/whisper'
RRD_DIR = '/var/lib/graphite/storage/rrd'
DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
import os
# Ensure a locale is always set for subprocesses spawned by the webapp.
os.environ.setdefault('LANG','en_US')
LOG_DIR = '/var/log/graphite'
# NOTE(review): the shell command substitution below is NOT executed by
# Python -- SECRET_KEY is this literal string, identical on every install.
# Presumably an entrypoint/build script is expected to substitute a real
# value here; confirm, otherwise replace it with an actual random secret.
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'
|
RevolutionMC/Revolution | refs/heads/master | plugin.video.PsychoTV/_ytplist.py | 173 | import urllib
import urllib2,json
import xbmcvfs
import requests,time
import os,xbmc,xbmcaddon,xbmcgui,re
addon = xbmcaddon.Addon('plugin.video.live.streamspro')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
cacheDir = os.path.join(profile, 'cachedir')
clean_cache=os.path.join(cacheDir,'cleancacheafter1month')
headers=dict({'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:32.0) Gecko/20100101 Firefox/32.0'})
if not cacheDir.startswith(('smb://', 'nfs://', 'upnp://', 'ftp://')) and not os.path.isdir(cacheDir):
os.mkdir(cacheDir)
if xbmcvfs.exists(clean_cache) and (time.time()-os.path.getmtime(clean_cache) > 60*60*24*30):
print 'time of creation of ff',str(time.time()-os.path.getmtime(clean_cache))
import shutil
shutil.rmtree(cacheDir)
else:
with open(clean_cache,'w') as f:
f.write('')
utubeid = 'www.youtube.*?v(?:=|%3D)([0-9A-Za-z_-]{11})'
def YoUTube(page_data,youtube=None,duration=None,max_page=20,nosave=None):
    """Fetch YouTube video metadata via the gdata v2 JSON feed.

    page_data selects the mode: if it contains 'search', *youtube* is used
    as the search query; otherwise page_data is treated as a video id.
    Returns a dict of item dicts when *nosave* is truthy, else None.

    NOTE(review): the gdata v2 API used here was shut down by Google long
    ago -- every request presumably fails and the loop breaks immediately.
    NOTE(review): *duration* is accepted but never used.
    """
    pDialog = xbmcgui.DialogProgress()
    pDialog.create('Updating list', 'Downloading ...')
    base_yt_url ='http://gdata.youtube.com/feeds/api'
    if 'search' in page_data:
        # query terms must be '+'-separated in the URL
        youtube = youtube.replace(' ','+')#Lana Del Rey
        build_url= base_yt_url + '/videos?q=%s&max-results=50&v=2&alt=json&orderby=published&start-index=%s'
        if addon.getSetting('searchlongvideos') == 'true': #duration: #medium or long
            build_url = base_yt_url + '/videos?q=%s&max-results=20&v=2&alt=json&duration=long&start-index=%s'
    else:
        build_url = 'http://www.youtube.com/watch?v=%s' %page_data
    # NOTE(review): count serves both as the feed start-index and as the
    # per-item counter, so pages are advanced by however many items were
    # parsed -- confirm this pagination is intentional.
    count = 1
    allurls ={}
    for i in range(1,max_page):
        url = build_url %(youtube,str(count))
        #print url
        try:
            # cached fetch; cache lifetime (days) comes from addon settings
            content = cache(url,int(addon.getSetting("Youtube")))
            print len(content)
            jcontent = json.loads(content)
            entry = jcontent['feed']['entry']
        except Exception:
            # no more pages (or request/parse failure): stop paginating
            break
        for myUrl in entry:
            count += 1
            allitem = 'item' + str(count)
            item = {}
            item['title']= removeNonAscii(myUrl['title']['$t']).encode('utf-8')
            item['date']= myUrl['published']['$t'].encode('utf-8')
            try:
                item['desc']= removeNonAscii(myUrl['media$group']['media$description']['$t']).encode('utf-8')
            except Exception:
                # NOTE(review): this assigns a throwaway local instead of
                # item['desc'], so items without a description have no
                # 'desc' key at all (and 'UNAVAIABLE' is a typo) -- confirm
                # whether consumers handle the missing key.
                desc = 'UNAVAIABLE'
            link = myUrl['link'][0]['href'].encode('utf-8','ignore')
            # extract the 11-character video id from the watch URL
            item['url']= re_me(link,utubeid)
            allurls[allitem] = item
    print len(allurls)
    if nosave:
        return allurls
    pDialog.close()
def re_me(data, re_patten):
    """Return the first capture group of *re_patten* found in *data*
    (case-insensitive search), or '' when there is no match."""
    m = re.search(re_patten, data, re.I)
    # idiom fix: compare to None with "is", and return directly instead of
    # going through a mutable "match" variable
    if m is not None:
        return m.group(1)
    return ''
def notification(header="", message="", sleep=3000):
    """Pop up an XBMC notification dialog.

    *sleep* controls how long the notification stays visible, in
    milliseconds.
    """
    command = "XBMC.Notification(%s,%s,%i)" % ( header, message, sleep )
    xbmc.executebuiltin(command)
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def makeRequest(url,referer=None,post=None,body={}):
    """Fetch *url* with the module-level headers and return the body.

    NOTE(review): *post* and *body* are accepted but never used -- kept
    only for interface compatibility with existing callers.
    """
    if referer:
        # BUGFIX: was "headers.update=({...})", which merely assigned an
        # attribute named "update" instead of calling dict.update(), and --
        # because the request lived in the "else" branch -- any call with a
        # referer crashed with UnboundLocalError on "data".
        headers.update({'Referer':referer})
    req = urllib2.Request(url,None,headers)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    return data
# from AddonScriptorde X:\plugin.video.my_music_tv\default.py
# from AddonScriptorde X:\plugin.video.my_music_tv\default.py
def cache(url, duration=0):
    """Return the body of *url*, memoised on disk.

    A cached copy is reused while it is younger than *duration* days;
    with duration=0 the cache is bypassed and the file is refreshed.
    """
    safe_name = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()
    cacheFile = os.path.join(cacheDir, safe_name)
    is_fresh = (os.path.exists(cacheFile) and duration != 0
                and (time.time() - os.path.getmtime(cacheFile) < 60*60*24*duration))
    if is_fresh:
        fh = xbmcvfs.File(cacheFile, 'r')
        content = fh.read()
        fh.close()
    else:
        content = makeRequest(url)
        fh = xbmcvfs.File(cacheFile, 'w')
        fh.write(content)
        fh.close()
    return content
|
dknlght/dkodi | refs/heads/master | src/plugin.video.animehere/servers/filebox.py | 35 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para filebox
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
    """Check that the filebox page still hosts the file.

    Returns (True, "") when available, or (False, <user-facing message>)
    when filebox reports the file as missing or deleted.
    """
    # BUGFIX: the log tag said "[streamcloud.py]" -- a copy-paste leftover
    # from the streamcloud connector this file was based on.
    logger.info("[filebox.py] test_video_exists(page_url='%s')" % page_url)
    data = scrapertools.cache_page( url = page_url )
    if "<b>File Not Found</b>" in data:
        return False,"El archivo no existe<br/>en filebox o ha sido borrado."
    else:
        return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a filebox page URL into playable media URLs.

    Returns a list of [label, media_url] pairs. The premium/user/password
    arguments are part of the common connector interface and are unused
    here.
    """
    logger.info("[filebox.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    # Reference copy of the hidden form fields the site's download page
    # contains (kept from the original author):
    '''
    <input type="hidden" name="op" value="download2">
    <input type="hidden" name="id" value="235812b1j9w1">
    <input type="hidden" name="rand" value="na73zeeooqyfkndsv4uxzzpbajwi6mhbmixtogi">
    <input type="hidden" name="referer" value="http://www.seriesyonkis.com/s/ngo/2/5/1/8/773">
    '''
    logger.info("[filebox.py] URL ")
    data = scrapertools.cache_page(page_url)
    # Wait out the 5-second countdown the site imposes before the download
    # form may be submitted (use the XBMC wait dialog when available).
    try:
        from platformcode.xbmc import xbmctools
        xbmctools.handle_wait(5,"filebox",'')
    except:
        # NOTE(review): bare except -- presumably meant to catch only the
        # ImportError outside XBMC; confirm.
        import time
        time.sleep(5)
    # pull the hidden form tokens out of the first page
    codigo = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)">[^<]+')
    rand = scrapertools.get_match(data,'<input type="hidden" name="rand" value="([^"]+)">')
    #op=download2&id=xuquejiv6xdf&rand=r6dq7hn7so2ygpnxv2zg2i3cu3sbdsunf57gtni&referer=&method_free=&method_premium=&down_direct=1
    post = "op=download2&id="+codigo+"&rand="+rand+"&referer=&method_free=&method_premium=&down_direct=1"
    # submit the download form; the response embeds the player setup
    data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    logger.info("data="+data)
    # the media URL appears in the player's play('...') call
    media_url = scrapertools.get_match(data,"this.play\('([^']+)'")
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [filebox]",media_url])
    for video_url in video_urls:
        logger.info("[filebox.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan *data* for filebox links.

    Returns a list of [title, url, server] triples with duplicates
    removed (first occurrence wins).
    """
    encontrados = set()
    devuelve = []

    # embed form: http://www.filebox.com/embed-wa5p8wzh7tlq-700x385.html
    patronvideos = 'filebox.com/embed-([0-9a-zA-Z]+)'
    logger.info("[filebox.py] find_videos #"+patronvideos+"#")
    for video_id in re.compile(patronvideos,re.DOTALL).findall(data):
        url = "http://www.filebox.com/"+video_id
        if url in encontrados:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        devuelve.append( [ "[filebox]" , url , 'filebox' ] )
        encontrados.add(url)

    # plain form: http://www.filebox.com/729x1eo9zrx1
    patronvideos = 'filebox.com/([0-9a-zA-Z]+)'
    logger.info("[filebox.py] find_videos #"+patronvideos+"#")
    for video_id in re.compile(patronvideos,re.DOTALL).findall(data):
        url = "http://www.filebox.com/"+video_id
        # the broader pattern also matches the literal "embed" prefix of
        # embed links, which is not a video id
        if url!="http://www.filebox.com/embed" and url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ "[filebox]" , url , 'filebox' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
def test():
    """Smoke test: resolving a known page must yield at least one URL."""
    urls = get_video_url("http://www.filebox.com/sstr2hlxt398")
    return len(urls) > 0
|
VisheshHanda/production_backup | refs/heads/master | erpnext/patches/repair_tools/set_stock_balance_as_per_serial_no.py | 90 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Recompute stock balances from serial numbers.

	Enables auto-commit so the potentially large number of writes is not
	accumulated in one huge transaction, and always restores the flag,
	even if the rebuild raises.
	"""
	from erpnext.stock.stock_balance import set_stock_balance_as_per_serial_no
	frappe.db.auto_commit_on_many_writes = 1
	try:
		set_stock_balance_as_per_serial_no()
	finally:
		# BUGFIX: restore the flag on failure too, so later patches in the
		# same session are not left running in auto-commit mode
		frappe.db.auto_commit_on_many_writes = 0
|
lhfei/spark-in-action | refs/heads/master | spark-2.x/src/main/python/ml/bisecting_k_means_example.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating bisecting k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/bisecting_k_means_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import BisectingKMeans
from pyspark.ml.evaluation import ClusteringEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # One SparkSession per application; getOrCreate() reuses any existing
    # session instead of failing.
    spark = SparkSession\
        .builder\
        .appName("BisectingKMeansExample")\
        .getOrCreate()

    # $example on$
    # Loads data.
    dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

    # Trains a bisecting k-means model.
    # The seed is fixed so the example output is reproducible.
    bkm = BisectingKMeans().setK(2).setSeed(1)
    model = bkm.fit(dataset)

    # Make predictions
    predictions = model.transform(dataset)

    # Evaluate clustering by computing Silhouette score
    evaluator = ClusteringEvaluator()
    silhouette = evaluator.evaluate(predictions)
    print("Silhouette with squared euclidean distance = " + str(silhouette))

    # Shows the result.
    print("Cluster Centers: ")
    centers = model.clusterCenters()
    for center in centers:
        print(center)
    # $example off$

    spark.stop()
SeattleTestbed/repy_v2 | refs/heads/master | emulfile.py | 1 | """
Author: Justin Cappos, Armon Dadgar
Start Date: 27 June 2008
V.2 Start Date: January 14th 2009
Description:
This is a collection of functions, etc. that need to be emulated in order
to provide the programmer with a reasonable environment. This is used
by repy.py to provide a highly restricted (but usable) environment.
"""
import nanny
# Used for path and file manipulation
import os
import os.path
# Used to handle a fatal exception
import tracebackrepy
# Used to get a lock object
import threading
# Get access to the current working directory
import repy_constants
# Import all the exceptions
from exception_hierarchy import *
# Fix for SeattleTestbed/attic#983.
# By retaining a reference to unicode, we prevent os.path.abspath from
# failing in some versions of python when the unicode builtin is overwritten.
os.path.unicode = unicode
# Store a reference to open, so that we retain access
# after the builtin's are disabled
safe_open = open
##### Constants
# This restricts the number of characters in filenames
MAX_FILENAME_LENGTH = 120
# This is the set of characters which are allowed in a file name
ALLOWED_FILENAME_CHAR_SET = set('abcdefghijklmnopqrstuvwxyz0123456789._-')
# This is the set of filenames which are forbidden.
ILLEGAL_FILENAMES = set(["", ".", ".."])
##### Module data
# This set contains the filenames of every file which is open
# Access to this set should be serialized via the OPEN_FILES_LOCK
OPEN_FILES_LOCK = threading.Lock()
OPEN_FILES = set([])
##### Public Functions
def listfiles():
  """
  <Purpose>
    Allows the user program to get a list of files in their vessel.

  <Arguments>
    None

  <Exceptions>
    None

  <Side Effects>
    None

  <Resource Consumption>
    Consumes 4K of fileread.

  <Returns>
    A list of strings (file names)
  """
  # Listing the vessel directory is charged as a 4K read.
  nanny.tattle_quantity('fileread', 4096)

  return os.listdir(repy_constants.REPY_CURRENT_DIR)
def removefile(filename):
  """
  <Purpose>
    Allows the user program to remove a file in their area.

  <Arguments>
    filename: the name of the file to remove. It must not contain
    characters other than 'a-z0-9.-_' and cannot start with a period or
    the empty string.

  <Exceptions>
    RepyArgumentError is raised if the filename is invalid.
    FileInUseError is raised if the file is already open.
    FileNotFoundError is raised if the file does not exist

  <Side Effects>
    None

  <Resource Consumption>
    Consumes 4K of fileread. If successful, also consumes 4K of filewrite.

  <Returns>
    None
  """
  # Reject malformed names before doing any work.
  _assert_is_allowed_filename(filename)

  # The lock serializes against open/close so a file cannot be removed
  # while another thread holds a handle to it.
  with OPEN_FILES_LOCK:
    if filename in OPEN_FILES:
      raise FileInUseError('Cannot remove file "'+filename+'" because it is in use!')

    absolute_filename = os.path.abspath(os.path.join(repy_constants.REPY_CURRENT_DIR, filename))

    # Charge for the existence check before performing it.
    nanny.tattle_quantity('fileread', 4096)
    if not os.path.isfile(absolute_filename):
      raise FileNotFoundError('Cannot remove non-existent file "'+filename+'".')

    # Charge for the write, then remove (failure here is an internal error).
    nanny.tattle_quantity('filewrite',4096)
    os.remove(absolute_filename)
def emulated_open(filename, create):
  """
  <Purpose>
    Allows the user program to open a file safely.

  <Arguments>
    filename:
      The file that should be operated on. It must not contain characters
      other than 'a-z0-9.-_' and cannot be '.', '..', the empty string or
      begin with a period.

    create:
      A Boolean flag which specifies if the file should be created
      if it does not exist.

  <Exceptions>
    RepyArgumentError is raised if the filename is invalid.
    FileInUseError is raised if a handle to the file is already open.
    ResourceExhaustedError is raised if there are no available file handles.
    FileNotFoundError is raised if the filename is not found, and create is False.

  <Side Effects>
    Opens a file on disk, uses a file descriptor.

  <Resource Consumption>
    Consumes 4K of fileread. If the file is created, then 4K of filewrite is used.
    If a handle to the object is created, then a file descriptor is used.

  <Returns>
    A file-like object.
  """
  # All validation and resource accounting happens inside the
  # emulated_file initializer; this wrapper only provides the
  # function-style API exposed to user code.
  return emulated_file(filename, create)
##### Private functions
def _assert_is_allowed_filename(filename):
  """
  <Purpose>
    Private method to check if a filename is allowed.

  <Arguments>
    filename:
      The filename to check.

  <Exceptions>
    Raises a RepyArgumentError if the filename is not allowed.

  <Returns>
    None
  """
  # The checks run in a fixed order so callers always see the same error
  # for the same bad input.
  if type(filename) is not str:
    raise RepyArgumentError("Filename is not a string!")

  if len(filename) > MAX_FILENAME_LENGTH:
    raise RepyArgumentError("Filename exceeds maximum length! Maximum: "+str(MAX_FILENAME_LENGTH))

  if filename in ILLEGAL_FILENAMES:
    raise RepyArgumentError("Illegal filename provided!")

  # Report the first character outside the allowed set, if any.
  bad_chars = [ch for ch in filename if ch not in ALLOWED_FILENAME_CHAR_SET]
  if bad_chars:
    raise RepyArgumentError("Filename has disallowed character '"+bad_chars[0]+"'")

  if filename.startswith('.'):
    raise RepyArgumentError("Filename starts with a period, this is not allowed!")
# make a copy of the function.
check_repy_filename = _assert_is_allowed_filename
##### Class Definitions
class emulated_file (object):
  """
  A safe class which enables a very primitive file interaction.
  We only allow reading and writing at a provided index.
  """
  # We use the following instance variables.
  # filename is the name of the file we've opened,
  # abs_filename is the absolute path to the file we've opened,
  # and is the unique handle used to tattle the "filesopened" to nanny.
  #
  # fobj is the actual underlying file-object from python.
  # seek_lock is a Lock object to serialize seeking
  # size is the byte size of the file, to detect seeking past the end.
  __slots__ = ["filename", "abs_filename", "fobj", "seek_lock", "filesize"]
  def __init__(self, filename, create):
    """
    This is an internal initializer. See emulated_open for details.
    """
    # Initialize the fields, otherwise __del__ gets confused
    # when we throw an exception. This was not a problem when the
    # logic was in emulated_open, since we would never throw an
    # exception
    self.filename = filename
    self.abs_filename = None
    self.fobj = None
    self.seek_lock = threading.Lock()
    self.filesize = 0
    # raise an RepyArgumentError if the filename isn't valid
    _assert_is_allowed_filename(filename)
    # Check the type of create
    if type(create) is not bool:
      raise RepyArgumentError("Create argument type is invalid! Must be a Boolean!")
    # Hold the global lock while we register this file as open, so two
    # threads cannot open the same filename concurrently.
    OPEN_FILES_LOCK.acquire()
    try:
      # Check if the file is in use
      if filename in OPEN_FILES:
        raise FileInUseError('Cannot open file "'+filename+'" because it is already open!')
      # Get the absolute file name
      self.abs_filename = os.path.abspath(os.path.join(repy_constants.REPY_CURRENT_DIR, filename))
      # Here is where we try to allocate a "file" resource from the
      # nanny system. We will restore this below if there is an exception
      # This may raise a ResourceExhautedError
      nanny.tattle_add_item('filesopened', self.abs_filename)
      # charge for checking if the file exists.
      nanny.tattle_quantity('fileread', 4096)
      exists = os.path.isfile(self.abs_filename)
      # if there isn't a file already...
      if not exists:
        # if we shouldn't create it, it's an error
        # NOTE(review): "openfile" in this message looks like a typo for
        # "open file"; the text is user-visible so it is left unchanged here.
        if not create:
          raise FileNotFoundError('Cannot openfile non-existent file "'+filename+'" without creating it!')
        # okay, we should create it...
        nanny.tattle_quantity('filewrite', 4096)
        safe_open(self.abs_filename, "w").close() # Forces file creation
      # Store a file handle
      # Always open in mode r+b, this avoids Windows text-mode
      # quirks, and allows reading and writing
      self.fobj = safe_open(self.abs_filename, "r+b")
      # Add the filename to the open files
      OPEN_FILES.add(filename)
      # Get the file's size
      self.filesize = os.path.getsize(self.abs_filename)
    except RepyException:
      # Restore the file handle we tattled
      nanny.tattle_remove_item('filesopened', self.abs_filename)
      raise
    finally:
      OPEN_FILES_LOCK.release()
  def close(self):
    """
    <Purpose>
      Allows the user program to close the handle to the file.
    <Arguments>
      None.
    <Exceptions>
      FileClosedError is raised if the file is already closed.
    <Resource Consumption>
      Releases a file handle.
    <Returns>
      None.
    """
    # Acquire the lock to the set
    OPEN_FILES_LOCK.acquire()
    # Tell nanny we're gone.
    # NOTE(review): the handle is removed from nanny's "filesopened" even when
    # the file turns out to be already closed below — verify nanny tolerates
    # this duplicate removal.
    nanny.tattle_remove_item('filesopened', self.abs_filename)
    # Acquire the seek lock
    self.seek_lock.acquire()
    try:
      # Release the file object
      fobj = self.fobj
      if fobj is not None:
        fobj.close()
        self.fobj = None
      else:
        raise FileClosedError("File '"+str(self.filename)+"' is already closed!")
      # Remove this file from the list of open files
      OPEN_FILES.remove(self.filename)
    finally:
      # Release the two locks we hold
      self.seek_lock.release()
      OPEN_FILES_LOCK.release()
  def readat(self,sizelimit,offset):
    """
    <Purpose>
      Reads from a file handle. Reading 0 bytes informs you if you have read
      past the end-of-file, but returns no data.
    <Arguments>
      sizelimit:
        The maximum number of bytes to read from the file. Reading EOF will
        read less. By setting this value to None, the entire file is read.
      offset:
        Seek to a specific absolute offset before reading.
    <Exceptions>
      RepyArgumentError is raised if the offset or size is negative.
      FileClosedError is raised if the file is already closed.
      SeekPastEndOfFileError is raised if trying to read past the end of the file.
    <Resource Consumption>
      Consumes 4K of fileread for each 4K aligned-block of the file read.
      All reads will consume at least 4K.
    <Returns>
      The data that was read. This may be the empty string if we have reached the
      end of the file, or if the sizelimit was 0.
    """
    # Check the arguments
    # NOTE: under Python 2, None < 0 is True but the "!= None" clause keeps
    # sizelimit=None (read everything) from being rejected here.
    if sizelimit < 0 and sizelimit != None:
      raise RepyArgumentError("Negative sizelimit specified!")
    if offset < 0:
      raise RepyArgumentError("Negative read offset speficied!")
    # Get the seek lock
    self.seek_lock.acquire()
    try:
      # Get the underlying file object
      fobj = self.fobj
      if fobj is None:
        raise FileClosedError("File '"+self.filename+"' is already closed!")
      # Check the provided offset
      if offset > self.filesize:
        raise SeekPastEndOfFileError("Seek offset extends past the EOF!")
      # Seek to the correct location
      fobj.seek(offset)
      # Wait for available file read resources
      nanny.tattle_quantity('fileread',0)
      if sizelimit != None:
        # Read the data
        data = fobj.read(sizelimit)
      else:
        # read all the data...
        data = fobj.read()
    finally:
      # Release the seek lock
      self.seek_lock.release()
    # Check how much we've read, in terms of 4K "blocks"
    # NOTE(review): relies on Python 2 integer division; under Python 3 "/"
    # would produce floats ("//" is needed there) — TODO confirm target version.
    end_offset = len(data) + offset
    disk_blocks_read = end_offset / 4096 - offset / 4096
    if end_offset % 4096 > 0:
      disk_blocks_read += 1
    # Charge 4K per block
    nanny.tattle_quantity('fileread', disk_blocks_read*4096)
    # Return the data
    return data
  def writeat(self,data,offset):
    """
    <Purpose>
      Allows the user program to write data to a file.
    <Arguments>
      data: The data to write
      offset: An absolute offset into the file to write
    <Exceptions>
      RepyArgumentError is raised if the offset is negative or the data is not
      a string.
      FileClosedError is raised if the file is already closed.
      SeekPastEndOfFileError is raised if trying to write past the EOF.
    <Side Effects>
      Writes to persistent storage.
    <Resource Consumption>
      Consumes 4K of filewrite for each 4K aligned-block of the file written.
      All writes consume at least 4K.
    <Returns>
      Nothing
    """
    # Check the arguments
    # NOTE(review): this message says "read offset" (and misspells
    # "specified") although we are in writeat; it is a user-visible string,
    # so it is left unchanged here.
    if offset < 0:
      raise RepyArgumentError("Negative read offset speficied!")
    if type(data) is not str:
      raise RepyArgumentError("Data must be specified as a string!")
    # Get the seek lock
    self.seek_lock.acquire()
    try:
      # Get the underlying file object
      fobj = self.fobj
      if fobj is None:
        raise FileClosedError("File '"+self.filename+"' is already closed!")
      # Check the provided offset
      if offset > self.filesize:
        raise SeekPastEndOfFileError("Seek offset extends past the EOF!")
      # Seek to the correct location
      fobj.seek(offset)
      # Wait for available file write resources
      nanny.tattle_quantity('filewrite',0)
      # Write the data and flush to disk
      fobj.write(data)
      fobj.flush()
      # Check if we expanded the file size
      if offset + len(data) > self.filesize:
        self.filesize = offset + len(data)
    finally:
      # Release the seek lock
      self.seek_lock.release()
    # Check how much we've written, in terms of 4K "blocks"
    # NOTE(review): same Python 2 integer-division dependency as in readat.
    end_offset = len(data) + offset
    disk_blocks_written = end_offset / 4096 - offset / 4096
    if end_offset % 4096 > 0:
      disk_blocks_written += 1
    # Charge 4K per block
    nanny.tattle_quantity('filewrite', disk_blocks_written*4096)
  def __del__(self):
    # this ensures that during interpreter cleanup, that the order of
    # freed memory doesn't matter. If we don't have this, then
    # OPEN_FILES_LOCK and other objects might get cleaned up first and cause
    # the close call below to print an exception
    if OPEN_FILES_LOCK == None:
      return
    # Make sure we are closed
    try:
      self.close()
    except FileClosedError:
      pass # Good, we are already closed.
# End of emulated_file class
|
ahamilton55/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_selfip.py | 59 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IPs on a BIG-IP system
version_added: "2.2"
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
allow_service:
description:
- Configure port lockdown for the Self IP. By default, the Self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports
as well as specific protocols. This list should contain C(protocol):C(port)
values.
name:
description:
- The self IP to create.
required: true
default: Value of C(address)
netmask:
description:
- The netmasks for the self IP.
required: true
state:
description:
- The state of the variable on the system. When C(present), guarantees
that the Self-IP exists with the provided attributes. When C(absent),
removes the Self-IP from the system.
required: false
default: present
choices:
- absent
- present
traffic_group:
description:
- The traffic group for the self IP addresses in an active-active,
redundant load balancer configuration.
required: false
vlan:
description:
- The VLAN that the new self IPs will be on.
required: true
route_domain:
description:
- The route domain id of the system.
If none, id of the route domain will be "0" (default route domain)
required: false
default: none
version_added: 2.3
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
extends_documentation_fragment: f5
requirements:
- netaddr
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create Self IP
bigip_selfip:
address: "10.10.10.10"
name: "self1"
netmask: "255.255.255.0"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "no"
vlan: "vlan1"
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
validate_certs: "no"
name: "self1"
address: "10.10.10.10"
netmask: "255.255.255.0"
vlan: "vlan1"
route_domain: "10"
allow_service: "default"
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- "tcp:443"
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- "tcp:443"
      - "tcp:22"
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- all
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- gre:0
- igmp:0
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: "self1"
password: "secret"
server: "lb.mydomain.com"
state: "absent"
user: "admin"
validate_certs: "no"
allow_service:
- tcp:0
delegate_to: localhost
'''
RETURN = '''
allow_service:
description: Services that allowed via this Self IP
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the Self IP
returned: created
type: string
sample: "192.0.2.10"
name:
description: The name of the Self IP
returned: created, changed or deleted
type: string
sample: "self1"
netmask:
description: The netmask of the Self IP
returned: created or changed
type: string
sample: "255.255.255.0"
traffic_group:
description: The traffic group that the Self IP is a member of
returned: changed or created
type: string
sample: "traffic-group-local-only"
vlan:
description: The VLAN set on the Self IP
returned: created or changed
type: string
sample: "vlan1"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from netaddr import IPNetwork, AddrFormatError
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
FLOAT = ['enabled', 'disabled']
DEFAULT_TG = 'traffic-group-local-only'
ALLOWED_PROTOCOLS = ['eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
'l2tp', 'ospf', 'pim', 'tcp', 'udp']
class BigIpSelfIp(object):
    """Implements create/update/delete of Self-IPs via the F5 iControl REST SDK.

    User-supplied module parameters live in ``self.params``; the subset of
    parameters actually changed on the device is accumulated in
    ``self.cparams`` and returned to Ansible by :meth:`flush`.
    """
    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            raise F5ModuleError("The python f5-sdk module is required")
        # The params that change in the module
        self.cparams = dict()
        # Stores the params that are sent to the module
        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'])
    def present(self):
        # Ensure the Self-IP exists; create it or converge its attributes.
        changed = False
        if self.exists():
            changed = self.update()
        else:
            changed = self.create()
        return changed
    def absent(self):
        # Ensure the Self-IP does not exist.
        changed = False
        if self.exists():
            changed = self.delete()
        return changed
    def read(self):
        """Read information and transform it
        The values that are returned by BIG-IP in the f5-sdk can have encoding
        attached to them as well as be completely missing in some cases.
        Therefore, this method will transform the data from the BIG-IP into a
        format that is more easily consumable by the rest of the class and the
        parameters that are supported by the module.
        :return: List of values currently stored in BIG-IP, formatted for use
                 in this class.
        """
        p = dict()
        name = self.params['name']
        partition = self.params['partition']
        r = self.api.tm.net.selfips.selfip.load(
            name=name,
            partition=partition
        )
        if hasattr(r, 'address'):
            # Note: the string "None" (not the None object) marks "no route
            # domain"; update() compares against this formatted value.
            p['route_domain'] = str(None)
            if '%' in r.address:
                # BIG-IP encodes a route domain as "addr%rd/mask"; split it
                # back out into a plain CIDR plus the route-domain id.
                ipaddr = []
                ipaddr = r.address.split('%', 1)
                rdmask = ipaddr[1].split('/', 1)
                r.address = "%s/%s" % (ipaddr[0], rdmask[1])
                p['route_domain'] = str(rdmask[0])
            ipnet = IPNetwork(r.address)
            p['address'] = str(ipnet.ip)
            p['netmask'] = str(ipnet.netmask)
        if hasattr(r, 'trafficGroup'):
            p['traffic_group'] = str(r.trafficGroup)
        if hasattr(r, 'vlan'):
            p['vlan'] = str(r.vlan)
        if hasattr(r, 'allowService'):
            if r.allowService == 'all':
                p['allow_service'] = set(['all'])
            else:
                p['allow_service'] = set([str(x) for x in r.allowService])
        else:
            # A missing key on the device means "no services allowed".
            p['allow_service'] = set(['none'])
        p['name'] = name
        return p
    def verify_services(self):
        """Verifies that a supplied service string has correct format
        The string format for port lockdown is PROTOCOL:PORT. This method
        will verify that the provided input matches the allowed protocols
        and the port ranges before submitting to BIG-IP.
        The only allowed exceptions to this rule are the following values
          * all
          * default
          * none
        These are special cases that are handled differently in the API.
        "all" is set as a string, "default" is set as a one item list, and
        "none" removes the key entirely from the REST API.
        :raises F5ModuleError:
        """
        result = []
        for svc in self.params['allow_service']:
            # A special keyword anywhere in the list wins outright.
            if svc in ['all', 'none', 'default']:
                result = [svc]
                break
            tmp = svc.split(':')
            if tmp[0] not in ALLOWED_PROTOCOLS:
                raise F5ModuleError(
                    "The provided protocol '%s' is invalid" % (tmp[0])
                )
            try:
                port = int(tmp[1])
            except Exception:
                raise F5ModuleError(
                    "The provided port '%s' is not a number" % (tmp[1])
                )
            if port < 0 or port > 65535:
                raise F5ModuleError(
                    "The provided port '%s' must be between 0 and 65535"
                    % (port)
                )
            else:
                result.append(svc)
        return set(result)
    def fmt_services(self, services):
        """Returns services formatted for consumption by f5-sdk update
        The BIG-IP endpoint for services takes different values depending on
        what you want the "allowed services" to be. It can be any of the
        following
            - a list containing "protocol:port" values
            - the string "all"
            - a null value, or None
        This is a convenience function to massage the values the user has
        supplied so that they are formatted in such a way that BIG-IP will
        accept them and apply the specified policy.
        :param services: The services to format. This is always a Python set
        :return:
        """
        result = list(services)
        if result[0] == 'all':
            return 'all'
        elif result[0] == 'none':
            return None
        else:
            return list(services)
    def traffic_groups(self):
        # Return fully-qualified (/partition/name) traffic group names.
        result = []
        groups = self.api.tm.cm.traffic_groups.get_collection()
        for group in groups:
            # Just checking for the addition of the partition here for
            # different versions of BIG-IP
            if '/' + self.params['partition'] + '/' in group.name:
                result.append(group.name)
            else:
                full_name = '/%s/%s' % (self.params['partition'], group.name)
                result.append(str(full_name))
        return result
    def update(self):
        # Diff the desired parameters against what is on the device and push
        # only the changed subset. Returns True when anything changed.
        changed = False
        svcs = []
        params = dict()
        current = self.read()
        check_mode = self.params['check_mode']
        address = self.params['address']
        allow_service = self.params['allow_service']
        name = self.params['name']
        netmask = self.params['netmask']
        partition = self.params['partition']
        traffic_group = self.params['traffic_group']
        vlan = self.params['vlan']
        route_domain = self.params['route_domain']
        if address is not None and address != current['address']:
            raise F5ModuleError(
                'Self IP addresses cannot be updated'
            )
        if netmask is not None:
            # I ignore the address value here even if they provide it because
            # you are not allowed to change it.
            try:
                address = IPNetwork(current['address'])
                new_addr = "%s/%s" % (address.ip, netmask)
                nipnet = IPNetwork(new_addr)
                # NOTE(review): when a route domain is given, both the "new"
                # and "current" values are rebuilt as plain concatenated
                # strings with no '%' or '/' separators. The two sides are
                # formatted symmetrically, so the comparison below is still
                # consistent — confirm this is intended rather than a
                # formatting bug.
                if route_domain is not None:
                    nipnet = "%s%s%s" % (address.ip, route_domain, netmask)
                cur_addr = "%s/%s" % (current['address'], current['netmask'])
                cipnet = IPNetwork(cur_addr)
                if route_domain is not None:
                    cipnet = "%s%s%s" % (current['address'], current['route_domain'], current['netmask'])
                if nipnet != cipnet:
                    if route_domain is not None:
                        address = "%s%s%s/%s" % (address.ip, '%', route_domain, netmask)
                    else:
                        address = "%s/%s" % (nipnet.ip, nipnet.prefixlen)
                    params['address'] = address
            except AddrFormatError:
                raise F5ModuleError(
                    'The provided address/netmask value was invalid'
                )
        if traffic_group is not None:
            traffic_group = "/%s/%s" % (partition, traffic_group)
            if traffic_group not in self.traffic_groups():
                raise F5ModuleError(
                    'The specified traffic group was not found'
                )
            if 'traffic_group' in current:
                if traffic_group != current['traffic_group']:
                    params['trafficGroup'] = traffic_group
            else:
                params['trafficGroup'] = traffic_group
        if vlan is not None:
            vlans = self.get_vlans()
            vlan = "/%s/%s" % (partition, vlan)
            if 'vlan' in current:
                if vlan != current['vlan']:
                    params['vlan'] = vlan
            else:
                params['vlan'] = vlan
            if vlan not in vlans:
                raise F5ModuleError(
                    'The specified VLAN was not found'
                )
        if allow_service is not None:
            svcs = self.verify_services()
            if 'allow_service' in current:
                if svcs != current['allow_service']:
                    params['allowService'] = self.fmt_services(svcs)
            else:
                params['allowService'] = self.fmt_services(svcs)
        if params:
            changed = True
            params['name'] = name
            params['partition'] = partition
            if check_mode:
                return changed
            self.cparams = camel_dict_to_snake_dict(params)
            if svcs:
                self.cparams['allow_service'] = list(svcs)
        else:
            return changed
        r = self.api.tm.net.selfips.selfip.load(
            name=name,
            partition=partition
        )
        r.update(**params)
        r.refresh()
        return True
    def get_vlans(self):
        """Returns formatted list of VLANs
        The VLAN values stored in BIG-IP are done so using their fully
        qualified name which includes the partition. Therefore, "correct"
        values according to BIG-IP look like this
            /Common/vlan1
        This is in contrast to the formats that most users think of VLANs
        as being stored as
            vlan1
        To provide for the consistent user experience while not turfing
        BIG-IP, we need to massage the values that are provided by the
        user so that they include the partition.
        :return: List of vlans formatted with preceding partition
        """
        partition = self.params['partition']
        vlans = self.api.tm.net.vlans.get_collection()
        return [str("/" + partition + "/" + x.name) for x in vlans]
    def create(self):
        # Create a brand new Self-IP from the module parameters.
        params = dict()
        svcs = []
        check_mode = self.params['check_mode']
        address = self.params['address']
        allow_service = self.params['allow_service']
        name = self.params['name']
        netmask = self.params['netmask']
        partition = self.params['partition']
        traffic_group = self.params['traffic_group']
        vlan = self.params['vlan']
        route_domain = self.params['route_domain']
        # NOTE(review): "specififed" below is a typo in a user-visible error
        # message; left unchanged here.
        if address is None or netmask is None:
            raise F5ModuleError(
                'An address and a netmask must be specififed'
            )
        if vlan is None:
            raise F5ModuleError(
                'A VLAN name must be specified'
            )
        else:
            vlan = "/%s/%s" % (partition, vlan)
        try:
            # BIG-IP expects "addr%route_domain/prefixlen" when a route
            # domain is supplied, otherwise plain CIDR notation.
            ipin = "%s/%s" % (address, netmask)
            ipnet = IPNetwork(ipin)
            if route_domain is not None:
                params['address'] = "%s%s%s/%s" % (ipnet.ip, '%', route_domain, ipnet.prefixlen)
            else:
                params['address'] = "%s/%s" % (ipnet.ip, ipnet.prefixlen)
        except AddrFormatError:
            raise F5ModuleError(
                'The provided address/netmask value was invalid'
            )
        if traffic_group is None:
            params['trafficGroup'] = "/%s/%s" % (partition, DEFAULT_TG)
        else:
            traffic_group = "/%s/%s" % (partition, traffic_group)
            if traffic_group in self.traffic_groups():
                params['trafficGroup'] = traffic_group
            else:
                raise F5ModuleError(
                    'The specified traffic group was not found'
                )
        vlans = self.get_vlans()
        if vlan in vlans:
            params['vlan'] = vlan
        else:
            raise F5ModuleError(
                'The specified VLAN was not found'
            )
        if allow_service is not None:
            svcs = self.verify_services()
            params['allowService'] = self.fmt_services(svcs)
        params['name'] = name
        params['partition'] = partition
        self.cparams = camel_dict_to_snake_dict(params)
        if svcs:
            self.cparams['allow_service'] = list(svcs)
        if check_mode:
            return True
        d = self.api.tm.net.selfips.selfip
        d.create(**params)
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the self IP")
    def delete(self):
        # Remove the Self-IP; verifies the deletion took effect.
        params = dict()
        check_mode = self.params['check_mode']
        params['name'] = self.params['name']
        params['partition'] = self.params['partition']
        self.cparams = camel_dict_to_snake_dict(params)
        if check_mode:
            return True
        dc = self.api.tm.net.selfips.selfip.load(**params)
        dc.delete()
        if self.exists():
            raise F5ModuleError("Failed to delete the self IP")
        return True
    def exists(self):
        # True when a Self-IP with this name exists in the partition.
        name = self.params['name']
        partition = self.params['partition']
        return self.api.tm.net.selfips.selfip.exists(
            name=name,
            partition=partition
        )
    def flush(self):
        # Entry point called by main(): dispatch on the requested state and
        # build the result dict that Ansible will report.
        # NOTE(review): 'changed' is only bound inside the if/elif; this
        # assumes 'state' is restricted to present/absent by the argument
        # spec — confirm f5_argument_spec enforces those choices.
        result = dict()
        state = self.params['state']
        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        result.update(**self.cparams)
        result.update(dict(changed=changed))
        return result
def main():
    """Ansible entry point: build the argument spec and run the Self-IP logic."""
    spec = f5_argument_spec()
    spec.update(
        address=dict(required=False, default=None),
        allow_service=dict(type='list', default=None),
        name=dict(required=True),
        netmask=dict(required=False, default=None),
        traffic_group=dict(required=False, default=None),
        vlan=dict(required=False, default=None),
        route_domain=dict(required=False, default=None)
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )
    try:
        # netaddr is needed for all address/netmask math in BigIpSelfIp.
        if not HAS_NETADDR:
            raise F5ModuleError(
                "The netaddr python module is required."
            )
        manager = BigIpSelfIp(check_mode=module.check_mode, **module.params)
        module.exit_json(**manager.flush())
    except F5ModuleError as e:
        module.fail_json(msg=str(e))
# Imports intentionally sit at the bottom of the file, following the legacy
# Ansible module convention where module_utils are spliced in at runtime.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
    main()
|
shead-custom-design/pipecat | refs/heads/master | pipecat/record.py | 1 | # Copyright 2016 Timothy M. Shead
#
# This file is part of Pipecat.
#
# Pipecat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pipecat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pipecat. If not, see <http://www.gnu.org/licenses/>.
"""Functions for manipulating data records."""
from __future__ import absolute_import, division, print_function
import sys
import pipecat
def add_field(record, key, value):
    """Store a value in a record, warning if it replaces an existing entry.
    Parameters
    ----------
    record: dict, required
        Dictionary of key-value pairs that constitute a record.
    key: :ref:`Record key <record-keys>`, required
        Key under which the value will be stored.
    value: object
        New record value.
    """
    already_present = key in record
    if already_present:
        pipecat.log.warning("Overwriting %s=%s with %s=%s", key, record[key], key, value) # pragma: no cover
    record[key] = value
def dump(record, fobj=sys.stdout):
    """Dump a human-readable text representation of a record to a file-like object.
    Parameters
    ----------
    record: dict, required
        Dictionary of key-value pairs to be written-out.
    fobj: file-like object, optional
    """
    # Flatten hierarchical (tuple) keys into "a/b/c" strings for display.
    fields = [("/".join(key) if isinstance(key, tuple) else key, value) for key, value in record.items()]
    # Sort on the formatted key only. Sorting the raw (key, value) tuples
    # would fall back to comparing values when two formatted keys collide
    # (e.g. ("a", "b") and "a/b"), raising TypeError for unorderable values.
    fields = sorted(fields, key=lambda field: field[0])
    for key, value in fields:
        fobj.write(u"%s: %s\n" % (key, value))
    fobj.write(u"\n")
    fobj.flush()
def remove_field(record, key):
    """Delete a key-value pair from a record, ignoring keys that are absent.
    Parameters
    ----------
    record: dict, required
        Dictionary of key-value pairs that constitute a record.
    key: :ref:`Record key <record-keys>`, required
        Record key to be removed.
    """
    try:
        del record[key]
    except KeyError:
        pass
|
lichi6174/django-api-lab | refs/heads/master | src/posts/22.py | 2 | #!/usr/bin/env python
#encoding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
__author__ = 'lichi6174'
|
jojanper/draalcore | refs/heads/master | draalcore/auth/tests/test_templatetags.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from mock import MagicMock
from django.conf import settings
from draalcore.auth.templatetags.tags import social_auth
from draalcore.test_utils.basetest import BaseTest
class TemplateTagsTestCase(BaseTest):
    """Tests for the ``social_auth`` template tag."""

    def test_social_auth(self):
        """Test user's social auth status"""
        mock_user = MagicMock(password='pw')
        # An ordinary password means the account is not social-auth backed.
        self.assertTrue(social_auth(mock_user))
        # The sentinel password marks the account as a social-auth account.
        mock_user.password = settings.SOCIAL_AUTH_USER_PASSWORD
        self.assertFalse(social_auth(mock_user))
|
ycl2045/nova-master | refs/heads/master | nova/openstack/common/processutils.py | 8 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller supplies an argument that is not supported."""

    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
    """Raised when unexpected keyword arguments are passed to execute()."""

    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with an unexpected status.

    Carries the command line, exit code and captured stdout/stderr so
    callers can inspect or re-report the failure.
    """
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        # NOTE(review): the attributes are assigned from the raw arguments
        # *before* the defaults below are computed, so e.description stays
        # None (and e.exit_code stays None) even though the formatted message
        # uses the defaulted values — confirm callers rely on this.
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description
        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper command."""

    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.
    Allows optional retry.
    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be stdlib_logging.DEBUG or
                           stdlib_logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    # Pop each recognized option; anything left over is an error below.
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
    # Normalize check_exit_code: True/False toggles checking entirely,
    # a single int becomes a one-element allow-list.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)
    # Only prepend the root helper when we actually lack root privileges.
    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)
    # NOTE(review): under Python 3 map() returns a one-shot iterator, which
    # would break the ' '.join/Popen uses below; this module targets the
    # Python 2 list semantics — confirm before porting.
    cmd = map(str, cmd)
    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            if os.name == 'nt':
                # Windows: no fork, so no preexec_fn, and fds can't be closed.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # Retry communicate() on transient EAGAIN/EINTR only.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, _('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.

    :param discard_warnings: True | False, defaults to False.  If set to
        True, then for succeeding commands stderr is cleared, so warnings
        are not mistaken for failures.
    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    failed = False
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        # Report the failure through the (out, err) contract instead of
        # letting the exception propagate to the caller.
        out = ''
        err = str(exn)
        failed = True

    if discard_warnings and not failed and err:
        # Command succeeded but wrote to stderr: treat it as a warning only.
        err = ''

    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run a command over an already-established SSH connection.

    :param ssh: connected SSH client object exposing exec_command()
    :param cmd: command string to execute on the remote host
    :param process_input: not supported; raises InvalidArgumentError if set
    :param addl_env: not supported; raises InvalidArgumentError if set
    :param check_exit_code: if True, raise ProcessExecutionError when the
        remote command exits with a non-zero status
    :returns: (stdout, stderr) tuple of the remote command's output
    :raises InvalidArgumentError: if addl_env or process_input is given
    :raises ProcessExecutionError: on non-zero exit status when
        check_exit_code is True
    """
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        # Pass the value as a lazy logging argument (consistent with the
        # debug call above) instead of eagerly %-formatting the message.
        LOG.debug(_('Result was %s'), exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
|
lukas-hetzenecker/home-assistant | refs/heads/dev | homeassistant/components/notify/nma.py | 2 | """
NMA (Notify My Android) notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.nma/
"""
import logging
import xml.etree.ElementTree as ET
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://www.notifymyandroid.com/publicapi/'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
})
def get_service(hass, config):
    """Get the NMA notification service."""
    api_key = config[CONF_API_KEY]

    # Validate the key against the NMA 'verify' endpoint before building
    # the service; the response body is a small XML document.
    verify_response = requests.get(_RESOURCE + 'verify',
                                   params={"apikey": api_key})
    root = ET.fromstring(verify_response.content)

    if root[0].tag == 'error':
        _LOGGER.error("Wrong API key supplied. %s", root[0].text)
        return None

    return NmaNotificationService(api_key)
class NmaNotificationService(BaseNotificationService):
    """Implement the notification service for NMA."""

    def __init__(self, api_key):
        """Initialize the service."""
        self._api_key = api_key

    def send_message(self, message="", **kwargs):
        """Send a message to a user via the NMA 'notify' endpoint."""
        data = {
            "apikey": self._api_key,
            "application": 'home-assistant',
            "event": kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
            "description": message,
            "priority": 0,
        }

        response = requests.get(_RESOURCE + 'notify', params=data)
        tree = ET.fromstring(response.content)

        if tree[0].tag == 'error':
            # No exception is being handled here, so use error() instead of
            # exception(): Logger.exception() is only meant to be called
            # from an 'except' block and would otherwise log a bogus
            # 'NoneType: None' traceback.
            _LOGGER.error(
                "Unable to perform request. Error: %s", tree[0].text)
|
jk1/intellij-community | refs/heads/master | python/testData/intentions/paramTypeInNumpyDocStringCombinedParams.py | 52 | def f(x, <caret>y, z):
"""
Parameters
----------
x, y, z
Description
""" |
gonzolino/heat | refs/heads/master | heat_integrationtests/scenario/test_server_software_config.py | 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import template_utils
import six
from heat_integrationtests.common import exceptions
from heat_integrationtests.scenario import scenario_base
CFG1_SH = '''#!/bin/sh
echo "Writing to /tmp/$bar"
echo $foo > /tmp/$bar
echo -n "The file /tmp/$bar contains `cat /tmp/$bar` for server \
$deploy_server_id during $deploy_action" > $heat_outputs_path.result
echo "Written to /tmp/$bar"
echo "Output to stderr" 1>&2
'''
CFG3_PP = '''file {'barfile':
ensure => file,
mode => 0644,
path => "/tmp/$::bar",
content => "$::foo",
}
file {'output_result':
ensure => file,
path => "$::heat_outputs_path.result",
mode => 0644,
content => "The file /tmp/$::bar contains $::foo for server \
$::deploy_server_id during $::deploy_action",
}'''
class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
    """Scenario test for Heat software-config/deployment resources.

    Boots a server with several software configs attached and verifies
    that each deployment contributes its metadata and produces the
    expected stack outputs.
    """

    def setUp(self):
        super(SoftwareConfigIntegrationTest, self).setUp()
        # Randomized name so repeated runs don't collide on stack names.
        self.stack_name = self._stack_rand_name()

    def check_stack(self):
        """Verify resources, deployment metadata and outputs of the stack."""
        sid = self.stack_identifier
        # Check that all stack resources were created
        for res in ('cfg2a', 'cfg2b', 'cfg1', 'cfg3', 'server'):
            self._wait_for_resource_status(
                sid, res, 'CREATE_COMPLETE')

        server_resource = self.client.resources.get(sid, 'server')
        server_id = server_resource.physical_resource_id
        server = self.compute_client.servers.get(server_id)

        # Waiting for each deployment to contribute their
        # config to resource
        try:
            for res in ('dep2b', 'dep1', 'dep3'):
                self._wait_for_resource_status(
                    sid, res, 'CREATE_IN_PROGRESS')

            # Snapshot of deployments visible while dep2a is still pending;
            # compared below to ensure completed deployments don't mutate.
            server_metadata = self.client.resources.metadata(
                sid, 'server')
            deployments = dict((d['name'], d) for d in
                               server_metadata['deployments'])

            for res in ('dep2a', 'dep2b', 'dep1', 'dep3'):
                self._wait_for_resource_status(
                    sid, res, 'CREATE_COMPLETE')
        except (exceptions.StackResourceBuildErrorException,
                exceptions.TimeoutException) as e:
            raise e
        finally:
            # attempt to log the server console regardless of deployments
            # going to complete. This allows successful and failed boot
            # logs to be compared
            self._log_console_output(servers=[server])

        complete_server_metadata = self.client.resources.metadata(
            sid, 'server')

        # Ensure any previously available deployments haven't changed so
        # config isn't re-triggered
        complete_deployments = dict((d['name'], d) for d in
                                    complete_server_metadata['deployments'])
        for k, v in six.iteritems(deployments):
            self.assertEqual(v, complete_deployments[k])

        stack = self.client.stacks.get(sid)

        # Output of cfg1 (shell script): result string, exit code, and both
        # streams (the script deliberately writes to stderr).
        res1 = self._stack_output(stack, 'res1')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/baaaaa', 'fooooo', server_id, 'CREATE'),
            res1['result'])
        self.assertEqual(0, res1['status_code'])
        self.assertEqual('Output to stderr\n', res1['stderr'])
        self.assertTrue(len(res1['stdout']) > 0)

        res2 = self._stack_output(stack, 'res2')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/cfn-init-foo', 'barrr', server_id, 'CREATE'),
            res2['result'])
        self.assertEqual(0, res2['status_code'])
        self.assertEqual('', res2['stderr'])
        self.assertEqual('', res2['stdout'])

        res3 = self._stack_output(stack, 'res3')
        self.assertEqual(
            'The file %s contains %s for server %s during %s' % (
                '/tmp/ba', 'fo', server_id, 'CREATE'),
            res3['result'])
        self.assertEqual(0, res3['status_code'])
        self.assertEqual('', res3['stderr'])
        # NOTE(review): this re-checks res1['stdout'] rather than
        # res3['stdout'] -- looks like a copy/paste slip; confirm intent.
        self.assertTrue(len(res1['stdout']) > 0)

        dep1_resource = self.client.resources.get(sid, 'dep1')
        dep1_id = dep1_resource.physical_resource_id
        dep1_dep = self.client.software_deployments.get(dep1_id)
        if hasattr(dep1_dep, 'updated_time'):
            # Only check updated_time if the attribute exists.
            # This allows latest heat agent code to be tested with
            # Juno heat (which doesn't expose updated_time)
            self.assertIsNotNone(dep1_dep.updated_time)
            self.assertNotEqual(
                dep1_dep.updated_time,
                dep1_dep.creation_time)

    def test_server_software_config(self):
        """Check that passed files with scripts are executed on created server.

        The alternative scenario is the following:
            1. Create a stack and pass files with scripts.
            2. Check that all stack resources are created successfully.
            3. Wait for all deployments.
            4. Check that stack was created.
            5. Check stack outputs.
        """
        parameters = {
            'key_name': self.keypair_name,
            'flavor': self.conf.instance_type,
            'image': self.conf.image_ref,
            'network': self.net['id']
        }

        files = {
            'cfg1.sh': CFG1_SH,
            'cfg3.pp': CFG3_PP
        }

        env_files, env = template_utils.process_environment_and_files(
            self.conf.boot_config_env)

        # Launch stack
        self.stack_identifier = self.launch_stack(
            stack_name=self.stack_name,
            template_name='test_server_software_config.yaml',
            parameters=parameters,
            files=dict(list(files.items()) + list(env_files.items())),
            expected_status=None,
            environment=env
        )

        # Check stack
        self.check_stack()
|
ff-/pineal | refs/heads/master | thirdparty/OSC.py | 10 | #!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
	Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
    """ Builds typetagged OSC messages.

    OSCMessage objects are container objects for building OSC-messages.
    On the 'front' end, they behave much like list-objects, and on the 'back' end
    they generate a binary representation of the message, which can be sent over a network socket.
    OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
    followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
    and finally the arguments themselves, encoded in an OSC-specific way.
    On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
    The message contents can be manipulated much like a list:
      >>> msg = OSCMessage("/my/osc/address")
      >>> msg.append('something')
      >>> msg.insert(0, 'something else')
      >>> msg[1] = 'entirely'
      >>> msg.extend([1,2,3.])
      >>> msg += [4, 5, 6.]
      >>> del msg[3:6]
      >>> msg.pop(-2)
      5
      >>> print msg
      /my/osc/address ['something else', 'entirely', 1, 6.0]

    OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
    inherits its address from the left-hand operand. The right-hand operand's address is ignored.
    To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!

    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
    """
    def __init__(self, address="", *args):
        """Instantiate a new OSCMessage.
        The OSC-address can be specified with the 'address' argument.
        The rest of the arguments are appended as data.
        """
        self.clear(address)
        if len(args)>0:
            self.append(*args)

    def setAddress(self, address):
        """Set or change the OSC-address
        """
        self.address = address

    def clear(self, address=""):
        """Clear (or set a new) OSC-address and clear any arguments appended so far
        """
        self.address = address
        self.clearData()

    def clearData(self):
        """Clear any arguments appended so far
        """
        # typetags always starts with the OSC 'magic' comma; message holds
        # the binary-encoded arguments.
        self.typetags = ","
        self.message  = ""

    def append(self, argument, typehint=None):
        """Appends data to the message, updating the typetags based on
        the argument's type. If the argument is a blob (counted
        string) pass in 'b' as typehint.
        'argument' may also be a list or tuple, in which case its elements
        will get appended one-by-one, all using the provided typehint
        """
        if type(argument) == types.DictType:
            argument = argument.items()
        elif isinstance(argument, OSCMessage):
            raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")

        if hasattr(argument, '__iter__'):
            # Iterables are appended element-by-element, all with the same hint.
            for arg in argument:
                self.append(arg, typehint)
            return

        if typehint == 'b':
            binary = OSCBlob(argument)
            tag = 'b'
        elif typehint == 't':
            binary = OSCTimeTag(argument)
            tag = 't'
        else:
            tag, binary = OSCArgument(argument, typehint)

        self.typetags += tag
        self.message += binary

    def getBinary(self):
        """Returns the binary representation of the message
        """
        binary = OSCString(self.address)
        binary += OSCString(self.typetags)
        binary += self.message

        return binary

    def __repr__(self):
        """Returns a string containing the decode Message
        """
        return str(decodeOSC(self.getBinary()))

    def __str__(self):
        """Returns the Message's address and contents as a string.
        """
        return "%s %s" % (self.address, str(self.values()))

    def __len__(self):
        """Returns the number of arguments appended so far
        """
        # One typetag per argument, minus the leading comma.
        return (len(self.typetags) - 1)

    def __eq__(self, other):
        """Return True if two OSCMessages have the same address & content
        """
        if not isinstance(other, self.__class__):
            return False

        return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)

    def __ne__(self, other):
        """Return (not self.__eq__(other))
        """
        return not self.__eq__(other)

    def __add__(self, values):
        """Returns a copy of self, with the contents of 'values' appended
        (see the 'extend()' method, below)
        """
        msg = self.copy()
        msg.extend(values)
        return msg

    def __iadd__(self, values):
        """Appends the contents of 'values'
        (equivalent to 'extend()', below)
        Returns self
        """
        self.extend(values)
        return self

    def __radd__(self, values):
        """Appends the contents of this OSCMessage to 'values'
        Returns the extended 'values' (list or tuple)
        """
        out = list(values)
        out.extend(self.values())

        if type(values) == types.TupleType:
            return tuple(out)

        return out

    def _reencode(self, items):
        """Erase & rebuild the OSCMessage contents from the given
        list of (typehint, value) tuples"""
        self.clearData()
        for item in items:
            self.append(item[1], item[0])

    def values(self):
        """Returns a list of the arguments appended so far
        """
        # Decode our own binary form; the first two items are address & tags.
        return decodeOSC(self.getBinary())[2:]

    def tags(self):
        """Returns a list of typetags of the appended arguments
        """
        return list(self.typetags.lstrip(','))

    def items(self):
        """Returns a list of (typetag, value) tuples for
        the arguments appended so far
        """
        out = []
        values = self.values()
        typetags = self.tags()
        for i in range(len(values)):
            out.append((typetags[i], values[i]))

        return out

    def __contains__(self, val):
        """Test if the given value appears in the OSCMessage's arguments
        """
        return (val in self.values())

    def __getitem__(self, i):
        """Returns the indicated argument (or slice)
        """
        return self.values()[i]

    def __delitem__(self, i):
        """Removes the indicated argument (or slice)
        """
        items = self.items()
        del items[i]

        self._reencode(items)

    def _buildItemList(self, values, typehint=None):
        # Normalize 'values' into a list of (typehint, value) tuples.
        if isinstance(values, OSCMessage):
            items = values.items()
        elif type(values) == types.ListType:
            items = []
            for val in values:
                if type(val) == types.TupleType:
                    items.append(val[:2])
                else:
                    items.append((typehint, val))
        elif type(values) == types.TupleType:
            items = [values[:2]]
        else:
            items = [(typehint, values)]

        return items

    def __setitem__(self, i, val):
        """Set indicated argument (or slice) to a new value.
        'val' can be a single int/float/string, or a (typehint, value) tuple.
        Or, if 'i' is a slice, a list of these or another OSCMessage.
        """
        items = self.items()

        new_items = self._buildItemList(val)

        if type(i) != types.SliceType:
            if len(new_items) != 1:
                raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")

            new_items = new_items[0]

        # finally...
        items[i] = new_items

        self._reencode(items)

    def setItem(self, i, val, typehint=None):
        """Set indicated argument to a new value (with typehint)
        """
        items = self.items()

        items[i] = (typehint, val)

        self._reencode(items)

    def copy(self):
        """Returns a deep copy of this OSCMessage
        """
        msg = self.__class__(self.address)
        msg.typetags = self.typetags
        msg.message = self.message
        return msg

    def count(self, val):
        """Returns the number of times the given value occurs in the OSCMessage's arguments
        """
        return self.values().count(val)

    def index(self, val):
        """Returns the index of the first occurrence of the given value in the OSCMessage's arguments.
        Raises ValueError if val isn't found
        """
        return self.values().index(val)

    def extend(self, values):
        """Append the contents of 'values' to this OSCMessage.
        'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
        """
        items = self.items() + self._buildItemList(values)

        self._reencode(items)

    def insert(self, i, val, typehint = None):
        """Insert given value (with optional typehint) into the OSCMessage
        at the given index.
        """
        items = self.items()

        for item in reversed(self._buildItemList(val)):
            items.insert(i, item)

        self._reencode(items)

    def popitem(self, i):
        """Delete the indicated argument from the OSCMessage, and return it
        as a (typetag, value) tuple.
        """
        items = self.items()

        item = items.pop(i)

        self._reencode(items)

        return item

    def pop(self, i):
        """Delete the indicated argument from the OSCMessage, and return it.
        """
        return self.popitem(i)[1]

    def reverse(self):
        """Reverses the arguments of the OSCMessage (in place)
        """
        items = self.items()

        items.reverse()

        self._reencode(items)

    def remove(self, val):
        """Removes the first argument with the given value from the OSCMessage.
        Raises ValueError if val isn't found.
        """
        items = self.items()

        # this is not very efficient...
        i = 0
        for (t, v) in items:
            if (v == val):
                break
            i += 1
        else:
            # BUG FIX: the original raised with an undefined name 'm' here,
            # which turned "value not found" into a NameError instead of
            # the documented ValueError.
            raise ValueError("'%s' not in OSCMessage" % str(val))
        # but more efficient than first calling self.values().index(val),
        # then calling self.items(), which would in turn call self.values() again...

        del items[i]

        self._reencode(items)

    def __iter__(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())

    def __reversed__(self):
        """Returns a reverse iterator of the OSCMessage's arguments
        """
        return reversed(self.values())

    def itervalues(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())

    def iteritems(self):
        """Returns an iterator of the OSCMessage's arguments as
        (typetag, value) tuples
        """
        return iter(self.items())

    def itertags(self):
        """Returns an iterator of the OSCMessage's arguments' typetags
        """
        return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.

    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)

    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
      - if an item or items to be appended or inserted are not OSCMessage objects,
      OSCMessage objects are created to encapsulate the item(s)
      - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
      The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
      - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
      The default timetag value (0) means 'immediately'
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.
        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument
        The bundle's timetag can be set with the 'time' argument
        """
        super(OSCBundle, self).__init__(address)
        self.timetag = time

    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["

        if self.__len__():
            for val in self.values():
                out += "%s, " % str(val)
            out = out[:-2]  # strip trailing space and comma

        return out + "]"

    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag
        In 'Python Time', that's floating seconds since the Epoch
        """
        # Negative times are silently ignored.
        if time >= 0:
            self.timetag = time

    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable string
        """
        fract, secs = math.modf(self.timetag)
        # ctime()[11:19] is the 'HH:MM:SS' part; append milliseconds.
        out = time.ctime(secs)[11:19]
        out += ("%.3f" % fract)[1:]

        return out

    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
          - if 'addr' appears in the dict, its value overrides the OSCBundle's address
          - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            msg = OSCMessage(self.address)
            if type(argument) == types.DictType:
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)

            binary = OSCBlob(msg.getBinary())

        # Each contained message is stored as a blob ('b') element.
        self.message += binary
        self.typetags += 'b'

    def getBinary(self):
        """Returns the binary representation of the message
        """
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message

        return binary

    def _reencapsulate(self, decoded):
        # Rebuild OSCMessage / OSCBundle objects (recursively) from a
        # decodeOSC() result list.
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))
        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])

        return msg

    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))

        return out

    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        if not isinstance(other, self.__class__):
            return False

        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)

    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes.
    The string ends with 1 to 4 zero-bytes ('\x00')

    NOTE: the parameter name shadows the builtin next(); kept for
    backward compatibility.  Python 2 only: struct.pack(">Ns", str) relies
    on py2 byte-string semantics.
    """
    # +1 guarantees at least one terminating NUL before rounding up to the
    # next multiple of 4; struct.pack zero-fills the padding.
    OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
    return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00')
    """
    if type(next) in types.StringTypes:
        # Round the payload length up to a multiple of 4 bytes; struct.pack
        # zero-fills the padding after the int32 size prefix.
        OSCblobLength = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
    else:
        # Non-string input is silently encoded as an empty blob.
        binary = ""

    return binary
def OSCArgument(next, typehint=None):
    """ Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.

    Without a typehint the tag is inferred from the Python type; with a
    hint of 'd', 'f' or 'i' the value is coerced, falling back to a
    string encoding when the coercion raises ValueError.
    """
    if not typehint:
        # No hint: infer the OSC tag from the value's Python type.
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))

    if typehint == 'd':
        try:
            return ('d', struct.pack(">d", float(next)))
        except ValueError:
            return ('s', OSCString(next))

    if typehint == 'f':
        try:
            return ('f', struct.pack(">f", float(next)))
        except ValueError:
            return ('s', OSCString(next))

    if typehint == 'i':
        try:
            return ('i', struct.pack(">i", int(next)))
        except ValueError:
            return ('s', OSCString(next))

    # Any other hint falls back to string encoding.
    return ('s', OSCString(next))
def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation

    The result is two big-endian uint32s: whole NTP seconds and
    fractional NTP units.  Python 2 only ('long', '0L' literals).
    """
    if time > 0:
        fract, secs = math.modf(time)
        # Shift from the Unix epoch to the NTP epoch (1 Jan 1900).
        secs = secs - NTP_epoch
        binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
    else:
        # The special OSC timetag value 1 means 'process immediately'.
        binary = struct.pack('>LL', 0L, 1L)

    return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
    """Reads the next (null-terminated) block of data

    Returns a (string, remaining-data) tuple; the consumed length is
    rounded up to a multiple of 4 bytes, as OSC strings are zero-padded.
    NOTE: string.find() is Python 2 only (removed from the py3 string module).
    """
    length   = string.find(data,"\0")
    nextData = int(math.ceil((length+1) / 4.0) * 4)
    return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit integer.

    Returns an (integer, remaining-data) tuple.  On underflow the data is
    returned unconsumed and the value defaults to 0.
    """
    if(len(data)<4):
        # Python 2 print statement; error is reported but not raised.
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]

    return (integer, rest)
def _readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer, returning (value, remainder).
    """
    (high, low) = struct.unpack(">ll", data[0:8])
    # Recombine the two signed 32-bit halves into one 64-bit value.
    value = (long(high) << 32) + low
    return (value, data[8:])
def _readTimeTag(data):
    """Tries to interpret the next 8 bytes of the data
    as an OSC/NTP TimeTag, returning (time, remainder).

    The special timetag (high == 0, low <= 1) decodes to 0.0
    ('immediately').
    """
    high, low = struct.unpack(">LL", data[0:8])
    if (high == 0) and (low <= 1):
        time = 0.0
    else:
        # BUGFIX: divide as floats. The original computed
        # float(low / NTP_units_per_second); with two integer operands the
        # division floors to 0 BEFORE the float() conversion, silently
        # discarding the fractional seconds.
        time = int(NTP_epoch + high) + float(low) / NTP_units_per_second
    rest = data[8:]
    return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print "Error: too few bytes for double", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message (or bundle) to a Python list.

    Messages decode to [address, typetags, arg, arg, ...]; bundles decode
    to ["#bundle", timetag, decoded-element, ...] (recursively).
    """
    readers = {
        "i": _readInt,
        "f": _readFloat,
        "s": _readString,
        "b": _readBlob,
        "d": _readDouble,
        "t": _readTimeTag,
    }
    out = []
    address, remaining = _readString(data)
    typetags = ""
    if address.startswith(","):
        # Address-less message: the first string is the typetag-string.
        typetags = address
        address = ""

    if address == "#bundle":
        # Bundle: a timetag followed by length-prefixed elements.
        when, remaining = _readTimeTag(remaining)
        out.append(address)
        out.append(when)
        while remaining:
            size, remaining = _readInt(remaining)
            out.append(decodeOSC(remaining[:size]))
            remaining = remaining[size:]
    elif remaining:
        if not typetags:
            typetags, remaining = _readString(remaining)
        out.append(address)
        out.append(typetags)
        if not typetags.startswith(","):
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
        for tag in typetags[1:]:
            value, remaining = readers[tag](remaining)
            out.append(value)

    return out
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print "%s: %s" % (line, repr(bytes[i-15:i+1]))
line = ""
bytes_left = num % 16
if bytes_left:
print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
      - (host, port)
      - (host, port), prefix
      - host, port
      - host, port, prefix
    """
    if not len(args):
        return ""

    # Accept either a (host, port) tuple or separate host & port args.
    if type(args[0]) == types.TupleType:
        host = args[0][0]
        port = args[0][1]
        extra = args[1:]
    else:
        host = args[0]
        port = args[1]
        extra = args[2:]

    if len(extra):
        prefix = extra[0]
    else:
        prefix = ""

    if len(host) and (host != '0.0.0.0'):
        # Best-effort reverse lookup for a readable hostname.
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        host = 'localhost'

    if type(port) == types.IntType:
        return "%s:%d%s" % (host, port, prefix)
    return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix)
    """
    # Reject non-strings and empty strings up-front.
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')

    # Strip any 'scheme://' part.
    i = url.find("://")
    if i > -1:
        url = url[i+3:]

    # Split 'host:tail' at the first ':' (host may be absent).
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url

    # Scan the leading digits of 'tail' to find where the port ends.
    # The for/else leaves i at the first non-digit, or (via the else,
    # which only runs when the loop finishes without break) one past the
    # last digit when tail is all digits.
    # NOTE(review): when 'tail' is empty the loop never runs and 'i'
    # carries the stale value from the ':'-find above, +1 — presumably
    # harmless in practice, but verify against callers.
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        i += 1

    portstr = tail[:i].strip()
    tail = tail[i:].strip()

    # The prefix starts at the first occurrence of '/', '+', '-' or '*';
    # anything before that is an alternative host spelling ('head').
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()

    # Normalize the prefix to a single leading '/' unless it is a
    # filter-string starting with '+', '-' or '*'.
    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix

    if len(head) and not len(host):
        host = head

    # Best-effort forward DNS lookup; keep the name on failure.
    if len(host):
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass

    try:
        port = int(portstr)
    except ValueError:
        port = None

    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
    """Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8

    def __init__(self, server=None):
        """Construct an OSC Client.
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        self.socket = None              # UDP socket; created lazily by _ensureConnected()
        self.setServer(server)
        self.client_address = None      # (host, port) set by connect()

    def _setSocket(self, skt):
        """Set and configure client socket"""
        if self.socket != None:
            self.close()
        self.socket = skt
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self._fd = self.socket.fileno()

    def _ensureConnected(self, address):
        """Make sure client has a socket connected to address"""
        if not self.socket:
            # A 4-tuple address means IPv6 (host, port, flowinfo, scopeid).
            if len(address) == 4:
                address_family = socket.AF_INET6
            else:
                address_family = socket.AF_INET
            self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
        self.socket.connect(address)

    def setServer(self, server):
        """Associate this Client with given server.
        The Client will send from the Server's socket.
        The Server will use this Client instance to send replies.
        - server: an OSCServer instance, or None to detach.
        Raises ValueError for a non-OSCServer argument,
        OSCClientError on an inconsistent server/client link.
        """
        if server == None:
            # Detach from the current server (if any), closing the shared socket.
            if hasattr(self, 'server') and self.server:
                if self.server.client != self:
                    raise OSCClientError("Internal inconsistency")
                self.server.client.close()
                self.server.client = None
            self.server = None
            return

        if not isinstance(server, OSCServer):
            raise ValueError("'server' argument is not a valid OSCServer object")

        # dup() the server's socket so client & server share the same port.
        self._setSocket(server.socket.dup())
        self.server = server

        if self.server.client != None:
            self.server.client.close()
        self.server.client = self

    def close(self):
        """Disconnect & close the Client's socket
        """
        if self.socket != None:
            self.socket.close()
            self.socket = None

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Compare function.
        Two Clients are equal when they wrap the same underlying socket and
        are bound to equal servers (or neither has a socket / server).
        """
        if not isinstance(other, self.__class__):
            return False

        # BUGFIX: the original assigned cmp() results here; cmp() returns 0
        # (falsy) for EQUAL objects, which inverted both comparisons below.
        if self.socket and other.socket:
            sockEqual = (self.socket._sock == other.socket._sock)
        else:
            sockEqual = (self.socket == None and other.socket == None)

        if not sockEqual:
            return False

        if self.server and other.server:
            return self.server == other.server
        else:
            return self.server == None and other.server == None

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)

    def address(self):
        """Returns a (host,port) tuple of the remote server this client is
        connected to or None if not connected to any server.
        """
        try:
            if self.socket:
                return self.socket.getpeername()
            else:
                return None
        except socket.error:
            return None

    def connect(self, address):
        """Bind to a specific OSC server:
        the 'address' argument is a (host, port) tuple
        - host: hostname of the remote OSC server,
        - port: UDP-port the remote OSC server listens to.
        Raises OSCClientError if the socket cannot be connected.
        """
        try:
            self._ensureConnected(address)
            self.client_address = address
        except socket.error as e:
            self.client_address = None
            raise OSCClientError("SocketError: %s" % str(e))

        if self.server != None:
            self.server.return_port = address[1]

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage to the specified address.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

        # BUGFIX: connect first so that self._fd exists before we select() on
        # it. The original select()ed before the socket was ever created,
        # raising AttributeError when sendto() was the first call made on a
        # fresh, unconnected client.
        try:
            self._ensureConnected(address)
        except socket.error as e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))

        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")

        try:
            self.socket.sendall(msg.getBinary())

            # Restore the regular connection, if any.
            if self.client_address:
                self.socket.connect(self.client_address)

        except socket.error as e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))

    def send(self, msg, timeout=None):
        """Send the given OSCMessage.
        The Client must be already connected.
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket,
        or when the Client isn't connected to a remote server.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

        if not self.socket:
            raise OSCClientError("Called send() on non-connected client")

        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")

        try:
            self.socket.sendall(msg.getBinary())
        except socket.error as e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}

    # Accept a single string or a sequence of strings.
    if type(args) in types.StringTypes:
        args = [args]

    prefix = None
    for arg in args:
        head = None
        # Each '+'-separated segment starts with an accept-address; any
        # '-'-separated remainders within it are deny-addresses.
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                # normalize: exactly one leading '/', no trailing '/'
                plusfs = '/' + plusfs.strip('/')

            # The first non-'/*' accept-address becomes the 'head'
            # (candidate prefix); subsequent ones become filter entries.
            if (head == None) and (plusfs != "/*"):
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True }  # reset all previous filters
                else:
                    out[plusfs] = True

            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')

                if minusfs == '/*':
                    out = { '/*':False }  # reset all previous filters
                else:
                    out[minusfs] = False

        # The prefix comes from the first arg that produced a head.
        if prefix == None:
            prefix = head

    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    """
    if not len(filters):
        return []

    # The leading '+/*' or '-/*' states the default policy: taken from an
    # explicit '/*' entry when present, otherwise chosen so the remaining
    # entries make sense (the presence of deny-rules implies 'accept by
    # default', and vice versa).
    if '/*' in filters.keys():
        if filters['/*']:
            out = ["+/*"]
        else:
            out = ["-/*"]
    else:
        if False in filters.values():
            out = ["+/*"]
        else:
            out = ["-/*"]

    # Loop variable renamed from 'bool', which shadowed the builtin.
    for (addr, accept) in filters.items():
        if addr == '/*':
            continue
        if accept:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)

    return out
# A translation-table for mapping OSC-address expressions to Python 're'
# expressions: '{' -> '(', ',' -> '|', '}' -> ')' and '?' -> '.'
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compiles and returns a 'regular expression' object for the given address-pattern.
    """
    # Escape the characters that are literal in OSC-addresses but special
    # in 're', then map the OSC wildcards onto their 're' equivalents.
    escaped = pattern.replace(".", r"\.").replace("(", r"\(").replace(")", r"\)")
    translated = escaped.replace("*", r".*").translate(OSCtrans)
    return re.compile(translated)
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
    """'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
    The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
    the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
    """
    def __init__(self, server=None):
        """Construct a "Multi" OSC Client.
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        super(OSCMultiClient, self).__init__(server)

        # maps (host, port) -> [prefix, filters-dict]
        self.targets = {}

    def _searchHostAddr(self, host):
        """Search the subscribed OSCTargets for (the first occurence of) given host.
        Returns a (host, port) tuple.
        Raises NotSubscribedError when the host is not a subscribed target.
        """
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass

        for addr in self.targets.keys():
            if host == addr[0]:
                return addr

        raise NotSubscribedError((host, None))

    def _updateFilters(self, dst, src):
        """Update a 'filters' dict with values from another 'filters' dict:
        - src[a] == True and dst[a] == False: del dst[a]
        - src[a] == False and dst[a] == True: del dst[a]
        - a not in dst: dst[a] == src[a]
        NOTE: pops the '/*' key out of 'src' (mutates the argument).
        """
        if '/*' in src.keys():  # reset filters
            dst.clear()         # 'match everything' == no filters
            if not src.pop('/*'):
                dst['/*'] = False  # 'match nothing'

        # Loop variable renamed from 'bool', which shadowed the builtin.
        for (addr, accept) in src.items():
            if (addr in dst.keys()) and (dst[addr] != accept):
                # Opposite settings cancel each other out.
                del dst[addr]
            else:
                dst[addr] = accept

    def _setTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        - address ((host, port) tuple): IP-address & UDP-port
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if address not in self.targets.keys():
            self.targets[address] = ["", {}]

        if prefix != None:
            if len(prefix):
                # make sure prefix starts with ONE '/', and does not end with '/'
                prefix = '/' + prefix.strip('/')
            self.targets[address][0] = prefix

        if filters != None:
            if type(filters) in types.StringTypes:
                (_, filters) = parseFilterStr(filters)
            elif type(filters) != types.DictType:
                raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
            self._updateFilters(self.targets[address][1], filters)

    def setOSCTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
        or a 'host' (string) : The host will be looked-up
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)
        elif (type(address) == types.TupleType):
            (host, port) = address[:2]
            # BUGFIX: was a bare 'except:' that swallowed everything
            # (including KeyboardInterrupt); all sibling methods catch
            # socket.error here.
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
        else:
            raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")

        self._setTarget(address, prefix, filters)

    def setOSCTargetFromStr(self, url):
        """Adds or modifies a subscribed OSCTarget from the given string, which should be in the
        '<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
        """
        (addr, tail) = parseUrlStr(url)
        (prefix, filters) = parseFilterStr(tail)
        self._setTarget(addr, prefix, filters)

    def _delTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument must be a (host, port) tuple.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        try:
            if prefix == None:
                del self.targets[address]
            elif prefix == self.targets[address][0]:
                del self.targets[address]
        except KeyError:
            raise NotSubscribedError(address, prefix)

    def delOSCTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)

        if type(address) == types.TupleType:
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)

            self._delTarget(address, prefix)

    def hasOSCTarget(self, address, prefix=None):
        """Return True if the given OSCTarget exists in the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)

        if type(address) == types.TupleType:
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)

            if address in self.targets.keys():
                if prefix == None:
                    return True
                elif prefix == self.targets[address][0]:
                    return True

        return False

    def getOSCTargets(self):
        """Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
        """
        out = {}
        for ((host, port), pf) in self.targets.items():
            # Best-effort reverse lookup for readable hostnames.
            try:
                (host, _, _) = socket.gethostbyaddr(host)
            except socket.error:
                pass
            out[(host, port)] = pf

        return out

    def getOSCTarget(self, address):
        """Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, ['',{}]) if address not found.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)

        if (type(address) == types.TupleType):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)

            if (address in self.targets.keys()):
                try:
                    (host, _, _) = socket.gethostbyaddr(host)
                except socket.error:
                    pass
                return ((host, port), self.targets[address])

        return (None, ['', {}])

    def clearOSCTargets(self):
        """Erases all OSCTargets from the Client's dict
        """
        self.targets = {}

    def updateOSCTargets(self, dict):
        """Update the Client's OSCTargets dict with the contents of 'dict'
        The given dict's items MUST be of the form
        { (host, port):[prefix, filters], ... }
        """
        # NOTE: the parameter name 'dict' shadows the builtin; kept for
        # backward compatibility with callers using the keyword.
        for ((host, port), (prefix, filters)) in dict.items():
            val = [prefix, {}]
            self._updateFilters(val[1], filters)

            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass

            self.targets[(host, port)] = val

    def getOSCTargetStr(self, address):
        """Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, []) if address not found.
        """
        (addr, (prefix, filters)) = self.getOSCTarget(address)
        if addr == None:
            return (None, [])

        return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))

    def getOSCTargetStrings(self):
        """Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
        """
        out = []
        for (addr, (prefix, filters)) in self.targets.items():
            out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))

        return out

    def connect(self, address):
        """The OSCMultiClient isn't allowed to connect to any specific
        address.
        """
        return NotImplemented

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage.
        The specified address is ignored. Instead this method calls send() to
        send the message to all subscribed clients.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        self.send(msg, timeout)

    def _filterMessage(self, filters, msg):
        """Checks the given OSCMessge against the given filters.
        'filters' is a dict containing OSC-address:bool pairs.
        If 'msg' is an OSCBundle, recursively filters its constituents.
        Returns None if the message is to be filtered, else returns the message.
        or
        Returns a copy of the OSCBundle with the filtered messages removed.
        """
        if isinstance(msg, OSCBundle):
            out = msg.copy()
            msgs = out.values()
            out.clearData()
            for m in msgs:
                m = self._filterMessage(filters, m)
                if m:  # this catches 'None' and empty bundles.
                    out.append(m)
        elif isinstance(msg, OSCMessage):
            # Default policy: an explicit '/*' entry wins; otherwise the
            # presence of any deny-rule implies 'accept by default'.
            if '/*' in filters.keys():
                if filters['/*']:
                    out = msg
                else:
                    out = None
            elif False in filters.values():
                out = msg
            else:
                out = None
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

        # The first filter-address that fully matches the message's
        # OSC-address overrides the default policy.
        expr = getRegEx(msg.address)

        for addr in filters.keys():
            if addr == '/*':
                continue

            match = expr.match(addr)
            if match and (match.end() == len(addr)):
                if filters[addr]:
                    out = msg
                else:
                    out = None
                break

        return out

    def _prefixAddress(self, prefix, msg):
        """Makes a copy of the given OSCMessage, then prepends the given prefix to
        The message's OSC-address.
        If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
        """
        out = msg.copy()

        if isinstance(msg, OSCBundle):
            msgs = out.values()
            out.clearData()
            for m in msgs:
                out.append(self._prefixAddress(prefix, m))
        elif isinstance(msg, OSCMessage):
            out.setAddress(prefix + out.address)
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

        return out

    def send(self, msg, timeout=None):
        """Send the given OSCMessage to all subscribed OSCTargets
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        for (address, (prefix, filters)) in self.targets.items():
            if len(filters):
                out = self._filterMessage(filters, msg)
                if not out:  # this catches 'None' and empty bundles.
                    continue
            else:
                out = msg

            if len(prefix):
                # BUGFIX: prefix the (possibly filtered) 'out', not the
                # original 'msg' — the original discarded the filtering
                # result whenever a prefix was set.
                out = self._prefixAddress(prefix, out)

            binary = out.getBinary()

            ret = select.select([], [self._fd], [], timeout)
            try:
                ret[1].index(self._fd)
            except:
                # for the very rare case this might happen
                raise OSCClientError("Timed out waiting for file descriptor")

            try:
                while len(binary):
                    sent = self.socket.sendto(binary, address)
                    binary = binary[sent:]
            except socket.error as e:
                if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                    raise e
                else:
                    raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
    """Registry mapping OSC-addresses to callback functions, with
    pattern-matched message dispatch.
    """
    def __init__(self):
        # maps OSC address-string -> callback function
        self.callbacks = {}

    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
        - 'address' is the OSC address-string.
        the address-string should start with '/' and may not contain '*'
        - 'callback' is the function called for incoming OSCMessages that match 'address'.
        The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        Raises OSCServerError on an invalid address or non-callable callback.
        """
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")

        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))

        # Normalize to exactly one leading '/' (except the special 'default').
        if address != 'default':
            address = '/' + address.strip('/')

        self.callbacks[address] = callback

    def delMsgHandler(self, address):
        """Remove the registered handler for the given OSC-address
        """
        del self.callbacks[address]

    def getOSCAddressSpace(self):
        """Returns a list containing all OSC-addresses registered with this Server.
        """
        return self.callbacks.keys()

    def dispatchMessage(self, pattern, tags, data, client_address):
        """Attempt to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.
        - pattern (string): The OSC-address of the received message
        - tags (string): The OSC-typetags of the received message's arguments, without ','
        - data (list): The message arguments
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))

        expr = getRegEx(pattern)

        replies = []
        matched = 0
        for addr in self.callbacks.keys():
            match = expr.match(addr)
            if match and (match.end() == len(addr)):
                reply = self.callbacks[addr](pattern, tags, data, client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # BUGFIX: was 'self.server.callbacks[addr]' — OSCAddressSpace
                    # has no 'server' attribute, so raising this error crashed
                    # with AttributeError instead of the intended TypeError.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks[addr], type(reply)))

        if matched == 0:
            if 'default' in self.callbacks:
                reply = self.callbacks['default'](pattern, tags, data, client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # BUGFIX: same 'self.server.callbacks' error as above.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)

        return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # Plain message: decoded[1] is the typetag-string; [1:] strips
            # the leading ','. Any replies get collected for finish().
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        # Bundle: honour its timetag by sleeping until the requested time.
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)

        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return

        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # Override the reply destination port with the server's
            # configured return_port.
            self.client_address = (self.client_address[0], self.server.return_port)

        # Multiple replies go out as one bundle; a single reply as-is.
        if len(self.replies) > 1:
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return

        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        if decoded[0] != "#bundle":
            # Plain message: dispatch in the current thread.
            # decoded[1] is the typetag-string; [1:] strips the leading ','.
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        # Honour the bundle's timetag before unpacking its elements.
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        now = time.time()

        children = []

        # Unpack each bundle-element in its own thread.
        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)

        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-adresses
that have been registered to the server with a callback-function.
If the adress-pattern of the message machtes the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
    def __init__(self, server_address, client=None, return_port=0):
        """Instantiate an OSCServer.
        - server_address ((host, port) tuple): the local host & UDP-port
        the server listens on
        - client (OSCClient instance): The OSCClient used to send replies from this server.
        If none is supplied (default) an OSCClient will be created.
        - return_port (int): if supplied, sets the default UDP destination-port
        for replies coming from this server.
        """
        UDPServer.__init__(self, server_address, self.RequestHandlerClass)
        OSCAddressSpace.__init__(self)

        self.setReturnPort(return_port)
        self.error_prefix = ""
        self.info_prefix = "/info"

        # Time out so serve_forever() can notice 'running' going False.
        self.socket.settimeout(self.socket_timeout)

        self.running = False
        self.client = None

        if client == None:
            self.client = OSCClient(server=self)
        else:
            self.setClient(client)
    def setClient(self, client):
        """Associate this Server with a new local Client instance, closing the Client this Server is currently using.
        - client: an OSCClient not already bound to another server.
        Raises ValueError for a non-OSCClient argument, OSCServerError when
        the client is already bound elsewhere.
        """
        if not isinstance(client, OSCClient):
            raise ValueError("'client' argument is not a valid OSCClient object")

        if client.server != None:
            raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))

        # Server socket is already listening at this point, so we can't use the client's socket.
        # we'll have to force our socket on the client...
        client_address = client.address()  # client may be already connected
        client.close()  # shut-down that socket

        # force our socket upon the client
        client.setServer(self)

        if client_address:
            client.connect(client_address)
            if not self.return_port:
                # Default replies to the port the client was connected to.
                self.return_port = client_address[1]
    def serve_forever(self):
        """Handle one request at a time until server is closed."""
        self.running = True
        while self.running:
            self.handle_request()  # this times-out when no data arrives.
    def close(self):
        """Stops serving requests, closes server (socket), closes used client
        """
        self.running = False  # makes serve_forever() fall out of its loop
        self.client.close()
        self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock)
    def __ne__(self, other):
        """Compare function.
        """
        # Defined in terms of __eq__ so the two stay consistent.
        return not self.__eq__(other)
    def address(self):
        """Returns a (host,port) tuple of the local address this server is bound to,
        or None if not bound to any address.
        """
        try:
            return self.socket.getsockname()
        except socket.error:
            return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix' -> serverInfo_handler
- '<prefix><error_prefix> -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
    """Write the given text to sys.stderr, prefixed with 'OSCServer: '."""
    line = "OSCServer: %s\n" % txt
    sys.stderr.write(line)
def sendOSCerror(self, txt, client_address):
    """Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-address.
    Message is sent to the given client_address, with the default 'return_port' overriding
    the client_address' port, if defined.
    """
    lines = txt.split('\n')
    if len(lines) == 1:
        # single-line error: a plain message is enough
        msg = OSCMessage(self.error_prefix)
        msg.append(lines[0])
    elif len(lines) > 1:
        # multi-line error: one message per line, wrapped in a bundle
        msg = OSCBundle(self.error_prefix)
        for line in lines:
            msg.append(line)
    else:
        # empty text: nothing to report
        return
    if self.return_port:
        # redirect the reply to the configured return-port
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
    """Log 'txt' locally and, when an error_prefix is configured, also
    forward it to the client(s) as an OSC error-message
    (see printErr() and sendOSCerror()).
    """
    self.printErr(txt)
    if self.error_prefix:
        self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
    """Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-address.
    Message is sent to the given client_address, with the default 'return_port' overriding
    the client_address' port, if defined.
    """
    lines = txt.split('\n')
    if len(lines) == 1:
        # single-line info: a plain message is enough
        msg = OSCMessage(self.info_prefix)
        msg.append(lines[0])
    elif len(lines) > 1:
        # multi-line info: one message per line, wrapped in a bundle
        msg = OSCBundle(self.info_prefix)
        for line in lines:
            msg.append(line)
    else:
        # empty text: nothing to send
        return
    if self.return_port:
        # redirect the reply to the configured return-port
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
    """Handle an exception in the Server's callbacks gracefully.
    Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
    sends the error-message as reply to the client.
    """
    # grab the exception currently being handled
    (e_type, e) = sys.exc_info()[:2]
    self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
    if self.print_tracebacks:
        import traceback
        traceback.print_exc() # XXX But this goes to stderr!
    if len(self.error_prefix):
        # also report the error back to the remote client
        self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
    """Fallback handler invoked when no other handler matches a received
    OSC-address (registered under the special address 'default').

    Arguments follow the standard handler signature:
      - addr (string): the OSC-address pattern of the received Message
      - tags (string): OSC-typetags of the arguments (without the leading comma)
      - data (list): the OSCMessage's arguments (len(tags) == len(data))
      - client_address ((host, port) tuple): origin of the message
    Reports a "No callback registered ..." error and returns None.
    """
    message = "No callback registered to handle OSC-address '%s'" % addr
    self.reportErr(message, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
    """Example handler for OSCMessages.
    All registered handlers must accept these arguments:
    - addr (string): The OSC-address pattern of the received Message
      (the 'addr' string has already been matched against the handler's registered OSC-address,
      but may contain '*'s & such)
    - tags (string): The OSC-typetags of the received message's arguments (without the preceding comma)
    - data (list): The OSCMessage's arguments.
      Note that len(tags) == len(data)
    - client_address ((host, port) tuple): the host & port this message originated from.
    A Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
    which then gets sent back to the client.
    This handler prints the received message.
    Returns None
    """
    txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
    txt += str(data)
    # NOTE(review): the original comment here claimed trailing ", " gets
    # stripped, but the code prints 'txt' unmodified.
    self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
    """Example handler for OSCMessages (standard handler signature:
    addr, tags, data, client_address — see noCallback_handler).

    This handler returns a reply to the client, which can contain various bits of information
    about this server, depending on the first argument of the received OSC-message:
    - 'help' | 'info' : Reply contains server type & version info, plus a list of
      available 'commands' understood by this handler
    - 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
      OSC address-space.
    - 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
      messages, listing the local Client-instance's subscribed remote clients.
    Returns the reply (OSCMessage or OSCBundle), or None when no command was given.
    """
    if len(data) == 0:
        # no command given: nothing to reply
        return None
    cmd = data.pop(0)
    reply = None
    if cmd in ('help', 'info'):
        reply = OSCBundle(self.info_prefix)
        reply.append(('server', str(self)))
        reply.append(('info_command', "ls | list : list OSC address-space"))
        reply.append(('info_command', "clients | targets : list subscribed clients"))
    elif cmd in ('ls', 'list'):
        # one 'address' message per registered callback
        reply = OSCBundle(self.info_prefix)
        for addr in self.callbacks.keys():
            reply.append(('address', addr))
    elif cmd in ('clients', 'targets'):
        if hasattr(self.client, 'getOSCTargetStrings'):
            # multi-target client: list every subscribed target
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
        else:
            # plain client: report its single connected address, if any
            cli_addr = self.client.address()
            if cli_addr:
                reply = OSCMessage(self.info_prefix)
                reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
    else:
        self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
    return reply
def _subscribe(self, data, client_address):
    """Handle the actual subscription. The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
    parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.
    This 'long way 'round' approach (almost) guarantees that the subscription works,
    regardless of how the bits of the <url> are encoded in 'data'.
    Returns an OSCMessage echoing the (new) subscription, or None.
    """
    url = ""
    have_port = False
    for item in data:
        # the first int argument is taken to be the port number; everything
        # string-like is appended verbatim to the url
        if (type(item) == types.IntType) and not have_port:
            url += ":%d" % item
            have_port = True
        elif type(item) in types.StringTypes:
            url += item
    (addr, tail) = parseUrlStr(url)
    (prefix, filters) = parseFilterStr(tail)
    if addr != None:
        # fill in missing host/port from the message's source address
        (host, port) = addr
        if not host:
            host = client_address[0]
        if not port:
            port = client_address[1]
        addr = (host, port)
    else:
        # no address in the url at all: subscribe the sender itself
        addr = client_address
    self.client._setTarget(addr, prefix, filters)
    trg = self.client.getOSCTargetStr(addr)
    if trg[0] != None:
        # echo the accepted subscription back to the requester
        reply = OSCMessage(self.info_prefix)
        reply.append(('target',) + trg)
        return reply
def _unsubscribe(self, data, client_address):
    """Handle the actual unsubscription. The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>]' string, which is then passed to
    parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
    This 'long way 'round' approach (almost) guarantees that the unsubscription works,
    regardless of how the bits of the <url> are encoded in 'data'.
    Returns an error OSCMessage when the target was not subscribed, else None.
    """
    url = ""
    have_port = False
    for item in data:
        # the first int argument is taken to be the port number; everything
        # string-like is appended verbatim to the url
        if (type(item) == types.IntType) and not have_port:
            url += ":%d" % item
            have_port = True
        elif type(item) in types.StringTypes:
            url += item
    (addr, _) = parseUrlStr(url)
    if addr == None:
        # no address in the url: unsubscribe the sender itself
        addr = client_address
    else:
        (host, port) = addr
        if not host:
            host = client_address[0]
        if not port:
            # try to look up the port from the existing subscription first
            try:
                (host, port) = self.client._searchHostAddr(host)
            except NotSubscribedError:
                port = client_address[1]
        addr = (host, port)
    try:
        self.client._delTarget(addr)
    except NotSubscribedError, e:
        # report the failed unsubscription both locally and to the requester
        txt = "%s: %s" % (e.__class__.__name__, str(e))
        self.printErr(txt)
        reply = OSCMessage(self.error_prefix)
        reply.append(txt)
        return reply
def subscription_handler(self, addr, tags, data, client_address):
    """Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
    if the local Client supports this (i.e. OSCMultiClient).
    Supported commands:
    - 'help' | 'info' : Reply contains server type & version info, plus a list of
      available 'commands' understood by this handler
    - 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
      messages, listing the local Client-instance's subscribed remote clients.
    - '[subscribe | listen | sendto | target] <url> [<filter> ...]' : Subscribe remote client/server at <url>,
      and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
      arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
      a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
      and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
      Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
    - '[unsubscribe | silence | nosend | deltarget] <url>' : Unsubscribe remote client/server at <url>
      If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
    The <url> given to the subscribe/unsubscribe handler should be of the form:
    '[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
    If <host> is not specified, the IP-address of the message's source is used.
    If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
    the associated port is used.
    If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
    If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
    sent to the subscribed host.
    If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
    port and prefix all match the subscription.
    If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
    match the subscription.
    Raises OSCServerError when the local client cannot track multiple targets.
    """
    if not isinstance(self.client, OSCMultiClient):
        # fixed typo in the original error message ("subsctiptions")
        raise OSCServerError("Local %s does not support subscriptions or message-filtering" % self.client.__class__.__name__)
    # the last component of the matched OSC-address doubles as the command
    # when no command is given in the message's data
    addr_cmd = addr.split('/')[-1]
    if len(data):
        if data[0] in ('help', 'info'):
            reply = OSCBundle(self.info_prefix)
            reply.append(('server', str(self)))
            reply.append(('subscribe_command', "ls | list : list subscribed targets"))
            reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
            reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
            return reply
        if data[0] in ('ls', 'list'):
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
            return reply
        if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
            return self._subscribe(data[1:], client_address)
        if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
            return self._unsubscribe(data[1:], client_address)
    # fall back to the command encoded in the OSC-address itself
    if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
        return self._subscribe(data, client_address)
    if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
        return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.
    """
    # NOTE(review): the original comment here ("will be overridden by
    # ForkingOSCServer & ThreadingOSCServer") was a stale copy from the base
    # class — this line IS the override.
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # NOTE(review): the original comment here ("will be overridden by
    # ForkingOSCServer & ThreadingOSCServer") was a stale copy from the base
    # class — this line IS the override.
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base class for all OSC-related errors.

    The error text is kept on self.message for backward compatibility; the
    base Exception is also initialized so that args, repr() and pickling
    behave normally (the original skipped Exception.__init__, leaving
    args empty).
    """
    def __init__(self, message):
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Base class for all OSCClient errors."""
    pass
class OSCServerError(OSCError):
    """Base class for all OSCServer errors."""
    pass
class NoCallbackError(OSCServerError):
    """This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
    is received, and no 'default' handler is registered.
    """
    def __init__(self, pattern):
        """The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
        """
        # note: intentionally does not call OSCError.__init__; the message
        # attribute is set directly
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised (by an OSCMultiClient) when an attempt is made to unsubscribe
    a host that isn't subscribed.
    """
    def __init__(self, addr, prefix=None):
        # an absent prefix is rendered the same as an empty one
        self.message = "Target osc://%s is not subscribed" % getUrlStr(addr, prefix or '')
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
    """ This is the central class of a streaming OSC server. If a client
    connects to the server, the server instantiates a OSCStreamRequestHandler
    for each new connection. This is fundamentally different to a packet
    oriented server which has a single address space for all connections.
    This connection based (streaming) OSC server maintains an address space
    for each single connection, because usually tcp server spawn a new thread
    or process for each new connection. This would generate severe
    multithreading synchronization problems when each thread would operate on
    the same address space object. Therefore: To implement a streaming/TCP OSC
    server a custom handler must be implemented which implements the
    setupAddressSpace member in which it creates its own address space for this
    very connection. This has been done within the testbench and can serve as
    inspiration.
    """
    def __init__(self, request, client_address, server):
        """ Initialize all base classes. The address space must be initialized
        before the stream request handler because the initialization function
        of the stream request handler calls the setup member which again
        requires an already initialized address space.
        """
        self._txMutex = threading.Lock() # serializes writes to the connection
        OSCAddressSpace.__init__(self)
        StreamRequestHandler.__init__(self, request, client_address, server)

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # plain message: dispatch it and collect any replies
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return
        # honour the bundle's timetag by sleeping until it is due
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def setup(self):
        # called by the StreamRequestHandler machinery for each new connection
        StreamRequestHandler.setup(self)
        print "SERVER: New client connection."
        self.setupAddressSpace()
        self.server._clientRegister(self)

    def setupAddressSpace(self):
        """ Override this function to customize your address space. """
        pass

    def finish(self):
        # called by the StreamRequestHandler machinery when the connection ends
        StreamRequestHandler.finish(self)
        self.server._clientUnregister(self)
        print "SERVER: Client connection handled."

    def _transmit(self, data):
        # send 'data' completely; returns False if the peer closed the socket
        sent = 0
        while sent < len(data):
            tmp = self.connection.send(data[sent:])
            if tmp == 0:
                return False
            sent += tmp
        return True

    def _transmitMsg(self, msg):
        """Send an OSC message over a streaming socket. Raises exception if it
        should fail. If everything is transmitted properly, True is returned. If
        socket has been closed, False.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        try:
            binary = msg.getBinary()
            length = len(binary)
            # prepend length of packet before the actual message (big endian)
            len_big_endian = array.array('c', '\0' * 4)
            struct.pack_into(">L", len_big_endian, 0, length)
            len_big_endian = len_big_endian.tostring()
            if self._transmit(len_big_endian) and self._transmit(binary):
                return True
            return False
        except socket.error, e:
            if e[0] == errno.EPIPE: # broken pipe
                return False
            raise e

    def _receive(self, count):
        """ Receive a certain amount of data from the socket and return it. If the
        remote end should be closed in the meanwhile None is returned.
        """
        chunk = self.connection.recv(count)
        if not chunk or len(chunk) == 0:
            return None
        # keep reading until exactly 'count' bytes have arrived
        while len(chunk) < count:
            tmp = self.connection.recv(count - len(chunk))
            if not tmp or len(tmp) == 0:
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsg(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receive(4)
        if chunk == None:
            print "SERVER: Socket has been closed."
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receive(slen)
        if chunk == None:
            print "SERVER: Socket has been closed."
            return None
        # decode OSC data and dispatch
        msg = decodeOSC(chunk)
        if msg == None:
            raise OSCError("SERVER: Message decoding failed.")
        return msg

    def handle(self):
        """
        Handle a connection: receive messages in a loop, dispatch them, and
        send any replies back to the client.
        """
        # set socket blocking to avoid "resource currently not available"
        # exceptions, because the connection socket inherits the settings
        # from the listening socket and this times out from time to time
        # in order to provide a way to shut the server down. But we want
        # clean and blocking behaviour here
        self.connection.settimeout(None)
        print "SERVER: Entered server loop"
        try:
            while True:
                decoded = self._receiveMsg()
                if decoded == None:
                    return
                elif len(decoded) <= 0:
                    # if message decoding fails we try to stay in sync but print a message
                    print "OSC stream server: Spurious message received."
                    continue
                # handlers append their replies to self.replies
                self.replies = []
                self._unbundle(decoded)
                if len(self.replies) > 1:
                    msg = OSCBundle()
                    for reply in self.replies:
                        msg.append(reply)
                elif len(self.replies) == 1:
                    msg = self.replies[0]
                else:
                    # no replies, continue receiving
                    continue
                self._txMutex.acquire()
                txOk = self._transmitMsg(msg)
                self._txMutex.release()
                if not txOk:
                    break
        except socket.error, e:
            if e[0] == errno.ECONNRESET:
                # if connection has been reset by client, we do not care much
                # about it, we just assume our duty fullfilled
                print "SERVER: Connection has been reset by peer."
            else:
                raise e

    def sendOSC(self, oscData):
        """ This member can be used to transmit OSC messages or OSC bundles
        over the client/server connection. It is thread safe.
        """
        self._txMutex.acquire()
        result = self._transmitMsg(oscData)
        self._txMutex.release()
        return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future).
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
    """ A connection oriented (TCP/IP) OSC server.
    """
    # define a socket timeout, so the serve_forever loop can actually exit.
    # with 2.6 and server.shutdown this wouldn't be necessary
    socket_timeout = 1
    # this is the class which handles a new connection. Override this for a
    # useful customized server. See the testbench for an example
    RequestHandlerClass = OSCStreamRequestHandler

    def __init__(self, address):
        """Instantiate an OSCStreamingServer.
        - address ((host, port) tuple): the local host & TCP-port
        the server listens on for new connections.
        """
        # registry of active connection handlers, guarded by its own lock
        self._clientList = []
        self._clientListMutex = threading.Lock()
        TCPServer.__init__(self, address, self.RequestHandlerClass)
        self.socket.settimeout(self.socket_timeout)

    def serve_forever(self):
        """Handle one request at a time until server is closed.
        Had to add this since 2.5 does not support server.shutdown()
        """
        self.running = True
        while self.running:
            self.handle_request() # this times-out when no data arrives.

    def start(self):
        """ Start the server thread. """
        self._server_thread = threading.Thread(target=self.serve_forever)
        self._server_thread.setDaemon(True)
        self._server_thread.start()

    def stop(self):
        """ Stop the server thread and close the socket. """
        self.running = False
        self._server_thread.join()
        self.server_close()
        # 2.6 only
        #self.shutdown()

    def _clientRegister(self, client):
        """ Gets called by each request/connection handler when connection is
        established to add itself to the client list
        """
        self._clientListMutex.acquire()
        self._clientList.append(client)
        self._clientListMutex.release()

    def _clientUnregister(self, client):
        """ Gets called by each request/connection handler when connection is
        lost to remove itself from the client list
        """
        self._clientListMutex.acquire()
        self._clientList.remove(client)
        self._clientListMutex.release()

    def broadcastToClients(self, oscData):
        """ Send OSC message or bundle to all connected clients.
        Returns True only if the transmission succeeded for every client.
        """
        result = True
        for client in self._clientList:
            result = result and client.sendOSC(oscData)
        return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
    """ Implements a server which spawns a separate thread for each incoming
    connection. Care must be taken since the OSC address space is for all
    the same.
    """
    # (the class body needs no code: ThreadingMixIn supplies the per-request
    # threading behaviour. In the original, this text sat AFTER a 'pass'
    # statement and therefore never became the class docstring.)
class OSCStreamingClient(OSCAddressSpace):
    """ OSC streaming client.
    A streaming client establishes a connection to a streaming server but must
    be able to handle replies by the server as well. To accomplish this the
    receiving takes place in a secondary thread, because no one knows if we
    have to expect a reply or not, i.e. synchronous architecture doesn't make
    much sense.
    Replies will be matched against the local address space. If message
    handlers access code of the main thread (where the client messages are sent
    to the server) care must be taken e.g. by installing synchronization
    mechanisms or by using an event dispatcher which can handle events
    originating from other threads.
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8
    rcvbuf_size = 4096 * 8

    def __init__(self):
        self._txMutex = threading.Lock() # serializes writes to the socket
        OSCAddressSpace.__init__(self)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
        # a short timeout lets the receiver thread notice termination requests
        self.socket.settimeout(1.0)
        self._running = False

    def _receiveWithTimeout(self, count):
        # read exactly 'count' bytes; returns None on shutdown, peer close
        # or connection reset
        chunk = str()
        while len(chunk) < count:
            try:
                tmp = self.socket.recv(count - len(chunk))
            except socket.timeout:
                if not self._running:
                    print "CLIENT: Socket timed out and termination requested."
                    return None
                else:
                    continue
            except socket.error, e:
                if e[0] == errno.ECONNRESET:
                    print "CLIENT: Connection reset by peer."
                    return None
                else:
                    raise e
            if not tmp or len(tmp) == 0:
                print "CLIENT: Socket has been closed."
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsgWithTimeout(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receiveWithTimeout(4)
        if not chunk:
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receiveWithTimeout(slen)
        if not chunk:
            return None
        # decode OSC content
        msg = decodeOSC(chunk)
        if msg == None:
            raise OSCError("CLIENT: Message decoding failed.")
        return msg

    def _receiving_thread_entry(self):
        # secondary thread: receive, dispatch, and send back any replies
        print "CLIENT: Entered receiving thread."
        self._running = True
        while self._running:
            decoded = self._receiveMsgWithTimeout()
            if not decoded:
                break
            elif len(decoded) <= 0:
                continue
            # handlers append their replies to self.replies
            self.replies = []
            self._unbundle(decoded)
            if len(self.replies) > 1:
                msg = OSCBundle()
                for reply in self.replies:
                    msg.append(reply)
            elif len(self.replies) == 1:
                msg = self.replies[0]
            else:
                continue
            self._txMutex.acquire()
            txOk = self._transmitMsgWithTimeout(msg)
            self._txMutex.release()
            if not txOk:
                break
        print "CLIENT: Receiving thread terminated."

    def _unbundle(self, decoded):
        # recursive bundle-unpacking, honouring the bundle's timetag
        if decoded[0] != "#bundle":
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
            return
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def connect(self, address):
        # establish the connection and start the receiver thread
        self.socket.connect(address)
        self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
        self.receiving_thread.start()

    def close(self):
        # let socket time out
        self._running = False
        self.receiving_thread.join()
        self.socket.close()

    def _transmitWithTimeout(self, data):
        # send 'data' completely; returns False on shutdown, peer close or reset
        sent = 0
        while sent < len(data):
            try:
                tmp = self.socket.send(data[sent:])
            except socket.timeout:
                if not self._running:
                    print "CLIENT: Socket timed out and termination requested."
                    return False
                else:
                    continue
            except socket.error, e:
                if e[0] == errno.ECONNRESET:
                    print "CLIENT: Connection reset by peer."
                    return False
                else:
                    raise e
            if tmp == 0:
                return False
            sent += tmp
        return True

    def _transmitMsgWithTimeout(self, msg):
        # frame the message with its big-endian 32-bit length and send it
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        binary = msg.getBinary()
        length = len(binary)
        # prepend length of packet before the actual message (big endian)
        len_big_endian = array.array('c', '\0' * 4)
        struct.pack_into(">L", len_big_endian, 0, length)
        len_big_endian = len_big_endian.tostring()
        if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
            return True
        else:
            return False

    def sendOSC(self, msg):
        """Send an OSC message or bundle to the server. Returns True on success.
        """
        self._txMutex.acquire()
        txOk = self._transmitMsgWithTimeout(msg)
        self._txMutex.release()
        return txOk

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.socket.getpeername()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Compare function.
        """
        # NOTE(review): cmp() returns 0 for EQUAL objects, so 'isequal' is
        # truthy exactly when the sockets differ — this comparison looks
        # inverted. Also 'self.server' is never assigned in this class.
        # Confirm intent before relying on == / != for these clients.
        if not isinstance(other, self.__class__):
            return False
        isequal = cmp(self.socket._sock, other.socket._sock)
        if isequal and self.server and other.server:
            return cmp(self.server, other.server)
        return isequal

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)
# vim:noexpandtab
|
eyeseast/python-frontmatter | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# README.md is published as the PyPI long description; read it as UTF-8
# explicitly so the build does not depend on the platform's default encoding.
with open("README.md", encoding="utf-8") as f:
    readme = f.read()

VERSION = "1.0.0"

setup(
    name="python-frontmatter",
    version=VERSION,
    description="Parse and manage posts with YAML (or other) frontmatter",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="Chris Amico",
    author_email="eyeseast@gmail.com",
    url="https://github.com/eyeseast/python-frontmatter",
    packages=["frontmatter"],
    include_package_data=True,
    install_requires=["PyYAML"],
    extras_require={"test": ["pytest", "toml", "pyaml"], "docs": ["sphinx"]},
    tests_require=["python-frontmatter[test]"],
    license="MIT",
    zip_safe=False,
    keywords="frontmatter",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    test_suite="test",
)
|
orangeduck/PyAutoC | refs/heads/master | Python27/Lib/test/test_class.py | 17 | "Test the functionality of Python classes implementing operators."
import unittest
from test import test_support
# Names of the special methods that get synthesized onto AllTests below;
# each one becomes a tracked no-op __<name>__ generated from method_template.
testmeths = [
    # Binary operations
    "add",
    "radd",
    "sub",
    "rsub",
    "mul",
    "rmul",
    "div",
    "rdiv",
    "mod",
    "rmod",
    "divmod",
    "rdivmod",
    "pow",
    "rpow",
    "rshift",
    "rrshift",
    "lshift",
    "rlshift",
    "and",
    "rand",
    "or",
    "ror",
    "xor",
    "rxor",
    # List/dict operations
    "contains",
    "getitem",
    "getslice",
    "setitem",
    "setslice",
    "delitem",
    "delslice",
    # Unary operations
    "neg",
    "pos",
    "abs",
    # generic operations
    "init",
]
# These need to return something other than None
# "coerce",
# "hash",
# "str",
# "repr",
# "int",
# "long",
# "float",
# "oct",
# "hex",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
# Global log of (method-name, positional-args) tuples recorded by trackCall.
callLst = []

def trackCall(f):
    """Decorator: append (f.__name__, args) to callLst, then delegate to f."""
    def wrapper(*args, **kwargs):
        callLst.append((f.__name__, args))
        return f(*args, **kwargs)
    return wrapper
class AllTests:
    """Instance whose every special method records its invocation in callLst.

    The methods below must return something meaningful; the remaining
    special methods (which may return None) are synthesized afterwards from
    testmeths via method_template.
    """
    trackCall = trackCall

    @trackCall
    def __coerce__(self, *args):
        return (self,) + args

    @trackCall
    def __hash__(self, *args):
        return hash(id(self))

    @trackCall
    def __str__(self, *args):
        return "AllTests"

    @trackCall
    def __repr__(self, *args):
        return "AllTests"

    @trackCall
    def __int__(self, *args):
        return 1

    @trackCall
    def __float__(self, *args):
        return 1.0

    @trackCall
    def __long__(self, *args):
        return 1L

    @trackCall
    def __oct__(self, *args):
        return '01'

    @trackCall
    def __hex__(self, *args):
        return '0x1'

    @trackCall
    def __cmp__(self, *args):
        return 0
# Synthesize all the other AllTests methods from the names in testmeths:
# each generated __<name>__ is a tracked no-op (returns None).
method_template = """\
@trackCall
def __%(method)s__(self, *args):
    pass
"""

for method in testmeths:
    # exec directly into the class dict so the methods bind like normal defs
    exec method_template % locals() in AllTests.__dict__

del method, method_template
class ClassTests(unittest.TestCase):
def setUp(self):
    """Clear the global call-log before each test."""
    del callLst[:]
def assertCallStack(self, expected_calls):
    """Fail unless the recorded call-log equals expected_calls.

    A snapshot is taken first because the comparison/formatting below
    triggers tracked __repr__ calls that append further entries to callLst.
    """
    snapshot = callLst[:]
    if expected_calls != snapshot:
        self.fail("Expected call list:\n %s\ndoes not match actual call list\n %s" %
                  (expected_calls, snapshot))
def testInit(self):
    # instantiating AllTests must invoke (only) the tracked __init__
    foo = AllTests()
    self.assertCallStack([("__init__", (foo,))])
def testBinaryOps(self):
testme = AllTests()
# Binary operations
callLst[:] = []
testme + 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__add__", (testme, 1))])
callLst[:] = []
1 + testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__radd__", (testme, 1))])
callLst[:] = []
testme - 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__sub__", (testme, 1))])
callLst[:] = []
1 - testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rsub__", (testme, 1))])
callLst[:] = []
testme * 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__mul__", (testme, 1))])
callLst[:] = []
1 * testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rmul__", (testme, 1))])
if 1/2 == 0:
callLst[:] = []
testme / 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__div__", (testme, 1))])
callLst[:] = []
1 / testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rdiv__", (testme, 1))])
callLst[:] = []
testme % 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__mod__", (testme, 1))])
callLst[:] = []
1 % testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rmod__", (testme, 1))])
callLst[:] = []
divmod(testme,1)
self.assertCallStack([("__coerce__", (testme, 1)), ("__divmod__", (testme, 1))])
callLst[:] = []
divmod(1, testme)
self.assertCallStack([("__coerce__", (testme, 1)), ("__rdivmod__", (testme, 1))])
callLst[:] = []
testme ** 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__pow__", (testme, 1))])
callLst[:] = []
1 ** testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rpow__", (testme, 1))])
callLst[:] = []
testme >> 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__rshift__", (testme, 1))])
callLst[:] = []
1 >> testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rrshift__", (testme, 1))])
callLst[:] = []
testme << 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__lshift__", (testme, 1))])
callLst[:] = []
1 << testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rlshift__", (testme, 1))])
callLst[:] = []
testme & 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__and__", (testme, 1))])
callLst[:] = []
1 & testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rand__", (testme, 1))])
callLst[:] = []
testme | 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__or__", (testme, 1))])
callLst[:] = []
1 | testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__ror__", (testme, 1))])
callLst[:] = []
testme ^ 1
self.assertCallStack([("__coerce__", (testme, 1)), ("__xor__", (testme, 1))])
callLst[:] = []
1 ^ testme
self.assertCallStack([("__coerce__", (testme, 1)), ("__rxor__", (testme, 1))])
def testListAndDictOps(self):
testme = AllTests()
# List/dict operations
class Empty: pass
try:
1 in Empty()
self.fail('failed, should have raised TypeError')
except TypeError:
pass
callLst[:] = []
1 in testme
self.assertCallStack([('__contains__', (testme, 1))])
callLst[:] = []
testme[1]
self.assertCallStack([('__getitem__', (testme, 1))])
callLst[:] = []
testme[1] = 1
self.assertCallStack([('__setitem__', (testme, 1, 1))])
callLst[:] = []
del testme[1]
self.assertCallStack([('__delitem__', (testme, 1))])
callLst[:] = []
testme[:42]
self.assertCallStack([('__getslice__', (testme, 0, 42))])
callLst[:] = []
testme[:42] = "The Answer"
self.assertCallStack([('__setslice__', (testme, 0, 42, "The Answer"))])
callLst[:] = []
del testme[:42]
self.assertCallStack([('__delslice__', (testme, 0, 42))])
callLst[:] = []
testme[2:1024:10]
self.assertCallStack([('__getitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[2:1024:10] = "A lot"
self.assertCallStack([('__setitem__', (testme, slice(2, 1024, 10),
"A lot"))])
callLst[:] = []
del testme[2:1024:10]
self.assertCallStack([('__delitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__getitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100] = "Strange"
self.assertCallStack([('__setitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100), "Strange"))])
callLst[:] = []
del testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__delitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
# Now remove the slice hooks to see if converting normal slices to
# slice object works.
getslice = AllTests.__getslice__
del AllTests.__getslice__
setslice = AllTests.__setslice__
del AllTests.__setslice__
delslice = AllTests.__delslice__
del AllTests.__delslice__
# XXX when using new-style classes the slice testme[:42] produces
# slice(None, 42, None) instead of slice(0, 42, None). py3k will have
# to change this test.
callLst[:] = []
testme[:42]
self.assertCallStack([('__getitem__', (testme, slice(0, 42, None)))])
callLst[:] = []
testme[:42] = "The Answer"
self.assertCallStack([('__setitem__', (testme, slice(0, 42, None),
"The Answer"))])
callLst[:] = []
del testme[:42]
self.assertCallStack([('__delitem__', (testme, slice(0, 42, None)))])
# Restore the slice methods, or the tests will fail with regrtest -R.
AllTests.__getslice__ = getslice
AllTests.__setslice__ = setslice
AllTests.__delslice__ = delslice
def testUnaryOps(self):
testme = AllTests()
callLst[:] = []
-testme
self.assertCallStack([('__neg__', (testme,))])
callLst[:] = []
+testme
self.assertCallStack([('__pos__', (testme,))])
callLst[:] = []
abs(testme)
self.assertCallStack([('__abs__', (testme,))])
callLst[:] = []
int(testme)
self.assertCallStack([('__int__', (testme,))])
callLst[:] = []
long(testme)
self.assertCallStack([('__long__', (testme,))])
callLst[:] = []
float(testme)
self.assertCallStack([('__float__', (testme,))])
callLst[:] = []
oct(testme)
self.assertCallStack([('__oct__', (testme,))])
callLst[:] = []
hex(testme)
self.assertCallStack([('__hex__', (testme,))])
def testMisc(self):
testme = AllTests()
callLst[:] = []
hash(testme)
self.assertCallStack([('__hash__', (testme,))])
callLst[:] = []
repr(testme)
self.assertCallStack([('__repr__', (testme,))])
callLst[:] = []
str(testme)
self.assertCallStack([('__str__', (testme,))])
callLst[:] = []
testme == 1
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])
callLst[:] = []
testme < 1
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])
callLst[:] = []
testme > 1
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])
callLst[:] = []
eval('testme <> 1') # XXX kill this in py3k
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])
callLst[:] = []
testme != 1
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])
callLst[:] = []
1 == testme
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])
callLst[:] = []
1 < testme
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])
callLst[:] = []
1 > testme
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])
callLst[:] = []
eval('1 <> testme')
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])
callLst[:] = []
1 != testme
self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])
def testGetSetAndDel(self):
# Interfering tests
class ExtraTests(AllTests):
@trackCall
def __getattr__(self, *args):
return "SomeVal"
@trackCall
def __setattr__(self, *args):
pass
@trackCall
def __delattr__(self, *args):
pass
testme = ExtraTests()
callLst[:] = []
testme.spam
self.assertCallStack([('__getattr__', (testme, "spam"))])
callLst[:] = []
testme.eggs = "spam, spam, spam and ham"
self.assertCallStack([('__setattr__', (testme, "eggs",
"spam, spam, spam and ham"))])
callLst[:] = []
del testme.cardinal
self.assertCallStack([('__delattr__', (testme, "cardinal"))])
def testDel(self):
x = []
class DelTest:
def __del__(self):
x.append("crab people, crab people")
testme = DelTest()
del testme
import gc
gc.collect()
self.assertEqual(["crab people, crab people"], x)
def testBadTypeReturned(self):
# return values of some method are type-checked
class BadTypeClass:
def __int__(self):
return None
__float__ = __int__
__long__ = __int__
__str__ = __int__
__repr__ = __int__
__oct__ = __int__
__hex__ = __int__
for f in [int, float, long, str, repr, oct, hex]:
self.assertRaises(TypeError, f, BadTypeClass())
def testMixIntsAndLongs(self):
# mixing up ints and longs is okay
class IntLongMixClass:
@trackCall
def __int__(self):
return 42L
@trackCall
def __long__(self):
return 64
mixIntAndLong = IntLongMixClass()
callLst[:] = []
as_int = int(mixIntAndLong)
self.assertEqual(type(as_int), long)
self.assertEqual(as_int, 42L)
self.assertCallStack([('__int__', (mixIntAndLong,))])
callLst[:] = []
as_long = long(mixIntAndLong)
self.assertEqual(type(as_long), long)
self.assertEqual(as_long, 64)
self.assertCallStack([('__long__', (mixIntAndLong,))])
def testHashStuff(self):
# Test correct errors from hash() on objects with comparisons but
# no __hash__
class C0:
pass
hash(C0()) # This should work; the next two should raise TypeError
class C1:
def __cmp__(self, other): return 0
self.assertRaises(TypeError, hash, C1())
class C2:
def __eq__(self, other): return 1
self.assertRaises(TypeError, hash, C2())
def testSFBug532646(self):
# Test for SF bug 532646
class A:
pass
A.__call__ = A()
a = A()
try:
a() # This should not segfault
except RuntimeError:
pass
else:
self.fail("Failed to raise RuntimeError")
def testForExceptionsRaisedInInstanceGetattr2(self):
# Tests for exceptions raised in instance_getattr2().
def booh(self):
raise AttributeError("booh")
class A:
a = property(booh)
try:
A().a # Raised AttributeError: A instance has no attribute 'a'
except AttributeError, x:
if str(x) != "booh":
self.fail("attribute error for A().a got masked: %s" % x)
class E:
__eq__ = property(booh)
E() == E() # In debug mode, caused a C-level assert() to fail
class I:
__init__ = property(booh)
try:
# In debug mode, printed XXX undetected error and
# raises AttributeError
I()
except AttributeError, x:
pass
else:
self.fail("attribute error for I.__init__ got masked")
def testHashComparisonOfMethods(self):
# Test comparison and hash of methods
class A:
def __init__(self, x):
self.x = x
def f(self):
pass
def g(self):
pass
def __eq__(self, other):
return self.x == other.x
def __hash__(self):
return self.x
class B(A):
pass
a1 = A(1)
a2 = A(2)
self.assertEqual(a1.f, a1.f)
self.assertNotEqual(a1.f, a2.f)
self.assertNotEqual(a1.f, a1.g)
self.assertEqual(a1.f, A(1).f)
self.assertEqual(hash(a1.f), hash(a1.f))
self.assertEqual(hash(a1.f), hash(A(1).f))
self.assertNotEqual(A.f, a1.f)
self.assertNotEqual(A.f, A.g)
self.assertEqual(B.f, A.f)
self.assertEqual(hash(B.f), hash(A.f))
# the following triggers a SystemError in 2.4
a = A(hash(A.f.im_func)^(-1))
hash(a.f)
def test_main():
    """Run ClassTests while silencing the py3k warnings the suite triggers."""
    expected_warnings = [
        (".+__(get|set|del)slice__ has been removed", DeprecationWarning),
        ("classic int division", DeprecationWarning),
        ("<> not supported", DeprecationWarning),
    ]
    with test_support.check_py3k_warnings(*expected_warnings):
        test_support.run_unittest(ClassTests)


if __name__ == '__main__':
    test_main()
|
meta-it/misc-addons | refs/heads/10.0 | hr_public_holidays_ics_import/__openerp__.py | 1 | # -*- coding: utf-8 -*-
{
"name": """Import Holiday ICS""",
"summary": """
No need for import holidays manually anymore""",
"category": "Human Resources",
"images": ['images/ics_import.png'],
"version": "1.0.0",
"author": "IT-Projects LLC, Ildar Nasyrov",
"website": "https://it-projects.info",
"license": "AGPL-3",
"price": 9.00,
"currency": "EUR",
"depends": [
"hr_public_holidays",
],
"external_dependencies": {"python": ['icalendar'], "bin": []},
"data": [
"wizard/import_ics.xml",
"views/hr_public_holidays_view.xml",
],
"qweb": [
],
"demo": [
],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"installable": False,
"auto_install": False,
}
|
michalliu/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/unittest/test/test_program.py | 71 | import io
import os
import sys
from test import support
import unittest
import unittest.test
class Test_TestProgram(unittest.TestCase):
    """White-box tests of unittest.TestProgram construction and discovery."""
    def test_discovery_from_dotted_path(self):
        # Discovery given a dotted package name must resolve to the package
        # directory and delegate to the loader's _find_tests.
        loader = unittest.TestLoader()
        tests = [self]
        expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
        self.wasRun = False
        def _find_tests(start_dir, pattern):
            self.wasRun = True
            self.assertEqual(start_dir, expectedPath)
            return tests
        loader._find_tests = _find_tests
        suite = loader.discover('unittest.test')
        self.assertTrue(self.wasRun)
        self.assertEqual(suite._tests, tests)
    # Horrible white box test
    def testNoExit(self):
        result = object()
        test = object()
        class FakeRunner(object):
            def run(self, test):
                self.test = test
                return result
        runner = FakeRunner()
        # Neutralize parseArgs and pre-seed the test attribute so that
        # instantiating TestProgram goes straight to running our fake runner;
        # both patches are undone via addCleanup.
        oldParseArgs = unittest.TestProgram.parseArgs
        def restoreParseArgs():
            unittest.TestProgram.parseArgs = oldParseArgs
        unittest.TestProgram.parseArgs = lambda *args: None
        self.addCleanup(restoreParseArgs)
        def removeTest():
            del unittest.TestProgram.test
        unittest.TestProgram.test = test
        self.addCleanup(removeTest)
        program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)
        self.assertEqual(program.result, result)
        self.assertEqual(runner.test, test)
        self.assertEqual(program.verbosity, 2)
    # Fixture: a case with one passing and one failing test.
    class FooBar(unittest.TestCase):
        def testPass(self):
            assert True
        def testFail(self):
            assert False
    class FooBarLoader(unittest.TestLoader):
        """Test loader that returns a suite containing FooBar."""
        def loadTestsFromModule(self, module):
            return self.suiteClass(
                [self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
        def loadTestsFromNames(self, names, module):
            return self.suiteClass(
                [self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
    def test_defaultTest_with_string(self):
        class FakeRunner(object):
            def run(self, test):
                self.test = test
                return True
        old_argv = sys.argv
        sys.argv = ['faketest']
        runner = FakeRunner()
        program = unittest.TestProgram(testRunner=runner, exit=False,
                                       defaultTest='unittest.test',
                                       testLoader=self.FooBarLoader())
        sys.argv = old_argv
        self.assertEqual(('unittest.test',), program.testNames)
    def test_defaultTest_with_iterable(self):
        class FakeRunner(object):
            def run(self, test):
                self.test = test
                return True
        old_argv = sys.argv
        sys.argv = ['faketest']
        runner = FakeRunner()
        program = unittest.TestProgram(
            testRunner=runner, exit=False,
            defaultTest=['unittest.test', 'unittest.test2'],
            testLoader=self.FooBarLoader())
        sys.argv = old_argv
        self.assertEqual(['unittest.test', 'unittest.test2'],
                         program.testNames)
    def test_NonExit(self):
        # exit=False must return the program object instead of calling
        # sys.exit, even though FooBar contains a failing test.
        program = unittest.main(exit=False,
                                argv=["foobar"],
                                testRunner=unittest.TextTestRunner(stream=io.StringIO()),
                                testLoader=self.FooBarLoader())
        self.assertTrue(hasattr(program, 'result'))
    def test_Exit(self):
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
            exit=True,
            testLoader=self.FooBarLoader())
    def test_ExitAsDefault(self):
        # Exiting on completion is the default behaviour of unittest.main.
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
            testLoader=self.FooBarLoader())
class InitialisableProgram(unittest.TestProgram):
    """TestProgram stand-in whose __init__ is a no-op.

    The state normally derived from argv is supplied up-front as class
    attributes so parseArgs/runTests can be exercised piecemeal.
    """
    # Never call sys.exit(); start with no recorded result.
    exit = False
    result = None
    # Reporting and selection defaults.
    verbosity = 1
    defaultTest = None
    test = 'test'
    # Collaborators and identity.
    testRunner = None
    testLoader = unittest.defaultTestLoader
    module = '__main__'
    progName = 'test'

    def __init__(self, *args):
        # Deliberately skip TestProgram.__init__, which would parse argv
        # and immediately run tests.
        pass
# Sentinel returned by FakeRunner.run so tests can identity-check the result.
RESULT = object()


class FakeRunner(object):
    """Test-double runner that records how it was constructed and invoked."""

    # Class-level capture slots; the fixtures reset these between tests.
    initArgs = None
    test = None
    raiseError = False

    def __init__(self, **kwargs):
        cls = FakeRunner
        cls.initArgs = kwargs
        if cls.raiseError:
            # One-shot failure: clear the flag, then reject the new-style
            # keyword arguments exactly once.
            cls.raiseError = False
            raise TypeError

    def run(self, test):
        FakeRunner.test = test
        return RESULT
class TestCommandLineArgs(unittest.TestCase):
    """Exercise TestProgram.parseArgs/runTests with InitialisableProgram
    and FakeRunner, without spawning real test runs."""
    def setUp(self):
        self.program = InitialisableProgram()
        self.program.createTests = lambda: None
        # Reset the FakeRunner capture slots between tests.
        FakeRunner.initArgs = None
        FakeRunner.test = None
        FakeRunner.raiseError = False
    def testVerbosity(self):
        program = self.program
        for opt in '-q', '--quiet':
            program.verbosity = 1
            program.parseArgs([None, opt])
            self.assertEqual(program.verbosity, 0)
        for opt in '-v', '--verbose':
            program.verbosity = 1
            program.parseArgs([None, opt])
            self.assertEqual(program.verbosity, 2)
    def testBufferCatchFailfast(self):
        program = self.program
        for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
                      ('catch', 'catchbreak')):
            # NOTE(review): `attr` is always 'buffer'/'failfast'/'catchbreak',
            # never 'catch', so this guard is dead code — and hasInstallHandler
            # is not defined in this module, so it would raise NameError if it
            # were ever evaluated. Probably meant `arg == 'catch'`; confirm
            # against upstream before changing.
            if attr == 'catch' and not hasInstallHandler:
                continue
            # None means "not set on the command line" -> parseArgs defaults
            # it to False; any pre-set value must be left untouched.
            setattr(program, attr, None)
            program.parseArgs([None])
            self.assertIs(getattr(program, attr), False)
            false = []
            setattr(program, attr, false)
            program.parseArgs([None])
            self.assertIs(getattr(program, attr), false)
            true = [42]
            setattr(program, attr, true)
            program.parseArgs([None])
            self.assertIs(getattr(program, attr), true)
            short_opt = '-%s' % arg[0]
            long_opt = '--%s' % arg
            for opt in short_opt, long_opt:
                setattr(program, attr, None)
                program.parseArgs([None, opt])
                self.assertIs(getattr(program, attr), True)
                # Passing the flag when the attribute is already explicitly
                # set is a usage error -> exit code 2.
                setattr(program, attr, False)
                with support.captured_stderr() as stderr, \
                    self.assertRaises(SystemExit) as cm:
                    program.parseArgs([None, opt])
                self.assertEqual(cm.exception.args, (2,))
                setattr(program, attr, True)
                with support.captured_stderr() as stderr, \
                    self.assertRaises(SystemExit) as cm:
                    program.parseArgs([None, opt])
                self.assertEqual(cm.exception.args, (2,))
    def testWarning(self):
        """Test the warnings argument"""
        # see #10535
        class FakeTP(unittest.TestProgram):
            def parseArgs(self, *args, **kw): pass
            def runTests(self, *args, **kw): pass
        warnoptions = sys.warnoptions[:]
        try:
            sys.warnoptions[:] = []
            # no warn options, no arg -> default
            self.assertEqual(FakeTP().warnings, 'default')
            # no warn options, w/ arg -> arg value
            self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
            sys.warnoptions[:] = ['somevalue']
            # warn options, no arg -> None
            # warn options, w/ arg -> arg value
            self.assertEqual(FakeTP().warnings, None)
            self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
        finally:
            # Restore the interpreter-wide warning options.
            sys.warnoptions[:] = warnoptions
    def testRunTestsRunnerClass(self):
        # When testRunner is a class, runTests must instantiate it with the
        # program's settings as keyword arguments.
        program = self.program
        program.testRunner = FakeRunner
        program.verbosity = 'verbosity'
        program.failfast = 'failfast'
        program.buffer = 'buffer'
        program.warnings = 'warnings'
        program.runTests()
        self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
                                                'failfast': 'failfast',
                                                'buffer': 'buffer',
                                                'warnings': 'warnings'})
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testRunTestsRunnerInstance(self):
        program = self.program
        program.testRunner = FakeRunner()
        FakeRunner.initArgs = None
        program.runTests()
        # A new FakeRunner should not have been instantiated
        self.assertIsNone(FakeRunner.initArgs)
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testRunTestsOldRunnerClass(self):
        program = self.program
        # FakeRunner raises TypeError on the first, kwarg-rich construction.
        FakeRunner.raiseError = True
        program.testRunner = FakeRunner
        program.verbosity = 'verbosity'
        program.failfast = 'failfast'
        program.buffer = 'buffer'
        program.test = 'test'
        program.runTests()
        # If initialising raises a type error it should be retried
        # without the new keyword arguments
        self.assertEqual(FakeRunner.initArgs, {})
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testCatchBreakInstallsHandler(self):
        module = sys.modules['unittest.main']
        original = module.installHandler
        def restore():
            module.installHandler = original
        self.addCleanup(restore)
        self.installed = False
        def fakeInstallHandler():
            self.installed = True
        # Patch the SIGINT handler installer seen by unittest.main.
        module.installHandler = fakeInstallHandler
        program = self.program
        program.catchbreak = True
        program.testRunner = FakeRunner
        program.runTests()
        self.assertTrue(self.installed)
    def _patch_isfile(self, names, exists=True):
        # Make os.path.isfile report True exactly for paths in *names*;
        # restored automatically via addCleanup.
        def isfile(path):
            return path in names
        original = os.path.isfile
        os.path.isfile = isfile
        def restore():
            os.path.isfile = original
        self.addCleanup(restore)
    def testParseArgsFileNames(self):
        # running tests with filenames instead of module names
        program = self.program
        argv = ['progname', 'foo.py', 'bar.Py', 'baz.PY', 'wing.txt']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        # note that 'wing.txt' is not a Python file so the name should
        # *not* be converted to a module name
        expected = ['foo', 'bar', 'baz', 'wing.txt']
        self.assertEqual(program.testNames, expected)
    def testParseArgsFilePaths(self):
        program = self.program
        argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        expected = ['foo.bar.baz', 'green.red']
        self.assertEqual(program.testNames, expected)
    def testParseArgsNonExistentFiles(self):
        # Paths that are not files are passed through untouched.
        program = self.program
        argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
        self._patch_isfile([])
        program.createTests = lambda: None
        program.parseArgs(argv)
        self.assertEqual(program.testNames, argv[1:])
    def testParseArgsAbsolutePathsThatCanBeConverted(self):
        cur_dir = os.getcwd()
        program = self.program
        def _join(name):
            return os.path.join(cur_dir, name)
        argv = ['progname', _join('foo/bar/baz.py'), _join('green\\red.py')]
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        expected = ['foo.bar.baz', 'green.red']
        self.assertEqual(program.testNames, expected)
    def testParseArgsAbsolutePathsThatCannotBeConverted(self):
        program = self.program
        # even on Windows '/...' is considered absolute by os.path.abspath
        argv = ['progname', '/foo/bar/baz.py', '/green/red.py']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        self.assertEqual(program.testNames, argv[1:])
    # it may be better to use platform specific functions to normalise paths
    # rather than accepting '.PY' and '\' as file separator on Linux / Mac
    # it would also be better to check that a filename is a valid module
    # identifier (we have a regex for this in loader.py)
    # for invalid filenames should we raise a useful error rather than
    # leaving the current error message (import of filename fails) in place?
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
ESS-LLP/erpnext-medical | refs/heads/develop | erpnext/crm/doctype/lead/lead_dashboard.py | 37 | from frappe import _
def get_data():
    """Dashboard config for Lead: list doctypes that link back via a
    'lead' field."""
    linked_documents = {'items': ['Opportunity', 'Quotation']}
    return {
        'fieldname': 'lead',
        'transactions': [linked_documents],
    }
cemmanouilidis/go.platform | refs/heads/master | test/test.py | 1 | #!/usr/bin/env python3
import unittest
import subprocess
class TestLinuxDistribution(unittest.TestCase):
    """Run the compiled ./platform binary inside several distro containers
    and check its single-line "<id> <release> <codename>" output.

    Requires docker on the host and a `platform` binary in the current
    working directory (mounted into each container as /pkg/platform).
    """

    def _assertPlatform(self, image, expected):
        """Run /pkg/platform inside docker *image* and assert its output.

        The exit status is deliberately ignored, matching the original
        per-distro tests; only the captured output is compared.
        """
        _, output = subprocess.getstatusoutput(
            "docker run --rm -it -v `pwd`:/pkg %s /pkg/platform" % image
        )
        self.assertEqual(expected, output)

    def testUbuntu1404(self):
        self._assertPlatform("ubuntu:14.04", "ubuntu 14.04 trusty")

    def testUbuntu1204(self):
        self._assertPlatform("ubuntu:12.04", "ubuntu 12.04 precise")

    def testDebianJessie(self):
        # Trailing space: the binary emits an empty codename for Debian.
        self._assertPlatform("debian:jessie", "debian 8.2 ")

    def testFedora20(self):
        self._assertPlatform("fedora:20", "fedora 20 Heisenbug")

    def testCentos72511(self):
        self._assertPlatform("centos:7.2.1511", "centos 7 Core")
# Run the container-based checks directly, with per-test verbose output.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
leppa/home-assistant | refs/heads/dev | homeassistant/components/wirelesstag/switch.py | 6 | """Switch implementation for Wireless Sensor Tags (wirelesstag.net)."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DOMAIN as WIRELESSTAG_DOMAIN, WirelessTagBaseSensor
_LOGGER = logging.getLogger(__name__)
# Monitoring types a tag can arm/disarm; the values double as the tag
# sensor-type identifiers used below.
ARM_TEMPERATURE = "temperature"
ARM_HUMIDITY = "humidity"
ARM_MOTION = "motion"
ARM_LIGHT = "light"
ARM_MOISTURE = "moisture"
# Switch types: Name, tag sensor type
SWITCH_TYPES = {
    ARM_TEMPERATURE: ["Arm Temperature", "temperature"],
    ARM_HUMIDITY: ["Arm Humidity", "humidity"],
    ARM_MOTION: ["Arm Motion", "motion"],
    ARM_LIGHT: ["Arm Light", "light"],
    ARM_MOISTURE: ["Arm Moisture", "moisture"],
}
# Users choose which arm-switches to create via `monitored_conditions`;
# every entry must be one of the SWITCH_TYPES keys.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
            cv.ensure_list, [vol.In(SWITCH_TYPES)]
        )
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches for a Wireless Sensor Tags."""
    platform = hass.data.get(WIRELESSTAG_DOMAIN)
    tags = platform.load_tags()
    # One switch per (requested monitoring type, tag that supports it).
    switches = [
        WirelessTagSwitch(platform, tag, switch_type)
        for switch_type in config.get(CONF_MONITORED_CONDITIONS)
        for tag in tags.values()
        if switch_type in tag.allowed_monitoring_types
    ]
    add_entities(switches, True)
class WirelessTagSwitch(WirelessTagBaseSensor, SwitchDevice):
    """A switch implementation for Wireless Sensor Tags.

    Each instance arms/disarms one monitoring type (see SWITCH_TYPES) on a
    single tag via the cloud platform API.
    """
    def __init__(self, api, tag, switch_type):
        """Initialize a switch for Wireless Sensor Tag."""
        super().__init__(api, tag)
        # Key into SWITCH_TYPES, e.g. "motion".
        self._switch_type = switch_type
        # Tag sensor type this switch arms (second element of SWITCH_TYPES).
        self.sensor_type = SWITCH_TYPES[self._switch_type][1]
        # Entity name: "<tag name> <friendly switch name>".
        self._name = "{} {}".format(self._tag.name, SWITCH_TYPES[self._switch_type][0])
    def turn_on(self, **kwargs):
        """Turn on the switch (arm monitoring on the tag)."""
        self._api.arm(self)
    def turn_off(self, **kwargs):
        """Turn off the switch (disarm monitoring on the tag)."""
        self._api.disarm(self)
    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        return self._state
    def updated_state_value(self):
        """Provide formatted value."""
        return self.principal_value
    @property
    def principal_value(self):
        """Provide actual value of switch."""
        # Tags expose armed state as e.g. `is_motion_sensor_armed`; default
        # to False when the attribute is absent.
        attr_name = f"is_{self.sensor_type}_sensor_armed"
        return getattr(self._tag, attr_name, False)
drj11/pdftables | refs/heads/dev | test/test_ground.py | 3 | from pdftables.pdf_document import PDFDocument
from pdftables.pdftables import page_to_tables
import lxml.etree
from collections import Counter
from nose.tools import assert_equals
class ResultTable(object):
    """Summary of one extracted table: a Counter of cell contents plus
    dimensions.

    Instances are populated attribute-by-attribute (``cells``,
    ``number_of_rows``, ``number_of_cols``) by pdf_results()/xml_results().
    """

    def __sub__(self, other):
        """Return the element-wise difference of two ResultTables.

        The resulting ``cells`` Counter keeps negative counts for content
        present only in *other*.

        Bug fix: the original did ``r.cells = self.cells`` and then called
        ``Counter.subtract``, which mutates in place — so subtracting
        corrupted the left operand's counter. Work on a copy instead.
        """
        r = ResultTable()
        r.cells = Counter(self.cells)  # copy: Counter.subtract mutates in place
        r.cells.subtract(other.cells)
        r.number_of_rows = self.number_of_rows - other.number_of_rows
        r.number_of_cols = self.number_of_cols - other.number_of_cols
        return r

    def __repr__(self):
        # "+N" counts surplus cells, "-N" missing cells, after a subtraction.
        assert self.cells is not None
        response = "<ResultTable: {col}x{row} +{plus} -{minus}>"
        return response.format(col=self.number_of_cols,
                               row=self.number_of_rows,
                               plus=sum(self.cells[x] for x in self.cells if self.cells[x] >= 1),
                               minus=abs(sum(self.cells[x] for x in self.cells if self.cells[x] <= -1)))
def pdf_results(filename):
    """Parse *filename* with pdftables and summarise each detected table
    as a ResultTable (cell-content Counter plus row/column counts)."""
    def summarise(table):
        summary = ResultTable()
        summary.cells = Counter(cell for row in table.data for cell in row)
        summary.number_of_rows = len(table.data)
        summary.number_of_cols = max(len(row) for row in table.data)
        return summary

    doc = PDFDocument.from_path(filename)
    for page in doc.get_pages():
        # Returns inside the loop, so only the first page is examined
        # (this mirrors the original control flow exactly).
        return [summarise(table) for table in page_to_tables(page)]
def xml_results(filename):
    """Read the ground-truth XML for *filename* and build one ResultTable
    per <table> element (content Counter plus 1-based row/column extents).

    Fix: the original left the file handle open (``open(...).read()``);
    use a ``with`` block so it is closed deterministically.
    """
    def max_of_strs(strs):
        # Attribute values arrive as strings; compare numerically.
        return max(map(int, strs))

    with open(filename, "rb") as handle:
        root = lxml.etree.fromstring(handle.read())

    builder = []
    for table in root.xpath("//table"):
        r = ResultTable()
        # NOTE(review): these '//' XPaths search the whole document, not just
        # the current <table>; preserved as-is since downstream expectations
        # may depend on it — confirm before tightening to './/'.
        r.cells = Counter(table.xpath("//content/text()"))
        cols = table.xpath("//@end-col")
        cols.extend(table.xpath("//@start-col"))
        rows = table.xpath("//@end-row")
        rows.extend(table.xpath("//@start-row"))
        r.number_of_cols = max_of_strs(cols) + 1  # starts at zero
        r.number_of_rows = max_of_strs(rows) + 1  # starts at zero
        builder.append(r)
    return builder
def _test_ground(filebase, number):
    """Check that pdftables' output for one EU-dataset fixture matches its
    ground-truth XML: see fixtures/eu-dataset."""
    pdf_tables = pdf_results(filebase % (number, ".pdf"))
    xml_tables = xml_results(filebase % (number, "-str.xml"))
    assert_equals(len(pdf_tables), len(xml_tables))
    for pdf_table, xml_table in zip(pdf_tables, xml_tables):
        diff = pdf_table - xml_table
        # Cells with a non-zero residual count differ between PDF and XML.
        leftover = {key: count for key, count in diff.cells.items() if count != 0}
        assert_equals(pdf_table.number_of_cols, xml_table.number_of_cols)
        assert_equals(pdf_table.number_of_rows, xml_table.number_of_rows)
        assert_equals(leftover, {})
def test_all_eu():
    """Nose test generator: one ground-truth check per EU fixture (1..34)."""
    filebase = "fixtures/eu-dataset/eu-%03d%s"
    for number in range(1, 35):
        yield _test_ground, filebase, number
|
TriOptima/tri.table | refs/heads/master | examples/examples/urls.py | 2 | from django.conf.urls import include, url
from django.contrib import admin
from examples import views
# URL routes for the tri.table example app.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Pages mirroring the README code snippets.
    url(r'^readme_example_1/$', views.readme_example_1, name='readme_example_1'),
    url(r'^readme_example_2/$', views.readme_example_2, name='readme_example_2'),
    # One page exercising most table features at once.
    url(r'^kitchen_sink/$', views.kitchen_sink, name='kitchen_sink'),
    # Dynamically served stylesheet.
    url(r'^style.css$', views.style, name='style'),
]
|
andrewsmedina/django | refs/heads/master | tests/aggregation_regress/tests.py | 5 | from __future__ import absolute_import, unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, Approximate, skipUnlessDBFeature
from django.utils import six
from django.utils.unittest import expectedFailure
from .models import (Author, Book, Publisher, Clues, Entries, HardbackBook,
ItemTag, WithManualPK)
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
#oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page' : 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
    def test_annotation(self):
        """
        annotate() composes with extra(select=...) and values() in any
        order, and the combined columns appear on the resulting objects
        or dicts.
        """
        # Annotations get combined with extra select clauses
        obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Order of the annotate/extra in the query doesn't matter
        obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Values queries can be combined with annotate and extra
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            "contact_id": 3,
            "id": 2,
            "isbn": "067232959",
            "mean_auth_age": 45.0,
            "name": "Sams Teach Yourself Django in 24 Hours",
            "pages": 528,
            "price": Decimal("23.09"),
            "pubdate": datetime.date(2008, 3, 3),
            "publisher_id": 2,
            "rating": 3.0,
        })
        # The order of the (empty) values, annotate and extra clauses doesn't
        # matter
        obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            'contact_id': 3,
            'id': 2,
            'isbn': '067232959',
            'mean_auth_age': 45.0,
            'name': 'Sams Teach Yourself Django in 24 Hours',
            'pages': 528,
            'price': Decimal("23.09"),
            'pubdate': datetime.date(2008, 3, 3),
            'publisher_id': 2,
            'rating': 3.0
        })
        # If the annotation precedes the values clause, it won't be included
        # unless it is explicitly named
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
        self.assertEqual(obj, {
            "name": 'The Definitive Guide to Django: Web Development Done Right',
        })
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # If an annotation isn't included in the values, it can still be used
        # in a filter
        qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
        self.assertQuerysetEqual(
            qs, [
                {"name": 'Python Web Development with Django'}
            ],
            lambda b: b,
        )
        # The annotations are added to values output if values() precedes
        # annotate()
        obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the amount of objects
        self.assertEqual(
            len(Author.objects.annotate(Avg('friends__age')).values()),
            9
        )
        # Check that consecutive calls to annotate accumulate in the query
        qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
        self.assertQuerysetEqual(
            qs, [
                {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
                {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
                {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
                {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
                {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
            ],
            lambda b: b,
        )
def test_aggrate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
    def test_more(self):
        """
        Assorted aggregation behaviours: count() over annotations,
        aggregates composed over annotations, alias quoting, interaction
        with select_related(), and exclude() on aggregate fields.
        """
        # Old-style count aggregations can be mixed with new-style
        self.assertEqual(
            Book.objects.annotate(num_authors=Count('authors')).count(),
            6
        )
        # Non-ordinal, non-computed Aggregates over annotations correctly
        # inherit the annotation's internal type if the annotation is ordinal
        # or computed
        vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
        self.assertEqual(
            vals,
            {'num_authors__max': 3}
        )
        vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
        self.assertEqual(
            vals,
            {'avg_price__max': 75.0}
        )
        # Aliases are quoted to protected aliases that might be reserved names
        # ('select' is a SQL keyword, hence the quoting requirement)
        vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
        self.assertEqual(
            vals,
            {'number': 1132, 'select': 1132}
        )
        # Regression for #10064: select_related() plays nice with aggregates
        obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
        self.assertEqual(obj, {
            'contact_id': 8,
            'id': 5,
            'isbn': '013790395',
            'name': 'Artificial Intelligence: A Modern Approach',
            'num_authors': 2,
            'pages': 1132,
            'price': Decimal("82.8"),
            'pubdate': datetime.date(1995, 1, 15),
            'publisher_id': 3,
            'rating': 4.0,
        })
        # Regression for #10010: exclude on an aggregate field is correctly
        # negated
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors'))),
            6
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
            1
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
            5
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
            2
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
            2
        )
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2*F('num_books')).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
    def test_empty(self):
        """
        Regression for #10089: empty result sets still yield sane aggregate
        values -- 0 for Count, None for the other aggregates.
        """
        self.assertEqual(
            Book.objects.filter(id__in=[]).count(),
            0
        )
        vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
        self.assertEqual(
            vals,
            {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
        )
        # Same expectations for per-row annotations on a publisher that
        # has no books.
        qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
        self.assertQuerysetEqual(
            qs, [
                {'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
            ],
            lambda p: p
        )
    def test_more_more(self):
        """
        Grouping/ordering regressions: order_by joins in GROUP BY (#10113),
        empty select_related() with annotate (#10127), extra-only values()
        grouping (#10132), subquery realiasing (#10182) and duplicate
        group_by fields (#15709).
        """
        # Regression for #10113 - Fields mentioned in order_by() must be
        # included in the GROUP BY. This only becomes a problem when the
        # order_by introduces a new join.
        self.assertQuerysetEqual(
            Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
                "Practical Django Projects",
                "The Definitive Guide to Django: Web Development Done Right",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "Sams Teach Yourself Django in 24 Hours",
            ],
            lambda b: b.name
        )
        # Regression for #10127 - Empty select_related() works with annotate
        qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
        self.assertQuerysetEqual(
            qs, [
                ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
                ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
                ('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
                ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
            ],
            lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
        )
        # Regression for #10132 - If the values() clause only mentioned extra
        # (select=) columns, those columns are used for grouping
        qs = Book.objects.extra(select={'pub':'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        qs = Book.objects.extra(select={'pub':'publisher_id', 'foo':'pages'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        # Regression for #10182 - Queries with aggregate calls are correctly
        # realiased when used in a subquery
        ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
        self.assertQuerysetEqual(
            Book.objects.filter(id__in=ids), [
                "Python Web Development with Django",
            ],
            lambda b: b.name
        )
        # Regression for #15709 - Ensure each group_by field only exists once
        # per query
        qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
        grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([], [])
        self.assertEqual(len(grouping), 1)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in the ValuesQuerySet, so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
    def test_more_more_more(self):
        """
        More aggregation regressions: aggregate() clones the query (#10199),
        DateQuerySets (#10248), parameterised extra selects (#10290),
        count() with annotations (#10425), inherited fields (#10666) and
        referencing an aggregate from aggregate() (#10766).
        """
        # Regression for #10199 - Aggregate calls clone the original query so
        # the original query can still be used
        books = Book.objects.all()
        books.aggregate(Avg("authors__age"))
        self.assertQuerysetEqual(
            books.all(), [
                'Artificial Intelligence: A Modern Approach',
                'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
                'Practical Django Projects',
                'Python Web Development with Django',
                'Sams Teach Yourself Django in 24 Hours',
                'The Definitive Guide to Django: Web Development Done Right'
            ],
            lambda b: b.name
        )
        # Regression for #10248 - Annotations work with DateQuerySets
        qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
        self.assertQuerysetEqual(
            qs, [
                datetime.date(1995, 1, 15),
                datetime.date(2007, 12, 6),
            ],
            lambda b: b
        )
        # Regression for #10290 - extra selects with parameters can be used for
        # grouping.
        qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets' : '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
        self.assertQuerysetEqual(
            qs, [
                150,
                175,
                224,
                264,
                473,
                566
            ],
            lambda b: int(b["sheets"])
        )
        # Regression for 10425 - annotations don't get in the way of a count()
        # clause
        self.assertEqual(
            Book.objects.values('publisher').annotate(Count('publisher')).count(),
            4
        )
        self.assertEqual(
            Book.objects.annotate(Count('publisher')).values('publisher').count(),
            6
        )
        publishers = Publisher.objects.filter(id__in=[1, 2])
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Annotating an already-filtered queryset must not disturb its
        # contents (checked repeatedly below after each reuse).
        publishers = publishers.annotate(n_books=Count("book"))
        self.assertEqual(
            publishers[0].n_books,
            2
        )
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        books = Book.objects.filter(publisher__in=publishers)
        self.assertQuerysetEqual(
            books, [
                "Practical Django Projects",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name
        )
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Regression for 10666 - inherited fields work with annotations and
        # aggregations
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
            {'n_pages': 2078}
        )
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('pages')),
            {'n_pages': 2078},
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h,
        )
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # fields in an aggregate() call.
        self.assertRaises(
            FieldError,
            lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
        )
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
    def test_annotation_disjunction(self):
        """
        Q disjunctions that mix annotation-based conditions (HAVING) with
        plain field conditions (WHERE) return the correct rows.
        """
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(n_authors=2) | Q(name="Python Web Development with Django")
        )
        self.assertQuerysetEqual(
            qs, [
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
        )
        self.assertQuerysetEqual(
            qs, [
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # A NULL-producing aggregate (publisher with no books) must be
        # matched by the isnull branch of the disjunction.
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
        ).order_by('pk')
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Prentice Hall",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(pk__lt=F("book_count")) | Q(rating_sum=None)
        ).order_by("pk")
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
    @skipUnlessDBFeature('supports_stddev')
    def test_stddev(self):
        """
        StdDev and Variance aggregates, in both population (default) and
        sample (sample=True) forms.  Expected values are approximate since
        they are backend-computed floats.
        """
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages')),
            {'pages__stddev': Approximate(311.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating')),
            {'rating__stddev': Approximate(0.60, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price')),
            {'price__stddev': Approximate(24.16, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages', sample=True)),
            {'pages__stddev': Approximate(341.19, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating', sample=True)),
            {'rating__stddev': Approximate(0.66, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price', sample=True)),
            {'price__stddev': Approximate(26.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages')),
            {'pages__variance': Approximate(97010.80, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating')),
            {'rating__variance': Approximate(0.36, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price')),
            {'price__variance': Approximate(583.77, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages', sample=True)),
            {'pages__variance': Approximate(116412.96, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating', sample=True)),
            {'rating__variance': Approximate(0.44, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price', sample=True)),
            {'price__variance': Approximate(700.53, 2)}
        )
def test_filtering_by_annotation_name(self):
# Regression test for #14476
# The name of the explicitly provided annotation name in this case
# poses no problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_type_conversion(self):
# The database backend convert_values function should not try to covert
# CharFields to float. Refs #13844.
from django.db.models import CharField
from django.db import connection
testData = 'not_a_float_value'
testField = CharField()
self.assertEqual(
connection.ops.convert_values(testData, testField),
testData
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
cause the query generation to fail if there is an exclude() for fk-field
in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'contact' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_book', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
    def test_aggregation_with_generic_reverse_relation(self):
        """
        Regression test for #10870: Aggregates with joins ignore extra
        filters provided by setup_joins

        tests aggregations with generic reverse relations
        """
        b = Book.objects.get(name='Practical Django Projects')
        ItemTag.objects.create(object_id=b.id, tag='intermediate',
                content_type=ContentType.objects.get_for_model(b))
        ItemTag.objects.create(object_id=b.id, tag='django',
                content_type=ContentType.objects.get_for_model(b))
        # Assign a tag to model with same PK as the book above. If the JOIN
        # used in aggregation doesn't have content type as part of the
        # condition the annotation will also count the 'hi mom' tag for b.
        wmpk = WithManualPK.objects.create(id=b.pk)
        ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
                content_type=ContentType.objects.get_for_model(wmpk))
        b = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
        ItemTag.objects.create(object_id=b.id, tag='intermediate',
                content_type=ContentType.objects.get_for_model(b))
        # Three Book tags total; the WithManualPK tag must not be counted.
        self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
        results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
        self.assertEqual(
            [(b.name, b.tags__count) for b in results],
            [
                ('Practical Django Projects', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Artificial Intelligence: A Modern Approach', 0),
                ('Python Web Development with Django', 0),
                ('Sams Teach Yourself Django in 24 Hours', 0),
                ('The Definitive Guide to Django: Web Development Done Right', 0)
            ]
        )
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2)|Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2)|Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted corretly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig')|Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
    def test_fobj_group_by(self):
        """
        Check that an F() object referring to related column works correctly
        in group by.
        """
        qs = Book.objects.annotate(
            acount=Count('authors')
        ).filter(
            # Comparing the aggregate to a related column forces that
            # column into the GROUP BY clause.
            acount=F('publisher__num_awards')
        )
        self.assertQuerysetEqual(
            qs, ['Sams Teach Yourself Django in 24 Hours'],
            lambda b: b.name)
|
Theer108/invenio | refs/heads/master | invenio/modules/groups/user_settings.py | 13 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebAccount User Settings"""
from flask import url_for
from invenio.base.i18n import _
from invenio.ext.template import render_template_to_string
from flask_login import current_user
from invenio.modules.account.models import UserUsergroup
from invenio.modules.dashboard.settings import Settings
class WebGroupSettings(Settings):
    """Dashboard settings plugin listing the groups the current user is in."""
    def __init__(self):
        super(WebGroupSettings, self).__init__()
        self.icon = 'tags'
        self.title = _('Group')
        self.view = url_for('webgroup.index')
        #self.edit = url_for('webgroup.edit', name=self.name)
    def widget(self):
        """Render the current user's group memberships as an HTML snippet."""
        uid = current_user.get_id()
        usergroups = UserUsergroup.query.filter(
            UserUsergroup.id_user == uid
        ).all()
        # Inline Jinja template, rendered with _from_string below.
        template = """
{%- if usergroups -%}
{{ _('You are involved in following groups:') }}
<div>
{%- for ug in usergroups -%}
<span class="label label-default">
  {{ ug.usergroup.name }}
</span>
{%- endfor -%}
</div>
{%- else -%}
{{ _('You are not involved in any group.') }}
{%- endif -%}
"""
        rv = render_template_to_string(template, _from_string=True,
                                       usergroups=usergroups)
        return rv
    # Dashboard layout hint: this widget spans 4 grid columns.
    widget.size = 4
    @property
    def is_authorized(self):
        # NOTE(review): the action name 'usegroups' looks like a possible
        # typo for 'usergroups' -- confirm against the registered access
        # actions before changing this runtime string.
        return current_user.is_authenticated() and \
            current_user.is_authorized('usegroups')
# Compulsory plugin interface
settings = WebGroupSettings
#__all__ = ['WebMessageSettings']
|
loafbaker/django_matchmaker | refs/heads/master | jobs/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the jobs app: Employer, Job and Location, with the
    Employer -> Location foreign key added after both models exist."""
    dependencies = [
        # Job.flagged / Location.flagged reference the swappable user model,
        # so it must be migrated first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Employer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.CharField(max_length=120)),
                ('active', models.BooleanField(default=True)),
                ('flagged', models.ManyToManyField(to=settings.AUTH_USER_MODEL, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('active', models.BooleanField(default=True)),
                ('flagged', models.ManyToManyField(to=settings.AUTH_USER_MODEL, blank=True)),
            ],
        ),
        # Added as a separate step so both Employer and Location exist first.
        migrations.AddField(
            model_name='employer',
            name='location',
            field=models.ForeignKey(blank=True, to='jobs.Location', null=True),
        ),
    ]
|
timothypage/etor | refs/heads/master | etor/samples/models.py | 1 | from django.db import models
from picker.models import Race, Ethnicity, TestReason, SpecimenSource
# TODO move this to picker?
# (stored value, human-readable label) pairs for model choice fields.
GENDER_CHOICES = (
    (u'M', u'Male'),
    (u'F', u'Female'),
    (u'U', u'Unknown'),
)
# Name suffixes; value and label are intentionally identical.
SUFFIX_CHOICES = (
    (u'JR.',u'JR.'),
    (u'SR.',u'SR.'),
    (u'I', u'I'),
    (u'II', u'II'),
    (u'III', u'III'),
    (u'IV', u'IV'),
)
# Insurance subscriber relationship; only one option defined so far.
REL_CHOICES = (
    (u'FATHER', u'Father'),
)
class Place(models.Model):
    """A city/state/zip address fragment."""
    city = models.CharField(max_length=64)
    state = models.CharField(max_length=64)
    # NOTE(review): 'zip' shadows the builtin zip() inside this class body
    # only; it is also the column name, so renaming needs a migration.
    zip = models.CharField(max_length=16)
    def __unicode__(self):
        return self.city + ' ' + self.state + ' ' + self.zip
class Insurance(models.Model):
    """Subscriber and policy identifiers for a patient's insurance."""
    last_name = models.CharField(max_length=255, help_text="Subscriber Last Name", blank=True)
    first_name = models.CharField(max_length=255, help_text="Subscriber First Name", blank=True)
    middle_initial = models.CharField(max_length=3, help_text="Subscriber MI", blank=True)
    relationship = models.CharField(max_length=32, choices=REL_CHOICES, blank=True)
    group_number = models.CharField(max_length=255, help_text="Insurance Group Number", blank=True)
    contract_number = models.CharField(max_length=255, help_text="Insurance Contract Number", blank=True)
    def __unicode__(self):
        return self.last_name + ' ' + self.first_name
class Patient(models.Model):
    """Demographics and identifiers for a tested patient."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255, blank=True)
    middle_initial = models.CharField(max_length=3, blank=True)
    suffix = models.CharField(max_length=3, choices=SUFFIX_CHOICES, blank=True)
    # NOTE(review): blank=True without null=True on a DateField allows an
    # empty form value but keeps the column NOT NULL -- saving an empty
    # value will fail at the database. Confirm whether null=True was meant.
    birth_date = models.DateField(blank=True)
    gender = models.CharField(max_length=16, choices=GENDER_CHOICES, blank=True)
    address = models.ForeignKey(Place, null=True)
    race = models.ForeignKey(Race, null=True) #TODO: continue making optional.
    ethnicity = models.ForeignKey(Ethnicity, null=True)
    patient_id = models.CharField(max_length=255, help_text="Submitter's Patient ID Number", blank=True)
    insurance = models.ForeignKey(Insurance, null=True)
    def __unicode__(self):
        return self.first_name + ' ' + self.last_name + ' ' + str(self.birth_date)
class Specimen(models.Model):
    """A specimen collected from a patient for testing."""
    patient = models.ForeignKey(Patient)
    collection_time = models.DateTimeField()
    specimen_id = models.CharField(max_length=255, blank=True)
    source = models.ForeignKey(SpecimenSource, null=True)
    # NOTE(review): null=True has no effect on a ManyToManyField (Django
    # ignores it); it can be dropped without a schema change.
    test_reason = models.ManyToManyField(TestReason, verbose_name="reason for testing", null=True)
    def __unicode__(self):
        return self.patient.first_name + ' ' + self.patient.last_name + ' ' + str(self.source)
|
zrs233/ursula | refs/heads/master | library/ceph_pool.py | 5 | #!/usr/bin/python
#coding: utf-8 -*-
DOCUMENTATION = """
---
author: Michael Sambol
module: ceph_pool
short_description: Creates ceph pool and ensures correct pg count
description:
There are three possible outcomes:
1/ Create a new pool if it doesn't exist
2/ Set the pool's pg count to correct number
3/ Nothing: pool is created and pg count is correct
options:
pool_name:
description:
- The pool in question: create it or ensure correct pg count
required: true
osds:
description:
- The osds count: pg count is calculated based on this
required: true
"""
EXAMPLES = """
# ceph_pool can only be run on nodes that have an admin keyring
# pool_name = default
- ceph_pool:
pool_name: default
osds: "{{ groups['ceph_osds_ssd']|length * ceph.disks|length }}"
register: pool_output
run_once: true
delegate_to: "{{ groups['ceph_monitors'][0] }}"
"""
import time
def increase_pg_count(module, osds, pool_name,
                      current_pg_count, desired_pg_count):
    """Raise a pool's pg_num/pgp_num to desired_pg_count in safe steps.

    Ceph rejects very large single increases, so the count is raised in
    passes of at most 32 pgs per OSD until the target is reached.  Each
    pass sets pg_num first, waits for it to settle, then sets pgp_num.

    The original had the pass body duplicated in both branches of a
    diff-counting loop; this is the same command sequence with the step
    computed via min() instead.
    """
    max_increase_per_pass = osds * 32
    while current_pg_count < desired_pg_count:
        # Step no further than one pass allows.
        current_pg_count = min(current_pg_count + max_increase_per_pass,
                               desired_pg_count)
        cmd = ['ceph', 'osd', 'pool', 'set', pool_name,
               'pg_num', str(current_pg_count)]
        rc, out, err = module.run_command(cmd, check_rc=True)
        # needs at least 10 seconds or the second command will fail
        time.sleep(10)
        cmd = ['ceph', 'osd', 'pool', 'set', pool_name,
               'pgp_num', str(current_pg_count)]
        rc, out, err = module.run_command(cmd, check_rc=True)
def main():
    """Ansible entry point: ensure `pool_name` exists with a pg count sized
    for `osds` OSDs (rounded up to a power of two, capped at 32 pgs/osd)."""
    module = AnsibleModule(
        argument_spec=dict(
            pool_name=dict(required=True),
            osds=dict(required=True, type='int'),
        ),
    )
    pool_name = module.params.get('pool_name')
    osds = module.params.get('osds')
    # calculate desired pg count
    # 100 is a constant and 3 is the number of copies
    # read more about pg count here: http://ceph.com/pgcalc/
    # (Python 2 integer division is intended here.)
    total_pg_count = (osds * 100) / 3
    # Round total_pg_count up to the next power of two.
    i = 0
    desired_pg_count = 0
    while desired_pg_count < total_pg_count:
        desired_pg_count = 2**i
        i += 1
    # if desired_pg_count is > 32 pgs/osd, ceph throws a warning
    # common protocol is to divide by 2
    if (desired_pg_count / osds) > 32:
        desired_pg_count = desired_pg_count / 2
    # does the pool exist already?
    cmd = ['ceph', 'osd', 'pool', 'get', pool_name, 'pg_num']
    rc, out, err = module.run_command(cmd, check_rc=False)
    # no
    if rc != 0:
        # create the pool
        cmd = ['ceph', 'osd', 'pool', 'create', pool_name,
               str(desired_pg_count), str(desired_pg_count)]
        rc, out, err = module.run_command(cmd, check_rc=True)
        module.exit_json(changed=True, msg="new pool was created")
    # yes
    else:
        # does the current pg count match the desired pg count?
        ## Example
        # out.splitlines()[0] = "pg_num: 256"
        # current_pg_count = 256
        current_pg_count = int(out.splitlines()[0].split(":")[1].strip())
        # no  (pg counts may only be raised, never lowered)
        if current_pg_count < desired_pg_count:
            increase_pg_count(module, osds, pool_name,
                              current_pg_count, desired_pg_count)
            module.exit_json(changed=True, msg="pool's pg count was changed")
        # yes
        else:
            module.exit_json(changed=False)
from ansible.module_utils.basic import *
main()
|
egineering-llc/egat | refs/heads/master | examples/example_jqueryui/tabs.py | 2 | __author__ = 'Brenda'
from egat.testset import SequentialTestSet
from webdriver_resource import WebDriverResource
from selenium import webdriver
class Test8(SequentialTestSet):
    """Exercises the jQuery UI 'Tabs' demo: loads the page, switches through
    the three tabs, and checks each pane becomes visible (aria-hidden is
    "false") with its expected text, taking screenshots along the way.

    The original `if X: assert(True) else: assert(False)` blocks are
    collapsed to bare asserts -- behavior is unchanged because
    find_element_* raises NoSuchElementException when the element is
    missing and otherwise returns a (always truthy) WebElement.
    """

    def testStep1(self):
        # We can access the configuration parameters from inside any test
        # function (values are unused; the step demonstrates access).
        base_url = self.configuration["base_url"]
        port = self.configuration["port"]

    @WebDriverResource.decorator
    def testStep2(self):
        # Verify the page loaded by checking for its author meta tag.
        self.browser = webdriver.Firefox()
        self.browser.get("http://jqueryui.com")
        assert self.browser.find_element_by_css_selector("meta[name='author']")

    def testStep3(self):
        # Open the Tabs demo page and switch into its demo iframe.
        self.browser.find_element_by_link_text('Tabs').click()
        self.browser.get_screenshot_as_file('Screenshots/Tabs/tabs_pagedisplayed.png')
        self.browser.switch_to_frame(self.browser.find_element_by_css_selector('#content > iframe'))
        assert self.browser.find_element_by_id('tabs')

    def testStep4(self):
        # The first tab's link must be present.
        assert self.browser.find_element_by_link_text('Nunc tincidunt')

    def testStep5(self):
        # First tab: pane visible (aria-hidden "false") and expected text.
        self.browser.get_screenshot_as_file('Screenshots/Tabs/tabs_tab1.png')
        assert "false" in self.browser.find_element_by_id('tabs-1').get_attribute('aria-hidden')
        element1 = self.browser.find_element_by_id('tabs-1').text
        assert "Proin elit arcu" in element1

    def testStep6(self):
        # Second tab: select it, then verify visibility and text.
        self.browser.find_element_by_link_text('Proin dolor').click()
        self.browser.get_screenshot_as_file('Screenshots/Tabs/tabs_tab2.png')
        assert 'false' in self.browser.find_element_by_id('tabs-2').get_attribute('aria-hidden')
        element2 = self.browser.find_element_by_id('tabs-2').text
        assert "Morbi tincidunt" in element2

    def testStep7(self):
        # Third tab: select it, then verify visibility and text.
        self.browser.find_element_by_link_text('Aenean lacinia').click()
        self.browser.get_screenshot_as_file('Screenshots/Tabs/tabs_tab3.png')
        assert 'false' in self.browser.find_element_by_id('tabs-3').get_attribute('aria-hidden')
        element3 = self.browser.find_element_by_id('tabs-3').text
        assert "Mauris eleifend est et turpis" in element3

    def testStep8(self):
        # Clean up the browser session.
        self.browser.quit()
orhankislal/incubator-madlib | refs/heads/master | src/madpack/yaml/composer.py | 120 |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
    # Raised for structural problems while composing the node graph
    # (undefined alias, duplicate anchor).
    pass
class Composer(object):
    """Mixin that builds the node graph (scalar/sequence/mapping nodes) from
    the parser's event stream.  check_event/get_event/peek_event and the
    resolver methods are expected from sibling classes in the loader."""
    def __init__(self):
        # Maps anchor name -> already-composed node, for alias resolution.
        self.anchors = {}
    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)
    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()
    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        # Anchors do not carry across documents.
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        # An alias must reference an anchor composed earlier in this document.
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor.encode('utf-8'), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurence"
                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
                        "second occurence", event.start_mark)
        # Tell the resolver where we are (so implicit tags resolve against
        # the right node path), then dispatch on the upcoming event type.
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == u'!':
            # No explicit tag: let the resolver pick one from the value.
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Registered before composing children so aliases inside the
            # sequence can refer back to it (recursive nodes).
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            self.anchors[anchor] = node
        # Values are kept as a list of (key, value) pairs; duplicate keys
        # are allowed here -- the constructor decides how to handle them.
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
|
linktlh/Toontown-journey | refs/heads/master | toontown/suit/DistributedBossCog.py | 1 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from otp.avatar import DistributedAvatar
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import BattleExperience
from toontown.battle import BattleBase
import BossCog
import SuitDNA
from toontown.coghq import CogDisguiseGlobals
from direct.showbase import Transitions
from toontown.hood import ZoneUtil
from toontown.building import ElevatorUtils
from toontown.building import ElevatorConstants
from toontown.distributed import DelayDelete
from toontown.effects import DustCloud
from toontown.toonbase import TTLocalizer
from toontown.friends import FriendsListManager
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase import PythonUtil
import random
from toontown.nametag import NametagGlobals
class DistributedBossCog(DistributedAvatar.DistributedAvatar, BossCog.BossCog):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBossCog')
allowClickedNameTag = True
    def __init__(self, cr):
        """Set up client-side boss state: toon/battle bookkeeping, floor-roll
        collision slots, the two battle attachment nodes, and interval
        tracking."""
        DistributedAvatar.DistributedAvatar.__init__(self, cr)
        BossCog.BossCog.__init__(self)
        self.gotAllToons = 0
        self.toonsA = []
        self.toonsB = []
        self.involvedToons = []
        self.toonRequest = None
        self.battleNumber = 0
        self.battleAId = None
        self.battleBId = None
        self.battleA = None
        self.battleB = None
        self.battleRequest = None
        self.arenaSide = 0
        self.toonSphere = None
        self.localToonIsSafe = 0
        self.__toonsStuckToFloor = []
        # Collision state used while rolling the boss along the floor
        # (see stickBossToFloor / rollBoss).
        self.cqueue = None
        self.rays = None
        self.ray1 = None
        self.ray2 = None
        self.ray3 = None
        self.e1 = None
        self.e2 = None
        self.e3 = None
        # Attachment nodes marking where battles A and B take place.
        self.battleANode = self.attachNewNode('battleA')
        self.battleBNode = self.attachNewNode('battleB')
        self.battleANode.setPosHpr(*ToontownGlobals.BossCogBattleAPosHpr)
        self.battleBNode.setPosHpr(*ToontownGlobals.BossCogBattleBPosHpr)
        self.activeIntervals = {}
        self.flashInterval = None
        self.elevatorType = ElevatorConstants.ELEVATOR_VP
        return
    def announceGenerate(self):
        """Generate hook: record the local toon's pre-fight suit level and
        build the boss's collision geometry (proximity sphere, body
        tubes/walls, and three stashed attack bubbles)."""
        DistributedAvatar.DistributedAvatar.announceGenerate(self)
        self.prevCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)]
        # Intangible 50-unit sphere that only reports enter/exitNearBoss.
        nearBubble = CollisionSphere(0, 0, 0, 50)
        nearBubble.setTangible(0)
        nearBubbleNode = CollisionNode('NearBoss')
        nearBubbleNode.setCollideMask(ToontownGlobals.WallBitmask)
        nearBubbleNode.addSolid(nearBubble)
        self.attachNewNode(nearBubbleNode)
        self.accept('enterNearBoss', self.avatarNearEnter)
        self.accept('exitNearBoss', self.avatarNearExit)
        # Replace the stock avatar solid with tubes (treads) and polygons
        # (roof/sides/front/back) approximating the boss body; contact is
        # tagged as the electric-fence attack.
        self.collNode.removeSolid(0)
        tube1 = CollisionTube(6.5, -7.5, 2, 6.5, 7.5, 2, 2.5)
        tube2 = CollisionTube(-6.5, -7.5, 2, -6.5, 7.5, 2, 2.5)
        roof = CollisionPolygon(Point3(-4.4, 7.1, 5.5), Point3(-4.4, -7.1, 5.5), Point3(4.4, -7.1, 5.5), Point3(4.4, 7.1, 5.5))
        side1 = CollisionPolygon(Point3(-4.4, -7.1, 5.5), Point3(-4.4, 7.1, 5.5), Point3(-4.4, 7.1, 0), Point3(-4.4, -7.1, 0))
        side2 = CollisionPolygon(Point3(4.4, 7.1, 5.5), Point3(4.4, -7.1, 5.5), Point3(4.4, -7.1, 0), Point3(4.4, 7.1, 0))
        front1 = CollisionPolygon(Point3(4.4, -7.1, 5.5), Point3(-4.4, -7.1, 5.5), Point3(-4.4, -7.1, 5.2), Point3(4.4, -7.1, 5.2))
        back1 = CollisionPolygon(Point3(-4.4, 7.1, 5.5), Point3(4.4, 7.1, 5.5), Point3(4.4, 7.1, 5.2), Point3(-4.4, 7.1, 5.2))
        self.collNode.addSolid(tube1)
        self.collNode.addSolid(tube2)
        self.collNode.addSolid(roof)
        self.collNode.addSolid(side1)
        self.collNode.addSolid(side2)
        self.collNode.addSolid(front1)
        self.collNode.addSolid(back1)
        self.collNodePath.reparentTo(self.axle)
        self.collNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask | ToontownGlobals.CameraBitmask)
        self.collNode.setName('BossZap')
        self.setTag('attackCode', str(ToontownGlobals.BossCogElectricFence))
        self.accept('enterBossZap', self.__touchedBoss)
        # Left/right swat bubbles and the frontal-attack bubble start
        # stashed; presumably unstashed while the matching attack plays --
        # the unstash site is outside this chunk, TODO confirm.
        bubbleL = CollisionSphere(10, -5, 0, 10)
        bubbleL.setTangible(0)
        bubbleLNode = CollisionNode('BossZap')
        bubbleLNode.setCollideMask(ToontownGlobals.WallBitmask)
        bubbleLNode.addSolid(bubbleL)
        self.bubbleL = self.axle.attachNewNode(bubbleLNode)
        self.bubbleL.setTag('attackCode', str(ToontownGlobals.BossCogSwatLeft))
        self.bubbleL.stash()
        bubbleR = CollisionSphere(-10, -5, 0, 10)
        bubbleR.setTangible(0)
        bubbleRNode = CollisionNode('BossZap')
        bubbleRNode.setCollideMask(ToontownGlobals.WallBitmask)
        bubbleRNode.addSolid(bubbleR)
        self.bubbleR = self.axle.attachNewNode(bubbleRNode)
        self.bubbleR.setTag('attackCode', str(ToontownGlobals.BossCogSwatRight))
        self.bubbleR.stash()
        bubbleF = CollisionSphere(0, -25, 0, 12)
        bubbleF.setTangible(0)
        bubbleFNode = CollisionNode('BossZap')
        bubbleFNode.setCollideMask(ToontownGlobals.WallBitmask)
        bubbleFNode.addSolid(bubbleF)
        self.bubbleF = self.rotateNode.attachNewNode(bubbleFNode)
        self.bubbleF.setTag('attackCode', str(ToontownGlobals.BossCogFrontAttack))
        self.bubbleF.stash()
    def disable(self):
        """Tear down per-session state (pending requests, intervals,
        collisions, event hooks); safe to call repeatedly."""
        DistributedAvatar.DistributedAvatar.disable(self)
        self.battleAId = None
        self.battleBId = None
        self.battleA = None
        self.battleB = None
        self.cr.relatedObjectMgr.abortRequest(self.toonRequest)
        self.toonRequest = None
        self.cr.relatedObjectMgr.abortRequest(self.battleRequest)
        self.battleRequest = None
        self.stopAnimate()
        self.cleanupIntervals()
        self.cleanupFlash()
        self.disableLocalToonSimpleCollisions()
        self.ignoreAll()
        return
    def delete(self):
        # The try/except on the sentinel attribute makes delete idempotent
        # (NOTE: bare except is overly broad but kept for byte-identity).
        try:
            self.DistributedBossCog_deleted
        except:
            self.DistributedBossCog_deleted = 1
            self.ignoreAll()
            DistributedAvatar.DistributedAvatar.delete(self)
            BossCog.BossCog.delete(self)
    def setDNAString(self, dnaString):
        # Network setter: apply wire-format suit DNA.
        BossCog.BossCog.setDNAString(self, dnaString)
    def getDNAString(self):
        return self.dna.makeNetString()
    def setDNA(self, dna):
        BossCog.BossCog.setDNA(self, dna)
    def setToonIds(self, involvedToons, toonsA, toonsB):
        """Receive the doIds of all participating toons (and their A/B split)
        and asynchronously resolve them; fires gotToon per toon and
        __gotAllToons once every doId is resolved."""
        self.involvedToons = involvedToons
        self.toonsA = toonsA
        self.toonsB = toonsB
        self.cr.relatedObjectMgr.abortRequest(self.toonRequest)
        self.gotAllToons = 0
        self.toonRequest = self.cr.relatedObjectMgr.requestObjects(self.involvedToons, allCallback=self.__gotAllToons, eachCallback=self.gotToon)
    def getDialogueArray(self, *args):
        return BossCog.BossCog.getDialogueArray(self, *args)
    def storeInterval(self, interval, name):
        """Track a running interval under `name`; if a same-named interval
        holding delay-deletes is still active, finish it first so avatar
        references are released."""
        if name in self.activeIntervals:
            ival = self.activeIntervals[name]
            if hasattr(ival, 'delayDelete') or hasattr(ival, 'delayDeletes'):
                self.clearInterval(name, finish=1)
        self.activeIntervals[name] = interval
    def cleanupIntervals(self):
        # Finish every outstanding interval and drop its delay-deletes.
        for interval in self.activeIntervals.values():
            interval.finish()
            DelayDelete.cleanupDelayDeletes(interval)
        self.activeIntervals = {}
    def clearInterval(self, name, finish = 1):
        """Finish (or just pause, when finish=0) and discard the named
        interval, if present."""
        if name in self.activeIntervals:
            ival = self.activeIntervals[name]
            if finish:
                ival.finish()
            else:
                ival.pause()
            # Finishing may have re-entered and removed the entry already.
            if name in self.activeIntervals:
                DelayDelete.cleanupDelayDeletes(ival)
                del self.activeIntervals[name]
        else:
            self.notify.debug('interval: %s already cleared' % name)
    def finishInterval(self, name):
        # Finish without removing the entry from the table.
        if name in self.activeIntervals:
            interval = self.activeIntervals[name]
            interval.finish()
    def d_avatarEnter(self):
        # Tell the AI the local avatar entered the boss zone.
        self.sendUpdate('avatarEnter', [])
    def d_avatarExit(self):
        self.sendUpdate('avatarExit', [])
    def avatarNearEnter(self, entry):
        # Collision callback from the 'NearBoss' proximity sphere.
        self.sendUpdate('avatarNearEnter', [])
    def avatarNearExit(self, entry):
        self.sendUpdate('avatarNearExit', [])
    def hasLocalToon(self):
        # True when the local toon is in either battle group.
        doId = localAvatar.doId
        return doId in self.toonsA or doId in self.toonsB
    def setBattleExperience(self, id0, origExp0, earnedExp0, origQuests0, items0, missedItems0, origMerits0, merits0, parts0, id1, origExp1, earnedExp1, origQuests1, items1, missedItems1, origMerits1, merits1, parts1, id2, origExp2, earnedExp2, origQuests2, items2, missedItems2, origMerits2, merits2, parts2, id3, origExp3, earnedExp3, origQuests3, items3, missedItems3, origMerits3, merits3, parts3, id4, origExp4, earnedExp4, origQuests4, items4, missedItems4, origMerits4, merits4, parts4, id5, origExp5, earnedExp5, origQuests5, items5, missedItems5, origMerits5, merits5, parts5, id6, origExp6, earnedExp6, origQuests6, items6, missedItems6, origMerits6, merits6, parts6, id7, origExp7, earnedExp7, origQuests7, items7, missedItems7, origMerits7, merits7, parts7, deathList, uberList, helpfulToons):
        """Network setter carrying the end-of-fight reward data for up to 8
        toons as flattened per-toon fields (the distributed protocol cannot
        send nested structures, hence the huge signature).  Regroups them
        into reward dicts via BattleExperience."""
        self.deathList = deathList
        self.uberList = uberList
        self.helpfulToons = helpfulToons
        # One 9-tuple per toon slot, in slot order.
        entries = ((id0,
          origExp0,
          earnedExp0,
          origQuests0,
          items0,
          missedItems0,
          origMerits0,
          merits0,
          parts0),
         (id1,
          origExp1,
          earnedExp1,
          origQuests1,
          items1,
          missedItems1,
          origMerits1,
          merits1,
          parts1),
         (id2,
          origExp2,
          earnedExp2,
          origQuests2,
          items2,
          missedItems2,
          origMerits2,
          merits2,
          parts2),
         (id3,
          origExp3,
          earnedExp3,
          origQuests3,
          items3,
          missedItems3,
          origMerits3,
          merits3,
          parts3),
         (id4,
          origExp4,
          earnedExp4,
          origQuests4,
          items4,
          missedItems4,
          origMerits4,
          merits4,
          parts4),
         (id5,
          origExp5,
          earnedExp5,
          origQuests5,
          items5,
          missedItems5,
          origMerits5,
          merits5,
          parts5),
         (id6,
          origExp6,
          earnedExp6,
          origQuests6,
          items6,
          missedItems6,
          origMerits6,
          merits6,
          parts6),
         (id7,
          origExp7,
          earnedExp7,
          origQuests7,
          items7,
          missedItems7,
          origMerits7,
          merits7,
          parts7))
        self.toonRewardDicts = BattleExperience.genRewardDicts(entries)
        self.toonRewardIds = [id0,
         id1,
         id2,
         id3,
         id4,
         id5,
         id6,
         id7]
    def setArenaSide(self, arenaSide):
        # Network setter: which side of the arena is active.
        self.arenaSide = arenaSide
    def setState(self, state):
        # Network setter: drive the FSM provided by the subclass.
        self.request(state)
    def gotToon(self, toon):
        # Per-toon resolution callback; subclasses may override.
        stateName = self.state
    def __gotAllToons(self, toons):
        self.gotAllToons = 1
        messenger.send('gotAllToons')
    def setBattleIds(self, battleNumber, battleAId, battleBId):
        """Receive the doIds of this round's two battle objects and request
        them from the repository; __gotBattles fires when both resolve."""
        self.battleNumber = battleNumber
        self.battleAId = battleAId
        self.battleBId = battleBId
        self.cr.relatedObjectMgr.abortRequest(self.battleRequest)
        self.battleRequest = self.cr.relatedObjectMgr.requestObjects([self.battleAId, self.battleBId], allCallback=self.__gotBattles)
    def __gotBattles(self, battles):
        # Clean up any stale battle objects before adopting the new pair.
        self.battleRequest = None
        if self.battleA and self.battleA != battles[0]:
            self.battleA.cleanupBattle()
        if self.battleB and self.battleB != battles[1]:
            self.battleB.cleanupBattle()
        self.battleA = battles[0]
        self.battleB = battles[1]
        return
    def cleanupBattles(self):
        if self.battleA:
            self.battleA.cleanupBattle()
        if self.battleB:
            self.battleB.cleanupBattle()
    def makeEndOfBattleMovie(self, hasLocalToon):
        # Subclass hook; the base boss has no victory movie.
        return Sequence()
    def controlToons(self):
        """Freeze all involved toons for a scripted movie: close open avatar
        panels, stop head-look/position smoothing, and put the local toon
        into movie mode."""
        for panel in self.cr.openAvatarPanels:
            if panel:
                panel.cleanupDialog()
        for toonId in self.involvedToons:
            toon = self.cr.doId2do.get(toonId)
            if toon:
                toon.stopLookAround()
                toon.stopSmooth()
        if self.hasLocalToon():
            self.toMovieMode()
    def enableLocalToonSimpleCollisions(self):
        """Give the local toon a plain pusher sphere against walls/floors,
        used while the normal control system is not driving collisions.
        The sphere/handler pair is built lazily and reused."""
        if not self.toonSphere:
            sphere = CollisionSphere(0, 0, 1, 1)
            sphere.setRespectEffectiveNormal(0)
            sphereNode = CollisionNode('SimpleCollisions')
            sphereNode.setFromCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.FloorBitmask)
            sphereNode.setIntoCollideMask(BitMask32.allOff())
            sphereNode.addSolid(sphere)
            self.toonSphere = NodePath(sphereNode)
            self.toonSphereHandler = CollisionHandlerPusher()
            self.toonSphereHandler.addCollider(self.toonSphere, localAvatar)
        self.toonSphere.reparentTo(localAvatar)
        base.cTrav.addCollider(self.toonSphere, self.toonSphereHandler)
    def disableLocalToonSimpleCollisions(self):
        # Detach (but keep) the reusable sphere; no-op before first enable.
        if self.toonSphere:
            base.cTrav.removeCollider(self.toonSphere)
            self.toonSphere.detachNode()
def toOuchMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('ouch')
def toCraneMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('crane')
def toMovieMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('movie')
def toWalkMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('walk')
def toFinalBattleMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('finalBattle')
    def releaseToons(self, finalBattle = 0):
        """Return control to the involved toons, except those still inside
        battle A or B; the local toon goes back to walk (or final-battle)
        mode."""
        for toonId in self.involvedToons:
            toon = self.cr.doId2do.get(toonId)
            if toon:
                if self.battleA and toon in self.battleA.toons:
                    pass
                elif self.battleB and toon in self.battleB.toons:
                    pass
                else:
                    toon.startLookAround()
                    toon.startSmooth()
                    toon.wrtReparentTo(render)
                    if toon == localAvatar:
                        if finalBattle:
                            self.toFinalBattleMode()
                        else:
                            self.toWalkMode()
    def stickToonsToFloor(self):
        """Attach a downward floor ray plus a lifter to every involved toon
        so they follow the floor while being moved by script; undone by
        unstickToons()."""
        self.unstickToons()
        rayNode = CollisionNode('stickToonsToFloor')
        # CollisionHandlerRayStart is the standard start height for avatar
        # floor rays.
        rayNode.addSolid(CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0))
        rayNode.setFromCollideMask(ToontownGlobals.FloorBitmask)
        rayNode.setIntoCollideMask(BitMask32.allOff())
        ray = NodePath(rayNode)
        lifter = CollisionHandlerFloor()
        lifter.setOffset(ToontownGlobals.FloorOffset)
        lifter.setReach(10.0)
        for toonId in self.involvedToons:
            toon = base.cr.doId2do.get(toonId)
            if toon:
                # Instance the single ray node under each toon.
                toonRay = ray.instanceTo(toon)
                lifter.addCollider(toonRay, toon)
                base.cTrav.addCollider(toonRay, lifter)
                self.__toonsStuckToFloor.append(toonRay)
    def unstickToons(self):
        for toonRay in self.__toonsStuckToFloor:
            base.cTrav.removeCollider(toonRay)
            toonRay.removeNode()
        self.__toonsStuckToFloor = []
    def stickBossToFloor(self):
        """Hang three downward rays (front/middle/back along Y) under the
        boss and collect their floor hits into a queue; rollBoss() consumes
        the queue each frame to keep the boss on, and tilted to, the
        floor."""
        self.unstickBoss()
        self.ray1 = CollisionRay(0.0, 10.0, 20.0, 0.0, 0.0, -1.0)
        self.ray2 = CollisionRay(0.0, 0.0, 20.0, 0.0, 0.0, -1.0)
        self.ray3 = CollisionRay(0.0, -10.0, 20.0, 0.0, 0.0, -1.0)
        rayNode = CollisionNode('stickBossToFloor')
        rayNode.addSolid(self.ray1)
        rayNode.addSolid(self.ray2)
        rayNode.addSolid(self.ray3)
        rayNode.setFromCollideMask(ToontownGlobals.FloorBitmask)
        rayNode.setIntoCollideMask(BitMask32.allOff())
        self.rays = self.attachNewNode(rayNode)
        self.cqueue = CollisionHandlerQueue()
        base.cTrav.addCollider(self.rays, self.cqueue)
    def rollBoss(self, t, fromPos, deltaPos):
        """Lerp callback: place the boss at fromPos + deltaPos * t and, when
        the floor rays from stickBossToFloor() are active, snap his height
        and tilt to the detected floor surface."""
        self.setPos(fromPos + deltaPos * t)
        if not self.cqueue:
            return
        self.cqueue.sortEntries()
        numEntries = self.cqueue.getNumEntries()
        if numEntries != 0:
            # Walk entries newest-first, keeping the latest hit per ray.
            for i in xrange(self.cqueue.getNumEntries() - 1, -1, -1):
                entry = self.cqueue.getEntry(i)
                solid = entry.getFrom()
                if solid == self.ray1:
                    self.e1 = entry
                elif solid == self.ray2:
                    self.e2 = entry
                elif solid == self.ray3:
                    self.e3 = entry
                else:
                    self.notify.warning('Unexpected ray in __liftBoss')
                    return
            self.cqueue.clearEntries()
        if not (self.e1 and self.e2 and self.e3):
            self.notify.debug('Some points missed in __liftBoss')
            return
        p1 = self.e1.getSurfacePoint(self)
        p2 = self.e2.getSurfacePoint(self)
        p3 = self.e3.getSurfacePoint(self)
        # Ride on whichever is higher: the middle hit or the front/back
        # average (handles convex vs. concave floor shapes).
        p2a = (p1 + p3) / 2
        if p2a[2] > p2[2]:
            center = p2a
        else:
            center = p2
        self.setZ(self, center[2])
        if p1[2] > p2[2] + 0.01 or p3[2] > p2[2] + 0.01:
            # Sloped floor: tilt the model toward the steeper side.
            mat = Mat4(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
            if abs(p3[2] - center[2]) < abs(p1[2] - center[2]):
                lookAt(mat, Vec3(p1 - center), CSDefault)
            else:
                lookAt(mat, Vec3(center - p3), CSDefault)
            self.rotateNode.setMat(mat)
        else:
            self.rotateNode.clearTransform()
    def unstickBoss(self):
        # Remove the floor rays and reset all accumulated hit state.
        if self.rays:
            base.cTrav.removeCollider(self.rays)
            self.rays.removeNode()
        self.rays = None
        self.ray1 = None
        self.ray2 = None
        self.ray3 = None
        self.e1 = None
        self.e2 = None
        self.e3 = None
        self.rotateNode.clearTransform()
        self.cqueue = None
        return
def rollBossToPoint(self, fromPos, fromHpr, toPos, toHpr, reverse):
vector = Vec3(toPos - fromPos)
distance = vector.length()
if toHpr == None:
mat = Mat3(0, 0, 0, 0, 0, 0, 0, 0, 0)
headsUp(mat, vector, CSDefault)
scale = VBase3(0, 0, 0)
shear = VBase3(0, 0, 0)
toHpr = VBase3(0, 0, 0)
decomposeMatrix(mat, scale, shear, toHpr, CSDefault)
if fromHpr:
newH = PythonUtil.fitDestAngle2Src(fromHpr[0], toHpr[0])
toHpr = VBase3(newH, 0, 0)
else:
fromHpr = toHpr
turnTime = abs(toHpr[0] - fromHpr[0]) / ToontownGlobals.BossCogTurnSpeed
if toHpr[0] < fromHpr[0]:
leftRate = ToontownGlobals.BossCogTreadSpeed
else:
leftRate = -ToontownGlobals.BossCogTreadSpeed
if reverse:
rollTreadRate = -ToontownGlobals.BossCogTreadSpeed
else:
rollTreadRate = ToontownGlobals.BossCogTreadSpeed
rollTime = distance / ToontownGlobals.BossCogRollSpeed
deltaPos = toPos - fromPos
track = Sequence(Func(self.setPos, fromPos), Func(self.headsUp, toPos), Parallel(self.hprInterval(turnTime, toHpr, fromHpr), self.rollLeftTreads(turnTime, leftRate), self.rollRightTreads(turnTime, -leftRate)), Parallel(LerpFunctionInterval(self.rollBoss, duration=rollTime, extraArgs=[fromPos, deltaPos]), self.rollLeftTreads(rollTime, rollTreadRate), self.rollRightTreads(rollTime, rollTreadRate)))
return (track, toHpr)
def setupElevator(self, elevatorModel):
    """Wire up doors and door sounds on the given elevator model.

    Door nodes are looked up under both naming conventions
    ('left-door'/'left_door'), since different models use either.
    """
    self.elevatorModel = elevatorModel
    self.leftDoor = self.elevatorModel.find('**/left-door')
    if self.leftDoor.isEmpty():
        self.leftDoor = self.elevatorModel.find('**/left_door')
    self.rightDoor = self.elevatorModel.find('**/right-door')
    if self.rightDoor.isEmpty():
        self.rightDoor = self.elevatorModel.find('**/right_door')
    self.openSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_sliding.ogg')
    self.finalOpenSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_final.ogg')
    self.closeSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_sliding.ogg')
    self.finalCloseSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_final.ogg')
    self.openDoors = ElevatorUtils.getOpenInterval(self, self.leftDoor, self.rightDoor, self.openSfx, self.finalOpenSfx, self.elevatorType)
    self.closeDoors = ElevatorUtils.getCloseInterval(self, self.leftDoor, self.rightDoor, self.closeSfx, self.finalCloseSfx, self.elevatorType)
    # Snap the doors to their closed pose immediately.
    self.closeDoors.start()
    self.closeDoors.finish()
def putToonInCogSuit(self, toon):
    """Dress the toon in the cog disguise matching this boss's department."""
    if not toon.isDisguised:
        deptIndex = SuitDNA.suitDepts.index(self.style.dept)
        toon.setCogIndex(deptIndex)
    # Hide the toon geometry so only the suit is visible.
    toon.getGeomNode().hide()
def placeToonInElevator(self, toon):
    """Suit up the toon and stand it at its slot inside the elevator."""
    self.putToonInCogSuit(toon)
    # Slot index comes from the toon's position in the involved-toons list.
    toonIndex = self.involvedToons.index(toon.doId)
    toon.reparentTo(self.elevatorModel)
    toon.setPos(*ElevatorConstants.BigElevatorPoints[toonIndex])
    toon.setHpr(180, 0, 0)
    toon.suit.loop('neutral')
def toonNormalEyes(self, toons, bArrayOfObjs = False):
    """Return a Sequence restoring normal/blinking eyes on the given toons.

    toons is a list of doIds by default, or of toon objects when
    bArrayOfObjs is True.
    """
    if bArrayOfObjs:
        toonObjs = toons
    else:
        toonObjs = []
        for toonId in toons:
            toon = base.cr.doId2do.get(toonId)
            if toon:
                toonObjs.append(toon)
    seq = Sequence()
    for toon in toonObjs:
        seq.append(Func(toon.normalEyes))
        seq.append(Func(toon.blinkEyes))
    return seq
def toonDied(self, avId):
    """React to a toon going sad; only the local toon triggers a transition."""
    if avId == localAvatar.doId:
        self.localToonDied()
def localToonToSafeZone(self):
    """Teleport the local toon out to its default safe zone."""
    target_sz = ZoneUtil.getSafeZoneId(localAvatar.defaultZone)
    place = self.cr.playGame.getPlace()
    place.fsm.request('teleportOut', [{'loader': ZoneUtil.getLoaderName(target_sz),
        'where': ZoneUtil.getWhereName(target_sz, 1),
        'how': 'teleportIn',
        'hoodId': target_sz,
        'zoneId': target_sz,
        'shardId': None,
        'avId': -1,
        'battle': 1}])
    return
def localToonDied(self):
    """Send the sad local toon back to its default safe zone via 'died'."""
    # Mirrors localToonToSafeZone but requests the 'died' state instead of
    # 'teleportOut'.
    target_sz = ZoneUtil.getSafeZoneId(localAvatar.defaultZone)
    place = self.cr.playGame.getPlace()
    place.fsm.request('died', [{'loader': ZoneUtil.getLoaderName(target_sz),
        'where': ZoneUtil.getWhereName(target_sz, 1),
        'how': 'teleportIn',
        'hoodId': target_sz,
        'zoneId': target_sz,
        'shardId': None,
        'avId': -1,
        'battle': 1}])
    return
def toonsToBattlePosition(self, toonIds, battleNode):
    """Snap each toon to its battle spot relative to battleNode."""
    # toonPoints is indexed by party size - 1; each entry is (pos, heading).
    points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
    self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0])
    for i in xrange(len(toonIds)):
        toon = base.cr.doId2do.get(toonIds[i])
        if toon:
            toon.reparentTo(render)
            pos, h = points[i]
            self.notify.debug('toonsToBattlePosition: battleNode=%s %.2f %.2f %.2f %.2f %.2f %.2f' % (battleNode,
                pos[0],
                pos[1],
                pos[2],
                h,
                0,
                0))
            self.notify.debug('old toon pos %s' % toon.getPos())
            self.notify.debug('pos=%.2f %.2f %.2f h=%.2f' % (pos[0],
                pos[1],
                pos[2],
                h))
            self.notify.debug('battleNode.pos = %s' % battleNode.getPos())
            self.notify.debug('battleNode.hpr = %s' % battleNode.getHpr())
            toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0)
            self.notify.debug('new toon pos %s ' % toon.getPos())
def __touchedBoss(self, entry):
    """Collision handler: zap the local toon with the attack code tagged on
    the node it touched."""
    self.notify.debug('%s' % entry)
    self.notify.debug('fromPos = %s' % entry.getFromNodePath().getPos(render))
    self.notify.debug('intoPos = %s' % entry.getIntoNodePath().getPos(render))
    attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')
    if attackCodeStr == '':
        self.notify.warning('Node %s has no attackCode tag.' % repr(entry.getIntoNodePath()))
        return
    attackCode = int(attackCodeStr)
    # Lawyer attacks are only valid in the CJ ('l' department) battle.
    if attackCode == ToontownGlobals.BossCogLawyerAttack and self.dna.dept != 'l':
        self.notify.warning('got lawyer attack but not in CJ boss battle')
        return
    self.zapLocalToon(attackCode)
def zapLocalToon(self, attackCode, origin = None):
    """Apply a boss attack to the local toon and broadcast it.

    origin is the node the toon is flung away from (defaults to the boss).
    Area attacks shake the camera instead of flinging.
    """
    # Ignore hits while the toon is protected or otherwise unzappable.
    if self.localToonIsSafe or localAvatar.ghostMode or localAvatar.isStunned:
        return
    messenger.send('interrupt-pie')
    place = self.cr.playGame.getPlace()
    currentState = None
    if place:
        currentState = place.fsm.getCurrentState().getName()
    # Only zap in states where the toon is actually participating.
    if currentState != 'walk' and currentState != 'finalBattle' and currentState != 'crane':
        return
    toon = localAvatar
    fling = 1
    shake = 0
    if attackCode == ToontownGlobals.BossCogAreaAttack:
        fling = 0
        shake = 1
    if fling:
        if origin == None:
            origin = self
        # Face away from the attack origin while keeping the camera stable.
        base.camera.wrtReparentTo(render)
        toon.headsUp(origin)
        base.camera.wrtReparentTo(toon)
    # 2-D direction from boss to toon, sent so peers can replay the fling.
    bossRelativePos = toon.getPos(self.getGeomNode())
    bp2d = Vec2(bossRelativePos[0], bossRelativePos[1])
    bp2d.normalize()
    pos = toon.getPos()
    hpr = toon.getHpr()
    timestamp = globalClockDelta.getFrameNetworkTime()
    self.sendUpdate('zapToon', [pos[0],
        pos[1],
        pos[2],
        hpr[0] % 360.0,
        hpr[1],
        hpr[2],
        bp2d[0],
        bp2d[1],
        attackCode,
        timestamp])
    self.doZapToon(toon, fling=fling, shake=shake)
    return
def showZapToon(self, toonId, x, y, z, h, p, r, attackCode, timestamp):
    """Replay another toon's zap locally (broadcast receiver)."""
    # The local toon already played its own zap in zapLocalToon.
    if toonId == localAvatar.doId:
        return
    ts = globalClockDelta.localElapsedTime(timestamp)
    pos = Point3(x, y, z)
    hpr = VBase3(h, p, r)
    fling = 1
    toon = self.cr.doId2do.get(toonId)
    if toon:
        if attackCode == ToontownGlobals.BossCogAreaAttack:
            # Area attacks don't reposition the toon and don't fling.
            pos = None
            hpr = None
            fling = 0
        else:
            ts -= toon.smoother.getDelay()
        self.doZapToon(toon, pos=pos, hpr=hpr, ts=ts, fling=fling)
    return
def doZapToon(self, toon, pos = None, hpr = None, ts = 0, fling = 1, shake = 1):
    """Play the full zap reaction on a toon (local or remote).

    pos/hpr: where to snap the toon before the reaction (remote replays).
    ts:      elapsed network time; negative values delay the start instead.
    fling:   slip backwards and slide away; otherwise slip forward in place.
    shake:   camera shake (local toon only).
    """
    zapName = toon.uniqueName('zap')
    self.clearInterval(zapName)
    zapTrack = Sequence(name=zapName)
    if toon == localAvatar:
        self.toOuchMode()
        messenger.send('interrupt-pie')
        self.enableLocalToonSimpleCollisions()
    else:
        zapTrack.append(Func(toon.stopSmooth))

    def getSlideToPos(toon = toon):
        # Evaluated at interval start so the slide target tracks the toon.
        return render.getRelativePoint(toon, Point3(0, -5, 0))

    if pos != None and hpr != None:
        # Fix: the original wrapped this statement in a one-element tuple
        # ("(zapTrack.append(...),)") -- a pointless throwaway object.
        zapTrack.append(Func(toon.setPosHpr, pos, hpr))
    toonTrack = Parallel()
    if shake and toon == localAvatar:
        toonTrack.append(Sequence(Func(base.camera.setZ, base.camera, 1), Wait(0.15), Func(base.camera.setZ, base.camera, -2), Wait(0.15), Func(base.camera.setZ, base.camera, 1)))
    if fling:
        toonTrack += [ActorInterval(toon, 'slip-backward'), toon.posInterval(0.5, getSlideToPos, fluid=1)]
    else:
        toonTrack += [ActorInterval(toon, 'slip-forward')]
    zapTrack.append(toonTrack)
    if toon == localAvatar:
        zapTrack.append(Func(self.disableLocalToonSimpleCollisions))
        currentState = self.state
        # Return to the mode appropriate for the current boss round.
        if currentState == 'BattleThree':
            zapTrack.append(Func(self.toFinalBattleMode))
        elif hasattr(self, 'chairs'):
            zapTrack.append(Func(self.toFinalBattleMode))
        else:
            zapTrack.append(Func(self.toWalkMode))
    else:
        zapTrack.append(Func(toon.startSmooth))
    if ts > 0:
        startTime = ts
    else:
        # Negative ts means the event is in the future: wait it out first.
        zapTrack = Sequence(Wait(-ts), zapTrack)
        startTime = 0
    zapTrack.append(Func(self.clearInterval, zapName))
    # Keep the toon alive for the duration of the interval.
    zapTrack.delayDelete = DelayDelete.DelayDelete(toon, 'BossCog.doZapToon')
    zapTrack.start(startTime)
    self.storeInterval(zapTrack, zapName)
    return
def setAttackCode(self, attackCode, avId = 0):
    """Distributed update: play the animation matching the boss's new attack.

    avId identifies the target toon for directed attacks.
    """
    self.attackCode = attackCode
    self.attackAvId = avId
    if attackCode == ToontownGlobals.BossCogDizzy:
        self.setDizzy(1)
        self.cleanupAttacks()
        self.doAnimate(None, raised=0, happy=1)
    elif attackCode == ToontownGlobals.BossCogDizzyNow:
        self.setDizzy(1)
        self.cleanupAttacks()
        self.doAnimate('hit', happy=1, now=1)
    elif attackCode == ToontownGlobals.BossCogSwatLeft:
        self.setDizzy(0)
        self.doAnimate('ltSwing', now=1)
    elif attackCode == ToontownGlobals.BossCogSwatRight:
        self.setDizzy(0)
        self.doAnimate('rtSwing', now=1)
    elif attackCode == ToontownGlobals.BossCogAreaAttack:
        self.setDizzy(0)
        self.doAnimate('areaAttack', now=1)
    elif attackCode == ToontownGlobals.BossCogFrontAttack:
        self.setDizzy(0)
        self.doAnimate('frontAttack', now=1)
    elif attackCode == ToontownGlobals.BossCogRecoverDizzyAttack:
        self.setDizzy(0)
        self.doAnimate('frontAttack', now=1)
    elif attackCode == ToontownGlobals.BossCogDirectedAttack or attackCode == ToontownGlobals.BossCogSlowDirectedAttack:
        self.setDizzy(0)
        self.doDirectedAttack(avId, attackCode)
    elif attackCode == ToontownGlobals.BossCogNoAttack:
        self.setDizzy(0)
        self.doAnimate(None, raised=1)
    return
def cleanupAttacks(self):
    """Stop any in-flight attack effects; overridden by specific bosses."""
    pass
def cleanupFlash(self):
    """Finish and drop any running color-flash interval."""
    if self.flashInterval:
        self.flashInterval.finish()
        self.flashInterval = None
    return
def flashRed(self):
    """Briefly tint the boss red (damage feedback), then fade back."""
    self.cleanupFlash()
    # Guard against a destroyed NodePath, exactly as flashGreen does; the
    # original omitted this check only here.
    if not self.isEmpty():
        self.setColorScale(1, 1, 1, 1)
        i = Sequence(self.colorScaleInterval(0.1, colorScale=VBase4(1, 0, 0, 1)), self.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)))
        self.flashInterval = i
        i.start()
def flashGreen(self):
    """Briefly tint the boss green (e.g. heal/treasure feedback), then fade back."""
    self.cleanupFlash()
    if not self.isEmpty():
        self.setColorScale(1, 1, 1, 1)
        i = Sequence(self.colorScaleInterval(0.1, colorScale=VBase4(0, 1, 0, 1)), self.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)))
        self.flashInterval = i
        i.start()
def getGearFrisbee(self):
    """Load and return the gear model thrown in directed attacks."""
    return loader.loadModel('phase_9/models/char/gearProp')
def backupToonsToBattlePosition(self, toonIds, battleNode):
    """Return a Parallel that walks each toon backwards to its battle spot."""
    self.notify.debug('backupToonsToBattlePosition:')
    ival = Parallel()
    points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
    for i in xrange(len(toonIds)):
        toon = base.cr.doId2do.get(toonIds[i])
        if toon:
            pos, h = points[i]
            pos = render.getRelativePoint(battleNode, pos)
            # Negative play rate makes the walk cycle run in reverse.
            ival.append(Sequence(Func(toon.setPlayRate, -0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral')))
    return ival
def loseCogSuits(self, toons, battleNode, camLoc, arrayOfObjs = False):
    """Return a Sequence that removes the cog disguises in a puff of dust.

    toons is a list of doIds, or of toon objects when arrayOfObjs is True.
    camLoc is the (pos + hpr) tuple for the camera relative to battleNode.
    """
    seq = Sequence()
    if not toons:
        return seq
    self.notify.debug('battleNode=%s camLoc=%s' % (battleNode, camLoc))
    seq.append(Func(base.camera.setPosHpr, battleNode, *camLoc))
    suitsOff = Parallel()
    if arrayOfObjs:
        toonArray = toons
    else:
        toonArray = []
        for toonId in toons:
            toon = base.cr.doId2do.get(toonId)
            if toon:
                toonArray.append(toon)
    for toon in toonArray:
        # One dust cloud per toon hides the actual suit removal.
        dustCloud = DustCloud.DustCloud()
        dustCloud.setPos(0, 2, 3)
        dustCloud.setScale(0.5)
        dustCloud.setDepthWrite(0)
        dustCloud.setBin('fixed', 0)
        dustCloud.createTrack()
        suitsOff.append(
            Sequence(
                Func(dustCloud.reparentTo, toon),
                Parallel(
                    dustCloud.track,
                    Sequence(
                        Wait(0.3),
                        Func(toon.takeOffSuit),
                        Func(toon.sadEyes),
                        Func(toon.blinkEyes),
                        Func(toon.play, 'slip-backward'),
                        Wait(0.7),
                    )
                ),
                Func(dustCloud.detachNode),
                Func(dustCloud.destroy),
                Wait(3),
                Func(toon.loop, 'neutral'),
            )
        )
    seq.append(suitsOff)
    return seq
def doDirectedAttack(self, avId, attackCode):
    """Animate the boss throwing a spray of gears at a specific toon."""
    toon = base.cr.doId2do.get(avId)
    if toon:
        # Temporary root carrying the attackCode tag the collision handler
        # reads in __touchedBoss.
        gearRoot = self.rotateNode.attachNewNode('gearRoot')
        gearRoot.setZ(10)
        gearRoot.setTag('attackCode', str(attackCode))
        gearModel = self.getGearFrisbee()
        gearModel.setScale(0.2)
        gearRoot.headsUp(toon)
        toToonH = PythonUtil.fitDestAngle2Src(0, gearRoot.getH() + 180)
        gearRoot.lookAt(toon)
        neutral = 'Fb_neutral'
        if not self.twoFaced:
            neutral = 'Ff_neutral'
        gearTrack = Parallel()
        # Four gears, staggered 0.15s apart with random spread and spin.
        for i in xrange(4):
            node = gearRoot.attachNewNode(str(i))
            node.hide()
            node.setPos(0, 5.85, 4.0)
            gear = gearModel.instanceTo(node)
            x = random.uniform(-5, 5)
            z = random.uniform(-3, 3)
            h = random.uniform(-720, 720)
            gearTrack.append(Sequence(Wait(i * 0.15), Func(node.show), Parallel(node.posInterval(1, Point3(x, 50, z), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1)), Func(node.detachNode)))
        if not self.raised:
            neutral1Anim = self.getAnim('down2Up')
            self.raised = 1
        else:
            neutral1Anim = ActorInterval(self, neutral, startFrame=48)
        throwAnim = self.getAnim('throw')
        neutral2Anim = ActorInterval(self, neutral)
        extraAnim = Sequence()
        if attackCode == ToontownGlobals.BossCogSlowDirectedAttack:
            # Slow variant telegraphs the throw with an extra neutral loop.
            extraAnim = ActorInterval(self, neutral)
        seq = Sequence(ParallelEndTogether(self.pelvis.hprInterval(1, VBase3(toToonH, 0, 0)), neutral1Anim), extraAnim, Parallel(Sequence(Wait(0.19), gearTrack, Func(gearRoot.detachNode), self.pelvis.hprInterval(0.2, VBase3(0, 0, 0))), Sequence(throwAnim, neutral2Anim)))
        self.doAnimate(seq, now=1, raised=1)
def announceAreaAttack(self):
    """Zap the local toon with the area attack unless it is mid-jump."""
    if not getattr(localAvatar.controlManager.currentControls, 'isAirborne', 0):
        self.zapLocalToon(ToontownGlobals.BossCogAreaAttack)
def loadEnvironment(self):
    """Load the shared boss-battle music; subclasses add scenery on top."""
    self.elevatorMusic = base.loadMusic('phase_7/audio/bgm/tt_elevator.ogg')
    self.stingMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')
    self.battleOneMusic = base.loadMusic('phase_3.5/audio/bgm/encntr_general_bg.ogg')
    self.battleThreeMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')
    self.epilogueMusic = base.loadMusic('phase_9/audio/bgm/encntr_hall_of_fame.ogg')
def unloadEnvironment(self):
    """Release environment assets; overridden by specific bosses."""
    pass
def enterOff(self):
    """FSM: hide the boss and return the local toon to walk mode."""
    self.cleanupIntervals()
    self.hide()
    self.clearChat()
    self.toWalkMode()
def exitOff(self):
    """FSM: make the boss visible again when leaving the Off state."""
    self.show()
def enterWaitForToons(self):
    """FSM: fade the screen and wait for every participant to arrive."""
    self.cleanupIntervals()
    self.hide()
    if self.gotAllToons:
        self.__doneWaitForToons()
    else:
        self.accept('gotAllToons', self.__doneWaitForToons)
    self.transitions = Transitions.Transitions(loader)
    self.transitions.IrisModelName = 'phase_3/models/misc/iris'
    self.transitions.FadeModelName = 'phase_3/models/misc/fade'
    self.transitions.fadeScreen(alpha=1)
    NametagGlobals.setWant2dNametags(False)
def __doneWaitForToons(self):
    """Signal the server barrier once all toons are present."""
    self.doneBarrier('WaitForToons')
def exitWaitForToons(self):
    """FSM: undo the fade and restore 2-D nametags."""
    self.show()
    self.transitions.noFade()
    del self.transitions
    NametagGlobals.setWant2dNametags(True)
def enterElevator(self):
    """FSM: play the elevator-ride cutscene with all involved toons aboard."""
    for toonId in self.involvedToons:
        toon = self.cr.doId2do.get(toonId)
        if toon:
            toon.stopLookAround()
            toon.stopSmooth()
            self.placeToonInElevator(toon)
    self.toMovieMode()
    base.camera.reparentTo(self.elevatorModel)
    base.camera.setPosHpr(0, 25, 8, 180, 0, 0)
    base.playMusic(self.elevatorMusic, looping=1, volume=1.0)
    # Ride interval is played twice back-to-back before the doors open.
    ival = Sequence(ElevatorUtils.getRideElevatorInterval(self.elevatorType), ElevatorUtils.getRideElevatorInterval(self.elevatorType), self.openDoors, Func(base.camera.wrtReparentTo, render), Func(self.__doneElevator))
    intervalName = 'ElevatorMovie'
    ival.start()
    self.storeInterval(ival, intervalName)
def __doneElevator(self):
    """Signal the server barrier when the elevator cutscene finishes."""
    self.doneBarrier('Elevator')
def exitElevator(self):
    """FSM: stop the ride movie/music and snap the doors closed."""
    intervalName = 'ElevatorMovie'
    self.clearInterval(intervalName)
    self.elevatorMusic.stop()
    ElevatorUtils.closeDoors(self.leftDoor, self.rightDoor, self.elevatorType)
def enterIntroduction(self):
    """FSM: play the boss introduction cutscene."""
    self.controlToons()
    ElevatorUtils.openDoors(self.leftDoor, self.rightDoor, self.elevatorType)
    NametagGlobals.setWant2dNametags(False)
    intervalName = 'IntroductionMovie'
    # delayDeletes keeps the movie's toons alive until the interval is done.
    delayDeletes = []
    seq = Sequence(self.makeIntroductionMovie(delayDeletes), Func(self.__beginBattleOne), name=intervalName)
    seq.delayDeletes = delayDeletes
    seq.start()
    self.storeInterval(seq, intervalName)
def __beginBattleOne(self):
    """End the intro movie and release the Introduction barrier."""
    intervalName = 'IntroductionMovie'
    self.clearInterval(intervalName)
    self.doneBarrier('Introduction')
def exitIntroduction(self):
    """FSM: stop the intro movie and give control back to the toons."""
    self.notify.debug('DistributedBossCog.exitIntroduction:')
    intervalName = 'IntroductionMovie'
    self.clearInterval(intervalName)
    self.unstickToons()
    self.releaseToons()
    NametagGlobals.setWant2dNametags(True)
    ElevatorUtils.closeDoors(self.leftDoor, self.rightDoor, self.elevatorType)
def enterBattleOne(self):
    """FSM: position both toon squads and start round-one battle music."""
    self.cleanupIntervals()
    # Boss battles award multiplied experience credit.
    mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(1)
    localAvatar.inventory.setBattleCreditMultiplier(mult)
    self.toonsToBattlePosition(self.toonsA, self.battleANode)
    self.toonsToBattlePosition(self.toonsB, self.battleBNode)
    self.releaseToons()
    base.playMusic(self.battleOneMusic, looping=1, volume=0.9)
def exitBattleOne(self):
    """FSM: clean up round one and restore the normal credit multiplier."""
    self.cleanupBattles()
    self.battleOneMusic.stop()
    localAvatar.inventory.setBattleCreditMultiplier(1)
def enterBattleThree(self):
    """FSM: free the toons for the final round and hook friend/nametag UI."""
    self.cleanupIntervals()
    self.releaseToons(finalBattle=1)
    self.accept('clickedNametag', self.__clickedNameTag)
    self.accept('friendAvatar', self.__handleFriendAvatar)
    self.accept('avatarDetails', self.__handleAvatarDetails)
    NametagGlobals.setWant2dNametags(False)
    NametagGlobals.setWantActiveNametags(True)
def exitBattleThree(self):
    """FSM: unhook the BattleThree event handlers and stop intervals."""
    # Fix: the event must match the one registered in enterBattleThree
    # ('clickedNametag'); the original ignored 'clickedNameTag' (wrong
    # case), leaving the handler installed after the state exited.
    self.ignore('clickedNametag')
    self.ignore('friendAvatar')
    self.ignore('avatarDetails')
    self.cleanupIntervals()
def __clickedNameTag(self, avatar):
    """Forward a nametag click to the place's friends-list handler,
    but only during the final battle rounds."""
    self.notify.debug('__clickedNameTag')
    if not (self.state == 'BattleThree' or self.state == 'BattleFour'):
        return
    if not self.allowClickedNameTag:
        return
    if self.cr:
        place = self.cr.playGame.getPlace()
        if place and hasattr(place, 'fsm'):
            # Calls the name-mangled private handler on FriendsListManager.
            FriendsListManager.FriendsListManager._FriendsListManager__handleClickedNametag(place, avatar)
def __handleFriendAvatar(self, avId, avName, avDisableName):
    """Forward a friend-avatar event to the place's friends-list handler,
    but only during the final battle rounds."""
    self.notify.debug('__handleFriendAvatar')
    if not (self.state == 'BattleThree' or self.state == 'BattleFour'):
        return
    if not self.allowClickedNameTag:
        return
    if self.cr:
        place = self.cr.playGame.getPlace()
        if place and hasattr(place, 'fsm'):
            FriendsListManager.FriendsListManager._FriendsListManager__handleFriendAvatar(place, avId, avName, avDisableName)
def __handleAvatarDetails(self, avId, avName, playerId = None):
    """Forward an avatar-details request to the place's friends-list
    handler, but only during the final battle rounds."""
    self.notify.debug('__handleAvatarDetails')
    if not (self.state == 'BattleThree' or self.state == 'BattleFour'):
        return
    if not self.allowClickedNameTag:
        return
    if self.cr:
        place = self.cr.playGame.getPlace()
        if place and hasattr(place, 'fsm'):
            FriendsListManager.FriendsListManager._FriendsListManager__handleAvatarDetails(place, avId, avName, playerId)
def enterBattleFour(self):
    """FSM: free the toons for round four and hook friend/nametag UI."""
    self.cleanupIntervals()
    self.releaseToons(finalBattle=1)
    self.accept('clickedNametag', self.__clickedNameTag)
    self.accept('friendAvatar', self.__handleFriendAvatar)
    self.accept('avatarDetails', self.__handleAvatarDetails)
    NametagGlobals.setWant2dNametags(False)
    NametagGlobals.setWantActiveNametags(True)
def exitBattleFour(self):
    """FSM: unhook the BattleFour event handlers and stop intervals."""
    # Fix: must match the event registered in enterBattleFour
    # ('clickedNametag'); the original ignored 'clickedNameTag' (wrong
    # case), so the handler was never removed.
    self.ignore('clickedNametag')
    self.ignore('friendAvatar')
    self.ignore('avatarDetails')
    self.cleanupIntervals()
def enterFrolic(self):
    """FSM: idle the defeated boss in the scene and free the toons."""
    self.cleanupIntervals()
    self.clearChat()
    self.reparentTo(render)
    self.stopAnimate()
    self.pose('Ff_neutral', 0)
    self.releaseToons()
def exitFrolic(self):
    """FSM: nothing to undo for the Frolic state."""
    pass
def setToonsToNeutral(self, toonIds):
    """Put each toon (and its cog suit, if disguised) into the neutral loop."""
    for i in xrange(len(toonIds)):
        toon = base.cr.doId2do.get(toonIds[i])
        if toon:
            if toon.isDisguised:
                toon.suit.loop('neutral')
            # NOTE(review): source indentation was lost in this dump; this
            # assumes the toon itself loops 'neutral' whether or not it is
            # disguised -- confirm against the original file.
            toon.loop('neutral')
def wearCogSuits(self, toons, battleNode, camLoc, arrayOfObjs = False, waiter = False):
    """Return a Sequence that dresses the toons in cog suits behind dust clouds.

    toons is a list of doIds, or of toon objects when arrayOfObjs is True.
    camLoc, when given, positions the camera relative to battleNode first.
    waiter additionally applies the waiter suit variant.
    """
    seq = Sequence()
    if not toons:
        return seq
    self.notify.debug('battleNode=%s camLoc=%s' % (battleNode, camLoc))
    if camLoc:
        seq.append(Func(base.camera.setPosHpr, battleNode, *camLoc))
    suitsOff = Parallel()
    if arrayOfObjs:
        toonArray = toons
    else:
        toonArray = []
        for toonId in toons:
            toon = base.cr.doId2do.get(toonId)
            if toon:
                toonArray.append(toon)
    for toon in toonArray:
        dustCloud = DustCloud.DustCloud()
        dustCloud.setPos(0, 2, 3)
        dustCloud.setScale(0.5)
        dustCloud.setDepthWrite(0)
        dustCloud.setBin('fixed', 0)
        dustCloud.createTrack()
        makeWaiter = Sequence()
        if waiter:
            makeWaiter = Func(toon.makeWaiter)
        suitsOff.append(Sequence(Func(dustCloud.reparentTo, toon), Parallel(dustCloud.track, Sequence(Wait(0.3), Func(self.putToonInCogSuit, toon), makeWaiter, Wait(0.7))), Func(dustCloud.detachNode)))
    seq.append(suitsOff)
    return seq
|
sam-m888/addons-source | refs/heads/master | LastChange/LastChangeGramplet.py | 2 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# $Id$
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from bisect import bisect
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.datehandler import format_time
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_YIELD_INTERVAL = 100
class LastChangeGramplet(Gramplet):
    """Scan the database and list the 10 most recently changed people."""

    def init(self):
        """Set up the GUI."""
        self.set_tooltip(_("Double-click name for details"))
        self.set_text(_("No Family Tree loaded."))

    def main(self):
        """Search the database for the last person records changed.

        Runs as a generator: yields True periodically so the Gramplet
        scheduler can keep the GUI responsive during the scan.
        """
        self.set_text(_("Processing...") + "\n")
        counter = 0
        the_list = []  # sorted list of (handle, -change_time), newest first
        for handle in self.dbstate.db.iter_person_handles():
            # Raw field 17 is the change timestamp; negate it so the most
            # recently changed person sorts first.
            change = -self.dbstate.db.get_raw_person_data(handle)[17]
            bsindex = bisect(KeyWrapper(the_list, key=lambda c: c[1]),
                             change)
            the_list.insert(bsindex, (handle, change))
            if len(the_list) > 10:  # only need 10 entries, so remove oldest
                the_list.pop(10)
            # Yield once every _YIELD_INTERVAL persons.  The original test
            # (`if counter % _YIELD_INTERVAL:`) was inverted: it yielded on
            # every iteration EXCEPT each 100th, defeating the batching.
            if counter % _YIELD_INTERVAL == 0:
                yield True
            counter += 1
        yield True
        self.clear_text()
        counter = 1
        for handle, change in the_list:
            person = self.dbstate.db.get_person_from_handle(handle)
            self.append_text(" %d. " % (counter, ))
            self.link(person.get_primary_name().get_name(), 'Person',
                      person.handle)
            self.append_text(" (%s %s)" % (_('changed on'),
                                           format_time(person.change)))
            if counter < 10:
                self.append_text("\n")
            counter += 1

    def db_changed(self):
        """Connect the signals that trigger an update."""
        self.connect(self.dbstate.db, 'person-update', self.update)
        self.connect(self.dbstate.db, 'person-add', self.update)
        self.connect(self.dbstate.db, 'person-delete', self.update)
        self.connect(self.dbstate.db, 'person-rebuild', self.update)
        self.connect(self.dbstate.db, 'family-rebuild', self.update)
        self.connect(self.dbstate.db, 'family-add', self.update)
        self.connect(self.dbstate.db, 'family-delete', self.update)
        self.connect(self.dbstate.db, 'family-update', self.update)
class KeyWrapper:
    """Read-only sequence adapter that exposes ``key(iterable[i])``.

    Lets :func:`bisect.bisect` binary-search a list of tuples (or any
    records) on a derived key without building a separate key list.
    """

    def __init__(self, iterable, key):
        self.iter = iterable
        self.key = key

    def __getitem__(self, index):
        # Apply the key function lazily, only to the probed element.
        return self.key(self.iter[index])

    def __len__(self):
        return len(self.iter)
|
nirgal/ngw | refs/heads/master | core/views/logs.py | 1 | '''
Log managing views
'''
from django.utils import formats
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from ngw.core.models import Log
from ngw.core.nav import Navbar
from ngw.core.views.generic import NgwAdminAcl, NgwListView
__all__ = ['LogListView']
class LogListView(NgwAdminAcl, NgwListView):
    '''
    Display full log list (history).
    '''
    template_name = 'log_list.html'
    # Columns rendered by NgwListView; names refer either to model fields
    # or to callables defined on this class (small_date below).
    list_display = (
        'small_date', 'contact', 'action_txt', 'target_repr', 'property_repr',
        'change')

    def small_date(self, log):
        '''Render the entry timestamp using the short localized format.'''
        return formats.date_format(log.dt, "SHORT_DATETIME_FORMAT")
    # Admin-style metadata: column header text and the field used when the
    # user sorts on this column.
    small_date.short_description = ugettext_lazy('Date UTC')
    small_date.admin_order_field = 'dt'

    def get_root_queryset(self):
        '''Return the unfiltered queryset (every log entry).'''
        return Log.objects.all()

    def get_context_data(self, **kwargs):
        '''Add title, object type and navigation bar to the template context.'''
        context = {}
        context['title'] = _('Global log')
        context['objtype'] = Log
        context['nav'] = Navbar(Log.get_class_navcomponent())
        context.update(kwargs)
        return super().get_context_data(**context)
|
medspx/QGIS | refs/heads/master | python/plugins/processing/algs/gdal/ColorRelief.py | 5 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ColorRelief.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import (QgsRasterFileWriter,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterFile,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class ColorRelief(GdalAlgorithm):
    """Processing algorithm wrapping ``gdaldem color-relief``: produces a
    color relief raster from a DEM band and a color configuration file."""

    # Parameter/output identifiers used by the Processing framework.
    INPUT = 'INPUT'
    BAND = 'BAND'
    COMPUTE_EDGES = 'COMPUTE_EDGES'
    COLOR_TABLE = 'COLOR_TABLE'
    MATCH_MODE = 'MATCH_MODE'
    OPTIONS = 'OPTIONS'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's parameters and output."""
        # (label, gdaldem flag) pairs; index order matches the enum values.
        self.modes = ((self.tr('Use strict color matching'), '-exact_color_entry'),
                      (self.tr('Use closest RGBA quadruplet'), '-nearest_color_entry'))
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterBand(self.BAND,
                                                     self.tr('Band number'),
                                                     parentLayerParameterName=self.INPUT))
        self.addParameter(QgsProcessingParameterBoolean(self.COMPUTE_EDGES,
                                                        self.tr('Compute edges'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterFile(self.COLOR_TABLE,
                                                     self.tr('Color configuration file')))
        self.addParameter(QgsProcessingParameterEnum(self.MATCH_MODE,
                                                     self.tr('Matching mode'),
                                                     options=[i[0] for i in self.modes],
                                                     defaultValue=0))
        # Advanced, free-form GDAL creation options.
        options_param = QgsProcessingParameterString(self.OPTIONS,
                                                     self.tr('Additional creation parameters'),
                                                     defaultValue='',
                                                     optional=True)
        options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        options_param.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(options_param)
        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Color relief')))

    def name(self):
        return 'colorrelief'

    def displayName(self):
        return self.tr('Color relief')

    def group(self):
        return self.tr('Raster analysis')

    def groupId(self):
        return 'rasteranalysis'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Translate the parameter values into a gdaldem command line."""
        arguments = ['color-relief']
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        arguments.append(inLayer.source())
        arguments.append(self.parameterAsFile(parameters, self.COLOR_TABLE, context))
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        arguments.append(out)
        # Output driver is inferred from the destination file extension.
        arguments.append('-of')
        arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
        arguments.append('-b')
        arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
        if self.parameterAsBool(parameters, self.COMPUTE_EDGES, context):
            arguments.append('-compute_edges')
        arguments.append(self.modes[self.parameterAsEnum(parameters, self.MATCH_MODE, context)][1])
        return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
|
rmcgibbo/conda-build | refs/heads/master | conda_build/main_pipbuild.py | 2 | # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import print_function, division, absolute_import
import sys
import os.path
import subprocess
import yaml
#from conda.cli import common
import conda.config as cc
from conda.cli.conda_argparse import ArgumentParser
from conda_build.main_build import args_func
from conda.install import rm_rf
import conda_build.build as build
from conda_build.metadata import MetaData
from conda_build import utils
if sys.version_info < (3,):
from xmlrpclib import ServerProxy
else:
from xmlrpc.client import ServerProxy
def main():
    """Entry point: parse the command line and dispatch to execute()."""
    p = ArgumentParser(
        description="""
Tool for building conda packages using pip install. NOTE: this command is
experimental.  The recommended way to build conda packages from packages on
PyPI is using conda skeleton pypi and conda build.
        """,
    )
    # --anaconda-upload / --no-anaconda-upload both write to the same dest;
    # the default comes from the user's conda configuration.
    p.add_argument(
        "--no-anaconda-upload",
        action="store_false",
        help="Do not ask to upload the package to anaconda.org.",
        dest='binstar_upload',
        default=cc.binstar_upload,
    )
    p.add_argument(
        "--anaconda-upload",
        action="store_true",
        help="Upload the package to anaconda.org.",
        dest='binstar_upload',
        default=cc.binstar_upload,
    )
    p.add_argument(
        'pypi_name',
        action="store",
        metavar='<PYPI_NAME>',
        nargs=1,
        help="Name of package on PyPI."
    )
    p.add_argument(
        "--release",
        action='store',
        nargs=1,
        help="Version of the package to build.",
        default="latest"
    )
    p.add_argument(
        "--pypi-url",
        action="store",
        default='http://pypi.python.org/pypi',
        help="Url to use for PyPI.",
    )
    p.add_argument(
        "--noarch-python",
        action="store_true",
        default=False,
        help="Creates package as noarch")
    p.set_defaults(func=execute)
    args = p.parse_args()
    args_func(args, p)
def handle_binstar_upload(path):
    """Upload the built package at *path* to anaconda.org.

    Exits the process with an error message when the ``anaconda`` client
    executable cannot be located.
    """
    from conda_build.external import find_executable

    binstar = find_executable('anaconda')
    if binstar is None:
        sys.exit('''
Error: cannot locate anaconda command (required for upload)
# Try:
# $ conda install anaconda-client
''')
    print("Uploading to anaconda.org")
    subprocess.call([binstar, 'upload', path])
# Run conda skeleton pypi {0} --no-download --no-prompt
# Check to be sure all the dependencies are already in conda repositories
# if not, recursively build them...
# Modify the recipe directory to make a new recipe with just the dependencies
# and a build script that says pip install for both build.sh and build.bat
skeleton_template = "conda skeleton pypi {0} --no-prompt"
skeleton_template_wversion = "conda skeleton pypi {0} --version {1} --no-prompt"
build_template = "conda build {0} --no-anaconda-upload --no-test"
meta_template = """package:
name: {packagename}
version: !!str {version}
{build_comment}build:
{noarch_python_comment}noarch_python: True
requirements:
build:
- python
- pip{depends}
run:
- python{depends}
about:
home: {homeurl}
license: {license}
summary: {summary}
"""
def build_recipe(package, version=None):
    """Run ``conda skeleton pypi`` for *package* and return the absolute
    path of the generated recipe directory.

    Raises RuntimeError when the skeleton command fails or produces
    unexpected output.
    """
    if version:
        dirname = package.lower() + "-" + version
    else:
        dirname = package.lower()
    if os.path.isdir(dirname):
        rm_rf(dirname)
    if version is None:
        args = skeleton_template.format(package).split()
    else:
        args = skeleton_template_wversion.format(package, version).split()
    print("Creating standard recipe for {0}".format(dirname))
    try:
        # utils.execute returns a (stdout, stderr) tuple (see its use in
        # get_all_dependencies); the original bound the tuple to a single
        # name and then called .strip() on it, raising AttributeError.
        stdout, _ = utils.execute(args, check_exit_code=True)
    except subprocess.CalledProcessError as err:
        print(err.output)
        raise RuntimeError((" ".join(args)))
    output = stdout.strip().splitlines()
    # The skeleton command prints the recipe directory on its second-to-last
    # line and 'Done' on the last.
    if output[-1] == 'Done':
        direc = output[-2].split()[-1]
    else:
        raise RuntimeError("Incorrect output from build_recipe: %s" % output)
    return os.path.abspath(direc)
def convert_recipe(direc, package, noarch_python=False):
    """Rewrite the skeleton recipe in *direc* to build via ``pip install``.

    Replaces build.sh/bld.bat with a plain pip install and regenerates
    meta.yaml from meta_template.  Returns the recipe's build dependencies
    (minus python/setuptools/pip).
    """
    print("Converting recipe in {0}".format(direc))
    buildstr = 'pip install %s\n' % package
    # convert build.sh file and bld.bat file
    filenames = ['build.sh', 'bld.bat']
    for name in filenames:
        with open(os.path.join(direc, name), 'w') as fid:
            fid.write(buildstr)
    # convert meta.yaml file
    with open(os.path.join(direc, 'meta.yaml')) as fid:
        fid.seek(0)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the file here is locally generated by conda
        # skeleton, so it is left as-is.
        meta = yaml.load(fid)
    bdep = meta['requirements']['build']
    bdep.remove('python')
    # Remove each tool independently: the original removed both inside one
    # try block, so a recipe lacking 'setuptools' never had 'pip' removed.
    try:
        bdep.remove('setuptools')
    except ValueError:
        pass
    try:
        bdep.remove('pip')
    except ValueError:
        pass
    depends = bdep
    indent = '\n - '
    d = {}
    d['packagename'] = meta['package']['name']
    d['version'] = meta['package']['version']
    if depends:
        d['depends'] = indent.join([''] + depends)
    else:
        d['depends'] = ''
    d['homeurl'] = meta['about']['home']
    d['license'] = meta['about']['license']
    d['summary'] = meta['about']['summary']
    # Comment markers toggle the optional noarch block in meta_template.
    if noarch_python:
        d['build_comment'] = ''
        d['noarch_python_comment'] = ''
    else:
        d['build_comment'] = '# '
        d['noarch_python_comment'] = '# '
    with open(os.path.join(direc, 'meta.yaml'), 'w') as fid:
        fid.write(meta_template.format(**d))
    return depends
def get_all_dependencies(package, version):
    """Return the runtime dependencies of *package*==*version* as a list.

    Installs the package with pip into a throwaway '_pipbuild_' conda
    environment, queries pkg_resources for its requirements, then removes
    the environment.  Exact '==' pins keep their version ("name version");
    every other spec is reduced to the bare name.
    """
    import conda.config
    prefix = os.path.join(conda.config.default_prefix, 'envs', '_pipbuild_')
    cmd1 = "conda create -n _pipbuild_ --yes python pip"
    print(cmd1)
    utils.execute(cmd1.split())
    cmd2 = "%s/bin/pip install %s==%s" % (prefix, package, version)
    print(cmd2)
    try:
        # BUG FIX: this used to call the module-level ``execute(args, parser)``
        # CLI entry point (wrong function, wrong signature) instead of
        # ``utils.execute``.
        utils.execute(cmd2.split(), check_exit_code=True)
    except subprocess.CalledProcessError:
        raise RuntimeError("Could not pip install %s==%s" % (package, version))
    cmd3args = ['%s/bin/python' % prefix, '__tmpfile__.py']
    fid = open('__tmpfile__.py', 'w')
    fid.write("import pkg_resources;\n")
    fid.write("reqs = pkg_resources.get_distribution('%s').requires();\n" %
              package)
    fid.write("print([(req.key, req.specs) for req in reqs])\n")
    fid.close()
    print("Getting dependencies...")
    output, _ = utils.execute(cmd3args, check_exit_code=True)
    # NOTE(review): eval() of subprocess output is only acceptable because
    # the helper script above prints a literal list of tuples;
    # ast.literal_eval would be the safer choice.
    deps = eval(output)
    os.unlink('__tmpfile__.py')
    depends = []
    for dep in deps:
        # Keep '==' pins with their version; drop every other specifier.
        # NOTE(review): assumes the spec arrives as a flat ('==', version)
        # pair -- confirm against the pkg_resources output format.
        if len(dep[1]) == 2 and dep[1][0] == '==':
            depends.append(dep[0] + ' ' + dep[1][1])
        else:
            depends.append(dep[0])
    cmd4 = "conda remove -n _pipbuild_ --yes --all"
    utils.execute(cmd4.split())
    return depends
def make_recipe(package, version, noarch_python=False):
    """Build a recipe directory for *package* from PyPI metadata alone.

    Fallback used when ``conda skeleton`` fails.  Returns the tuple
    (recipe_directory, dependencies).
    """
    if version is None:
        release = client.package_releases(package)
        if len(release) > 0:
            version = release[0]
        else:
            raise RuntimeError("Empty releases for %s" % package)
    depends = get_all_dependencies(package, version)
    dirname = package.lower() + "-" + version
    if os.path.isdir(dirname):
        rm_rf(dirname)
    os.mkdir(dirname)
    direc = os.path.abspath(dirname)
    build = 'pip install %s==%s\n' % (package, version)
    # write build.sh file and bld.bat file
    filenames = ['build.sh', 'bld.bat']
    for name in filenames:
        with open(os.path.join(direc, name), 'w') as fid:
            fid.write(build)
    indent = '\n - '
    d = {}
    d['packagename'] = package
    d['version'] = version
    if depends:
        d['depends'] = indent.join([''] + depends)
    else:
        d['depends'] = ''
    data = client.release_data(package, version)
    if not data:
        raise RuntimeError("Cannot get data for %s-%s" % (package, version))
    license_classifier = "License :: OSI Approved ::"
    if 'classifiers' in data:
        # BUG FIX: str.lstrip(license_classifier) strips any *characters*
        # from that set rather than the prefix, mangling names like
        # "Apache Software License" into "ache Software License".  Slice
        # the prefix off instead.
        licenses = [classifier[len(license_classifier):].strip() for classifier in
                    data['classifiers'] if classifier.startswith(license_classifier)]
    else:
        licenses = []
    if not licenses:
        license = data.get('license', 'UNKNOWN') or 'UNKNOWN'
    else:
        license = ' or '.join(licenses)
    d['homeurl'] = data['home_page']
    d['license'] = license
    d['summary'] = repr(data['summary'])
    if noarch_python:
        d['build_comment'] = ''
        d['noarch_python_comment'] = ''
    else:
        d['build_comment'] = '# '
        d['noarch_python_comment'] = '# '
    with open(os.path.join(direc, 'meta.yaml'), 'w') as fid:
        fid.write(meta_template.format(**d))
    return direc, depends
def build_package(package, version=None, noarch_python=False):
    """Build a conda package (and, recursively, its dependencies) for a
    PyPI *package*, then upload it via handle_binstar_upload.

    Returns the subprocess return code (0 on success).
    """
    # Recursive calls pass dependencies as "name" or "name version".
    if ' ' in package:
        package, version = package.split(' ')
    try:
        directory = build_recipe(package, version=version)
        dependencies = convert_recipe(directory, package,
                                      noarch_python=noarch_python)
    except RuntimeError:
        # conda skeleton failed: fall back to building the recipe
        # directly from PyPI metadata.
        directory, dependencies = make_recipe(package, version,
                                              noarch_python=noarch_python)
    return_code = 0
    try:
        print("package = %s" % package)
        print(" dependencies = %s" % dependencies)
        # Dependencies will be either package_name or
        # package_name version_number
        # Only == dependency specs get version numbers
        # All else are just handled without a version spec
        for depend in dependencies:
            build_package(depend)
        args = build_template.format(directory).split()
        print("Building conda package for {0}".format(package.lower()))
        try:
            utils.execute(args, check_exit_code=True)
        except subprocess.CalledProcessError as exc:
            # BUG FIX: CalledProcessError exposes ``returncode``;
            # ``exc.return_code`` raised AttributeError and masked the
            # real build failure.
            return_code = exc.returncode
        else:
            m = MetaData(directory)
            handle_binstar_upload(build.bldpkg_path(m))
    finally:
        # Always clean up the recipe directory, even on failure.
        rm_rf(directory)
    return return_code
def execute(args, parser):
    """Entry point for the ``conda pipbuild`` CLI command.

    Resolves the requested PyPI package name and release, then delegates
    to build_package().
    """
    global binstar_upload
    global client
    binstar_upload = args.binstar_upload
    client = ServerProxy(args.pypi_url)
    package = args.pypi_name[0]
    # 'latest' means: pick the newest release below and build only it.
    # NOTE(review): despite the name, all_versions here just means "an
    # explicit release was requested".
    if args.release == 'latest':
        version = None
        all_versions = False
    else:
        all_versions = True
        version = args.release[0]
    # Use PyPI search to fix up the capitalization of the package name.
    search = client.search({'name': package})
    if search:
        r_name = list(filter(lambda x: ('name' in x and package.lower() == x['name'].lower()), search))
        if r_name:
            print('Package search: %s' % r_name[0])
            package = r_name[0]['name']
    releases = client.package_releases(package, all_versions)
    if not releases:
        sys.exit("Error: PyPI does not have a package named %s" % package)
    # NOTE(review): when an explicitly requested release is unknown this
    # only warns and still attempts the build with that version.
    if all_versions and version not in releases:
        print(releases)
        print("Warning: PyPI does not have version %s of package %s" %
              (version, package))
    if all_versions:
        build_package(package, version, noarch_python=args.noarch_python)
    else:
        version = releases[0]
        build_package(package, version, noarch_python=args.noarch_python)
if __name__ == '__main__':
    # Script entry point: delegate to main() (defined elsewhere in this file).
    main()
|
NicWayand/OpenSfM | refs/heads/master | opensfm/features.py | 2 | # -*- coding: utf-8 -*-
import os, sys
import tempfile
import time
import logging
from subprocess import call
import numpy as np
import json
import uuid
import cv2
import csfm
logger = logging.getLogger(__name__)
def resized_image(image, config):
    """Return *image* downscaled so its largest side equals the
    'feature_process_size' setting, or the image unchanged when the
    setting is missing (-1) or not smaller than the current size.
    """
    feature_process_size = config.get('feature_process_size', -1)
    size = np.array(image.shape[0:2])
    if 0 < feature_process_size < size.max():
        # NOTE(review): relies on Python 2 integer division producing an
        # integral dsize for cv2.resize (this file uses xrange elsewhere);
        # under Python 3 this expression yields floats -- confirm before
        # porting.
        new_size = size * feature_process_size / size.max()
        # cv2.resize takes dsize as (width, height), i.e. reversed shape order.
        return cv2.resize(image, dsize=(new_size[1], new_size[0]))
    else:
        return image
def root_feature(desc, l2_normalization=False):
    """Apply the square-root (RootSIFT-style) mapping to a descriptor
    matrix: L1-normalize every row, then take the element-wise square
    root.  Optionally L2-normalize the rows first.
    """
    if l2_normalization:
        l2_norms = np.linalg.norm(desc, axis=1)
        desc = desc / l2_norms[:, np.newaxis]
    l1_norms = np.sum(desc, axis=1)
    return np.sqrt(desc / l1_norms[:, np.newaxis])
def root_feature_surf(desc, l2_normalization=False, partial=False):
    """Experimental square-root mapping for 64-dimensional SURF-like
    descriptors.

    Signs are preserved: the square root is applied to magnitudes only,
    after L1 normalization by the per-row absolute sum.  With
    partial=True only the 3rd and 4th component of each 4-block is
    remapped.  Descriptors whose dimension is not 64 are returned
    untouched.  NOTE: remaps *desc* in place and returns it.
    """
    if desc.shape[1] != 64:
        return desc
    if l2_normalization:
        norms = np.linalg.norm(desc, axis=1)
        desc = (desc.T / norms).T
    if partial:
        cols = np.array([i for i in xrange(64) if i % 4 in (2, 3)])
    else:
        cols = np.arange(64)
    magnitudes = np.abs(desc[:, cols])
    signs = np.sign(desc[:, cols])
    # Normalizing by np.sum(magnitudes, 1) instead gives slightly better
    # results for AKAZE surf.
    totals = np.sum(np.abs(desc), 1)
    mapped = np.sqrt(magnitudes.T / totals).T
    desc[:, cols] = mapped * signs
    return desc
def normalized_image_coordinates(pixel_coords, width, height):
    """Map pixel coordinates to image-centered coordinates scaled by the
    larger image dimension (origin at the image center, x right, y down).
    """
    scale = max(width, height)
    xs = (pixel_coords[:, 0] + 0.5 - width / 2.0) / scale
    ys = (pixel_coords[:, 1] + 0.5 - height / 2.0) / scale
    return np.column_stack((xs, ys))
def denormalized_image_coordinates(norm_coords, width, height):
    """Inverse of normalized_image_coordinates: map centered,
    size-scaled coordinates back to pixel coordinates.
    """
    scale = max(width, height)
    xs = norm_coords[:, 0] * scale - 0.5 + width / 2.0
    ys = norm_coords[:, 1] * scale - 0.5 + height / 2.0
    return np.column_stack((xs, ys))
def mask_and_normalize_features(points, desc, colors, width, height, config):
    """Drop features inside the configured mask rectangles, then convert
    the point coordinates to normalized image coordinates.

    Masks come from config['masks'] as (top, left, bottom, right)
    fractions of the image size.
    """
    for mask in np.array(config.get('masks', [])):
        top, left = mask[0] * height, mask[1] * width
        bottom, right = mask[2] * height, mask[3] * width
        inside = ((points[:, 1] > top) &
                  (points[:, 1] < bottom) &
                  (points[:, 0] > left) &
                  (points[:, 0] < right))
        keep = ~inside
        points = points[keep]
        desc = desc[keep]
        colors = colors[keep]
    points[:, :2] = normalized_image_coordinates(points[:, :2], width, height)
    return points, desc, colors
def extract_features_sift(image, config):
    """Detect and describe SIFT features on a grayscale image.

    The contrast threshold is relaxed iteratively until at least
    'feature_min_frames' keypoints are found (or the threshold floor of
    0.0001 is reached).  Returns (points, desc) where each points row is
    (x, y, size, angle) in pixel coordinates.
    """
    # OpenCV 2.4-style factory API.
    detector = cv2.FeatureDetector_create('SIFT')
    descriptor = cv2.DescriptorExtractor_create('SIFT')
    detector.setDouble('edgeThreshold', config.get('sift_edge_threshold', 10))
    sift_peak_threshold = float(config.get('sift_peak_threshold', 0.1))
    while True:
        logger.debug('Computing sift with threshold {0}'.format(sift_peak_threshold))
        t = time.time()
        detector.setDouble("contrastThreshold", sift_peak_threshold)
        points = detector.detect(image)
        logger.debug('Found {0} points in {1}s'.format( len(points), time.time()-t ))
        # Too few keypoints: lower the threshold (multiply by 2/3) and retry.
        if len(points) < config.get('feature_min_frames', 0) and sift_peak_threshold > 0.0001:
            sift_peak_threshold = (sift_peak_threshold * 2) / 3
            logger.debug('reducing threshold')
        else:
            logger.debug('done')
            break
    points, desc = descriptor.compute(image, points)
    if config.get('feature_root', False): desc = root_feature(desc)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
def extract_features_surf(image, config):
    """Detect and describe SURF features on a grayscale image.

    The Hessian threshold is relaxed iteratively until at least
    'feature_min_frames' keypoints are found.  Returns (points, desc)
    where each points row is (x, y, size, angle) in pixel coordinates.
    """
    # OpenCV 2.4-style factory API.
    detector = cv2.FeatureDetector_create('SURF')
    descriptor = cv2.DescriptorExtractor_create('SURF')
    surf_hessian_threshold = config.get('surf_hessian_threshold', 3000)
    detector.setDouble('hessianThreshold', surf_hessian_threshold)
    detector.setDouble('nOctaves', config.get('surf_n_octaves', 4))
    detector.setDouble('nOctaveLayers', config.get('surf_n_octavelayers', 2))
    detector.setInt('upright', config.get('surf_upright',0))
    while True:
        logger.debug('Computing surf with threshold {0}'.format(surf_hessian_threshold))
        t = time.time()
        detector.setDouble("hessianThreshold", surf_hessian_threshold) #default: 0.04
        points = detector.detect(image)
        logger.debug('Found {0} points in {1}s'.format( len(points), time.time()-t ))
        # Too few keypoints: lower the threshold (multiply by 2/3) and retry.
        if len(points) < config.get('feature_min_frames', 0) and surf_hessian_threshold > 0.0001:
            surf_hessian_threshold = (surf_hessian_threshold * 2) / 3
            logger.debug('reducing threshold')
        else:
            logger.debug('done')
            break
    points, desc = descriptor.compute(image, points)
    # SURF descriptors use the partial square-root mapping.
    if config.get('feature_root', False): desc = root_feature_surf(desc, partial=True)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
def akaze_descriptor_type(name):
    """Look up the csfm AkazeDescriptorType member called *name*,
    falling back to MSURF (with a debug message) for unknown names.
    """
    known_types = csfm.AkazeDescriptorType.__dict__
    if name not in known_types:
        logger.debug('Wrong akaze descriptor type')
        name = 'MSURF'
    return known_types[name]
def extract_features_akaze(image, config):
    """Detect and describe AKAZE features via the csfm extension.

    All tuning parameters are read from the config dictionary.  Returns
    (points, desc) with points cast to float.
    """
    options = csfm.AKAZEOptions()
    options.omax = config.get('akaze_omax', 4)
    akaze_descriptor_name = config.get('akaze_descriptor', 'MSURF')
    options.descriptor = akaze_descriptor_type(akaze_descriptor_name)
    options.descriptor_size = config.get('akaze_descriptor_size', 0)
    options.descriptor_channels = config.get('akaze_descriptor_channels', 3)
    options.process_size = config.get('feature_process_size', -1)
    options.dthreshold = config.get('akaze_dthreshold', 0.001)
    options.kcontrast_percentile = config.get('akaze_kcontrast_percentile', 0.7)
    options.use_isotropic_diffusion = config.get('akaze_use_isotropic_diffusion', False)
    options.target_num_features = config.get('feature_min_frames', 0)
    options.use_adaptive_suppression = config.get('feature_use_adaptive_suppression', False)
    logger.debug('Computing AKAZE with threshold {0}'.format(options.dthreshold))
    t = time.time()
    points, desc = csfm.akaze(image, options)
    logger.debug('Found {0} points in {1}s'.format( len(points), time.time()-t ))
    # Only SURF-family AKAZE descriptors support the square-root mapping;
    # the upright variants use the partial mapping.
    if config.get('feature_root', False):
        if akaze_descriptor_name in ["SURF_UPRIGHT", "MSURF_UPRIGHT"]:
            desc = root_feature_surf(desc, partial=True)
        elif akaze_descriptor_name in ["SURF", "MSURF"]:
            desc = root_feature_surf(desc, partial=False)
    points = points.astype(float)
    return points, desc
def extract_features_hahog(image, config):
    """Detect Hessian-Affine keypoints with SIFT/HOG descriptors via the
    csfm extension.  Returns (points, desc).
    """
    t = time.time()
    points, desc = csfm.hahog(image.astype(np.float32) / 255, # VlFeat expects pixel values between 0, 1
                              peak_threshold = config.get('hahog_peak_threshold', 0.003),
                              edge_threshold = config.get('hahog_edge_threshold', 10),
                              target_num_features = config.get('feature_min_frames', 0),
                              use_adaptive_suppression = config.get('feature_use_adaptive_suppression', False))
    if config.get('feature_root', False):
        # RootSIFT-style mapping; adjust the uint8 scaling to keep the
        # mapped values inside [0, 255].
        desc = np.sqrt(desc)
        uchar_scaling = 362  # x * 512 < 256  =>  sqrt(x) * 362 < 256
    else:
        uchar_scaling = 512
    if config.get('hahog_normalize_to_uchar', False):
        desc = (uchar_scaling * desc).clip(0, 255).round()
    logger.debug('Found {0} points in {1}s'.format( len(points), time.time()-t ))
    return points, desc
def extract_features(color_image, config):
    """Run the configured feature extractor on an image.

    Returns (points, desc, colors): normalized keypoint coordinates, the
    descriptor matrix, and the color sampled under each keypoint.
    Raises ValueError for an unknown 'feature_type'.
    """
    assert len(color_image.shape) == 3
    color_image = resized_image(color_image, config)
    # NOTE(review): assumes the input image is in RGB channel order.
    image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
    feature_type = config.get('feature_type','SIFT').upper()
    if feature_type == 'SIFT':
        points, desc = extract_features_sift(image, config)
    elif feature_type == 'SURF':
        points, desc = extract_features_surf(image, config)
    elif feature_type == 'AKAZE':
        points, desc = extract_features_akaze(image, config)
    elif feature_type == 'HAHOG':
        points, desc = extract_features_hahog(image, config)
    else:
        raise ValueError('Unknown feature type (must be SURF, SIFT, AKAZE or HAHOG)')
    # Sample the color under each keypoint (rounded to the nearest pixel).
    xs = points[:,0].round().astype(int)
    ys = points[:,1].round().astype(int)
    colors = color_image[ys, xs]
    return mask_and_normalize_features(points, desc, colors, image.shape[1], image.shape[0], config)
def build_flann_index(features, config):
    """Build a cv2 FLANN index over a descriptor matrix.

    Float descriptors get a k-means tree; any other dtype (binary
    descriptors) gets LSH.
    """
    # FLANN algorithm ids.  Only KMEANS and LSH are used below; the rest
    # are kept for reference.
    FLANN_INDEX_LINEAR = 0
    FLANN_INDEX_KDTREE = 1
    FLANN_INDEX_KMEANS = 2
    FLANN_INDEX_COMPOSITE = 3
    FLANN_INDEX_KDTREE_SINGLE = 4
    FLANN_INDEX_HIERARCHICAL = 5
    FLANN_INDEX_LSH = 6
    if features.dtype.type is np.float32:
        FLANN_INDEX_METHOD = FLANN_INDEX_KMEANS
    else:
        FLANN_INDEX_METHOD = FLANN_INDEX_LSH
    flann_params = dict(algorithm=FLANN_INDEX_METHOD,
                        branching=config.get('flann_branching', 16),
                        iterations=config.get('flann_iterations', 20))
    index = cv2.flann_Index(features, flann_params)
    return index
|
p4datasystems/CarnotKE | refs/heads/master | jyhton/Lib/distutils/command/build.py | 250 | """distutils.command.build
Implements the Distutils 'build' command."""
__revision__ = "$Id$"
import sys, os
from distutils.util import get_platform
from distutils.core import Command
from distutils.errors import DistutilsOptionError
def show_compilers():
    """Print the list of available compilers (used by --help-compiler)."""
    # Import under an alias so the helper does not shadow this function's
    # own name inside the body.
    from distutils.ccompiler import show_compilers as _show_compilers
    _show_compilers()
class build(Command):
    """Distutils 'build' command: build everything needed to install."""

    description = "build everything needed to install"

    user_options = [
        ('build-base=', 'b',
         "base directory for build library"),
        ('build-purelib=', None,
         "build directory for platform-neutral distributions"),
        ('build-platlib=', None,
         "build directory for platform-specific distributions"),
        ('build-lib=', None,
         "build directory for all distribution (defaults to either " +
         "build-purelib or build-platlib"),
        ('build-scripts=', None,
         "build directory for scripts"),
        ('build-temp=', 't',
         "temporary build directory"),
        ('plat-name=', 'p',
         "platform name to build for, if supported "
         "(default: %s)" % get_platform()),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('debug', 'g',
         "compile extensions and libraries with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('executable=', 'e',
         "specify final destination interpreter path (build.py)"),
        ]

    boolean_options = ['debug', 'force']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]

    def initialize_options(self):
        """Set every option to its pre-finalization default."""
        self.build_base = 'build'
        # these are decided only after 'build_base' has its final value
        # (unless overridden by the user or client)
        self.build_purelib = None
        self.build_platlib = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.compiler = None
        self.plat_name = None
        self.debug = None
        self.force = 0
        self.executable = None

    def finalize_options(self):
        """Compute the final build directories and platform settings."""
        if self.plat_name is None:
            self.plat_name = get_platform()
        else:
            # plat-name only supported for windows (other platforms are
            # supported via ./configure flags, if at all). Avoid misleading
            # other platforms.
            if os.name != 'nt':
                raise DistutilsOptionError(
                    "--plat-name only supported on Windows (try "
                    "using './configure --help' on your platform)")
        plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])
        # Make it so Python 2.x and Python 2.x with --with-pydebug don't
        # share the same build directories. Doing so confuses the build
        # process for C modules
        if hasattr(sys, 'gettotalrefcount'):
            plat_specifier += '-pydebug'
        # 'build_purelib' and 'build_platlib' just default to 'lib' and
        # 'lib.<plat>' under the base build directory.  We only use one of
        # them for a given distribution, though --
        if self.build_purelib is None:
            self.build_purelib = os.path.join(self.build_base, 'lib')
        if self.build_platlib is None:
            self.build_platlib = os.path.join(self.build_base,
                                              'lib' + plat_specifier)
        # 'build_lib' is the actual directory that we will use for this
        # particular module distribution -- if user didn't supply it, pick
        # one of 'build_purelib' or 'build_platlib'.
        if self.build_lib is None:
            if self.distribution.ext_modules:
                self.build_lib = self.build_platlib
            else:
                self.build_lib = self.build_purelib
        # 'build_temp' -- temporary directory for compiler turds,
        # "build/temp.<plat>"
        if self.build_temp is None:
            self.build_temp = os.path.join(self.build_base,
                                           'temp' + plat_specifier)
        if self.build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts-' + sys.version[0:3])
        if self.executable is None:
            self.executable = os.path.normpath(sys.executable)

    def run(self):
        """Run every applicable build sub-command, in declaration order."""
        # Run all relevant sub-commands.  This will be some subset of:
        #  - build_py      - pure Python modules
        #  - build_clib    - standalone C libraries
        #  - build_ext     - Python extensions
        #  - build_scripts - (Python) scripts
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

    # -- Predicates for the sub-command list ---------------------------

    def has_pure_modules (self):
        return self.distribution.has_pure_modules()

    def has_c_libraries (self):
        return self.distribution.has_c_libraries()

    def has_ext_modules (self):
        return self.distribution.has_ext_modules()

    def has_scripts (self):
        return self.distribution.has_scripts()

    # Sub-commands are run only when their predicate returns true.
    sub_commands = [('build_py',      has_pure_modules),
                    ('build_clib',    has_c_libraries),
                    ('build_ext',     has_ext_modules),
                    ('build_scripts', has_scripts),
                   ]
|
zapcoop/vertex | refs/heads/master | vertex_api/service/viewsets/__init__.py | 1 | from .ticket import *
from .team import *
from .update import *
from .note import *
from .ticket_subscriber import *
|
SimonWang2014/DockerConsoleApp | refs/heads/master | libs/stormed-amqp/build/lib.linux-x86_64-2.7/stormed/method/__init__.py | 4 | from stormed.method.codegen import id2class
|
citrix-openstack-build/nova | refs/heads/master | nova/servicegroup/drivers/zk.py | 13 | # Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import eventlet
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.servicegroup import api
evzookeeper = importutils.try_import('evzookeeper')
membership = importutils.try_import('evzookeeper.membership')
zookeeper = importutils.try_import('zookeeper')
# Configuration options for the ZooKeeper servicegroup driver;
# registered below under the [zookeeper] group.
zk_driver_opts = [
    cfg.StrOpt('address',
               help='The ZooKeeper addresses for servicegroup service in the '
                    'format of host1:port,host2:port,host3:port'),
    cfg.IntOpt('recv_timeout',
               default=4000,
               help='recv_timeout parameter for the zk session'),
    cfg.StrOpt('sg_prefix',
               default="/servicegroups",
               help='The prefix used in ZooKeeper to store ephemeral nodes'),
    cfg.IntOpt('sg_retry_interval',
               default=5,
               help='Number of seconds to wait until retrying to join the '
                    'session'),
    ]

CONF = cfg.CONF
CONF.register_opts(zk_driver_opts, group="zookeeper")

# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
class ZooKeeperDriver(api.ServiceGroupDriver):
    """ZooKeeper driver for the service group API."""

    def __init__(self, *args, **kwargs):
        """Create the zk session object."""
        if not all([evzookeeper, membership, zookeeper]):
            raise ImportError('zookeeper module not found')
        # Discard the zookeeper client library's C-level log output.
        null = open(os.devnull, "w")
        self._session = evzookeeper.ZKSession(CONF.zookeeper.address,
                                              recv_timeout=
                                              CONF.zookeeper.recv_timeout,
                                              zklog_fd=null)
        # (group, member_id) -> Membership for nodes joined by this process.
        self._memberships = {}
        # group_id -> MembershipMonitor, used by get_all()/is_up().
        self._monitors = {}
        # Make sure the prefix exists
        try:
            self._session.create(CONF.zookeeper.sg_prefix, "",
                                 acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
        except zookeeper.NodeExistsException:
            pass
        super(ZooKeeperDriver, self).__init__()

    def join(self, member_id, group, service=None):
        """Join the given service with its group."""
        LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the '
                    '%(gr)s group, service=%(sr)s'),
                  {'id': member_id, 'gr': group, 'sr': service})
        member = self._memberships.get((group, member_id), None)
        if member is None:
            # the first time to join. Generate a new object
            path = "%s/%s" % (CONF.zookeeper.sg_prefix, group)
            try:
                member = membership.Membership(self._session, path, member_id)
            except RuntimeError:
                LOG.exception(_("Unable to join. It is possible that either "
                                "another node exists with the same name, or "
                                "this node just restarted. We will try "
                                "again in a short while to make sure."))
                eventlet.sleep(CONF.zookeeper.sg_retry_interval)
                # Retry once; the stale ephemeral node should have expired.
                member = membership.Membership(self._session, path, member_id)
            self._memberships[(group, member_id)] = member
        # Callers expect a looping-call-like object (DB-driver legacy API).
        return FakeLoopingCall(self, member_id, group)

    def leave(self, member_id, group):
        """Remove the given member from the service group."""
        LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'),
                  {'member': member_id, 'group': group})
        try:
            key = (group, member_id)
            member = self._memberships[key]
            member.leave()
            del self._memberships[key]
        except KeyError:
            LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the '
                        '%(gr)s group'), {'id': member_id, 'gr': group})

    def is_up(self, service_ref):
        """Return True when the service's host is a current group member."""
        group_id = service_ref['topic']
        member_id = service_ref['host']
        all_members = self.get_all(group_id)
        return member_id in all_members

    def get_all(self, group_id):
        """Return all members in a list, or a ServiceGroupUnavailable
        exception.
        """
        monitor = self._monitors.get(group_id, None)
        if monitor is None:
            path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)
            monitor = membership.MembershipMonitor(self._session, path)
            self._monitors[group_id] = monitor
            # Note(maoy): When initialized for the first time, it takes a
            # while to retrieve all members from zookeeper. To prevent
            # None to be returned, we sleep 5 sec max to wait for data to
            # be ready.
            # Poll up to 50 * 0.1s = 5 seconds for the initial member list.
            for _retry in range(50):
                eventlet.sleep(0.1)
                all_members = monitor.get_all()
                if all_members is not None:
                    return all_members
        all_members = monitor.get_all()
        if all_members is None:
            raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
        return all_members
class FakeLoopingCall(loopingcall.LoopingCallBase):
    """The fake Looping Call implementation, created for backward
    compatibility with a membership based on DB.
    """

    def __init__(self, driver, host, group):
        # NOTE(review): the base-class __init__ is not invoked here --
        # confirm LoopingCallBase requires no setup of its own.
        self._driver = driver
        self._group = group
        self._host = host

    def stop(self):
        # Stopping the "timer" means leaving the ZooKeeper membership.
        self._driver.leave(self._host, self._group)

    def start(self, interval, initial_delay=None):
        # No periodic heartbeat is needed: the ephemeral znode created on
        # join keeps the membership alive while the session is up.
        pass

    def wait(self):
        pass
|
bokeh/bokeh | refs/heads/branch-2.4 | tests/unit/bokeh/application/handlers/test_handler.py | 1 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Module under test
import bokeh.application.handlers.handler as bahh # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_Handler:
    """Unit tests for the defaults of the Handler base class."""

    # Public methods ----------------------------------------------------------

    def test_create(self) -> None:
        # A fresh Handler starts in a clean, non-failed state.
        h = bahh.Handler()
        assert h.failed == False
        assert h.url_path() is None
        assert h.static_path() is None
        assert h.error is None
        assert h.error_detail is None

    def test_modify_document_abstract(self) -> None:
        # modify_document must be overridden by subclasses.
        h = bahh.Handler()
        with pytest.raises(NotImplementedError):
            h.modify_document("doc")

    def test_default_server_hooks_return_none(self) -> None:
        # The server lifecycle hooks are no-ops by default.
        h = bahh.Handler()
        assert h.on_server_loaded("context") is None
        assert h.on_server_unloaded("context") is None

    async def test_default_sesssion_hooks_return_none(self) -> None:
        # The session lifecycle hooks are async no-ops by default.
        h = bahh.Handler()
        assert await h.on_session_created("context") is None
        assert await h.on_session_destroyed("context") is None

    def test_static_path(self) -> None:
        # static_path reflects _static until the handler is marked failed.
        h = bahh.Handler()
        assert h.static_path() is None
        h._static = "path"
        assert h.static_path() == "path"
        h._failed = True
        assert h.static_path() is None

    def test_process_request(self) -> None:
        # The default request processor contributes no session payload.
        h = bahh.Handler()
        assert h.process_request("request") == {}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
GUBotDev/mavlink | refs/heads/master | pymavlink/tools/mavtomfile.py | 45 | #!/usr/bin/env python
'''
convert a MAVLink tlog file to a MATLab mfile
'''
import sys, os
import re
from pymavlink import mavutil
def process_tlog(filename):
    '''Convert a single MAVLink tlog to a MATLAB/Octave .m file.

    For each message type a struct is written with a 'columns' cell array
    of field names (array fields expand to field1..fieldN) and a 'data'
    matrix whose first column is the timestamp.
    '''
    print("Processing %s" % filename)
    mlog = mavutil.mavlink_connection(filename, dialect=args.dialect, zero_time_base=True)
    types = args.types
    if types is not None:
        types = types.split(',')
    # note that Octave doesn't like any extra '.', '*', '-', characters in the filename
    (head, tail) = os.path.split(filename)
    basename = '.'.join(tail.split('.')[:-1])
    mfilename = re.sub(r'[\.\-\+\*]', '_', basename) + '.m'
    # Octave also doesn't like files that don't start with a letter.
    # BUG FIX: the class used to be '[a-zA-z]', which also matches the
    # ASCII characters between 'Z' and 'a' ('[', '\', ']', '^', '_', '`'),
    # letting some invalid names through without the 'm_' prefix.
    if re.match('^[a-zA-Z]', mfilename) is None:
        mfilename = 'm_' + mfilename
    if head is not None:
        mfilename = os.path.join(head, mfilename)
    print("Creating %s" % mfilename)
    f = open(mfilename, "w")
    # Per-type row counters; also tells us which headers were written.
    # (The unused msg_types/msg_lists dicts were removed.)
    type_counters = {}
    while True:
        m = mlog.recv_match(condition=args.condition)
        if m is None:
            break
        if types is not None and m.get_type() not in types:
            continue
        if m.get_type() == 'BAD_DATA':
            continue
        fieldnames = m._fieldnames
        mtype = m.get_type()
        if mtype in ['FMT', 'PARM']:
            continue
        if mtype not in type_counters:
            # First message of this type: emit the column-name header.
            type_counters[mtype] = 0
            f.write("%s.columns = {'timestamp'" % mtype)
            for field in fieldnames:
                val = getattr(m, field)
                if not isinstance(val, str):
                    if type(val) is not list:
                        f.write(",'%s'" % field)
                    else:
                        # Array fields become field1..fieldN columns.
                        for i in range(0, len(val)):
                            f.write(",'%s%d'" % (field, i + 1))
            f.write("};\n")
        type_counters[mtype] += 1
        # One matrix row per message; string fields are skipped, matching
        # the header written above.
        f.write("%s.data(%u,:) = [%f" % (mtype, type_counters[mtype], m._timestamp))
        for field in m._fieldnames:
            val = getattr(m, field)
            if not isinstance(val, str):
                if type(val) is not list:
                    f.write(",%.20g" % val)
                else:
                    for i in range(0, len(val)):
                        f.write(",%.20g" % val[i])
        f.write("];\n")
    f.close()
# Command-line interface: convert each given tlog to a .m file.
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
# NOTE(review): --output is accepted but never used; the .m file is always
# written next to the input log.
parser.add_argument("-o", "--output", default=None, help="output filename")
parser.add_argument("--types", default=None, help="types of messages (comma separated)")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("logs", metavar="LOG", nargs="+")

args = parser.parse_args()

for filename in args.logs:
    process_tlog(filename)
|
YihaoLu/statsmodels | refs/heads/master | statsmodels/examples/ex_scatter_ellipse.py | 39 | '''example for grid of scatter plots with probability ellipses
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import lrange
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.plot_grids import scatter_ellipse
# Build a random synthetic mean/covariance structure for nvars variables.
nvars = 6
mmean = np.arange(1.,nvars+1)/nvars * 1.5
rho = 0.5
#dcorr = rho*np.ones((nvars, nvars)) + (1-rho)*np.eye(nvars)
r = np.random.uniform(-0.99, 0.99, size=(nvars, nvars))
##from scipy import stats
##r = stats.rdist.rvs(1, size=(nvars, nvars))
# Symmetrize to obtain a valid correlation-like matrix.
r = (r + r.T) / 2.
assert np.allclose(r, r.T)
mcorr = r
mcorr[lrange(nvars), lrange(nvars)] = 1
#dcorr = np.array([[1, 0.5, 0.1],[0.5, 1, -0.2], [0.1, -0.2, 1]])
mstd = np.arange(1.,nvars+1)/nvars
mcov = mcorr * np.outer(mstd, mstd)
evals = np.linalg.eigvalsh(mcov)
# BUG FIX: ``evals.min`` is a bound method (always truthy), so the old
# ``assert evals.min > 0`` could never fire.  Call it so the
# positive-definiteness check actually runs.  Note it may now
# legitimately fail for unlucky random draws, since the symmetrized
# matrix is not guaranteed positive definite.
assert evals.min() > 0 #assert positive definite

# Sample data from the synthetic distribution and report its statistics.
nobs = 100
data = np.random.multivariate_normal(mmean, mcov, size=nobs)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
print(dmean)
print(dcov)
dcorr = np.corrcoef(data, rowvar=0)
dcorr[np.triu_indices(nvars)] = 0
print(dcorr)

#default
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95])
#used for checking
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95], add_titles=True, keep_ticks=True)
#check varnames
varnames = ['var%d' % i for i in range(nvars)]
fig = scatter_ellipse(data, level=0.9, varnames=varnames)
plt.show()
|
andrejb/cloudant_bigcouch | refs/heads/master | couchjs/scons/scons-local-2.0.1/SCons/Tool/sgilink.py | 61 | """SCons.Tool.sgilink
Tool-specific initialization for the SGI MIPSPro linker on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgilink.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
    """Add Builders and construction variables for MIPSPro to an Environment."""
    link.generate(env)

    # Prefer a detected MIPSPro linker; fall back to plain 'cc'.
    env['LINK'] = env.Detect(linkers) or 'cc'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')

    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    env.Append(LINKFLAGS=['$__RPATH'])
    # Trailing space is intentional: MIPSPro expects '-rpath <dir>'.
    env['RPATHPREFIX'] = '-rpath '
    env['RPATHSUFFIX'] = ''
    env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
    """Return a true value when one of the MIPSPro linkers is available."""
    return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
LeeKamentsky/CellProfiler | refs/heads/master | cellprofiler/tests/test_cellprofiler.py | 1 | '''test_cellprofiler - test the CellProfiler command-line interface
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
'''
import datetime
import dateutil.parser
import os
import unittest
from cStringIO import StringIO
import shutil
import subprocess
import sys
import tempfile
from cellprofiler.modules.tests import \
example_images_directory, maybe_download_example_images
import CellProfiler
# Frozen (bundled) builds are launched through the executable itself; source
# checkouts go through CellProfiler.py with "-b" (do not build extensions).
ARGLIST_START = (
    [sys.executable] if hasattr(sys, 'frozen')
    else ["CellProfiler.py", "-b"])
@unittest.skipIf(sys.platform != 'win32', "Skip tests on all but Windows")
class TestCellProfiler(unittest.TestCase):
    """Smoke tests for the CellProfiler command-line interface.

    Each test launches CellProfiler in a subprocess and inspects either its
    stdout or the files it leaves behind.
    """

    def run_cellprofiler(self, *args):
        '''Run CellProfiler with the given arguments list

        returns STDOUT from running it.
        '''
        if hasattr(sys, "frozen"):
            # Frozen build: the running executable *is* CellProfiler.
            args = [sys.argv[0]] + list(args)
            return subprocess.check_output(args)
        elif sys.platform == 'darwin':
            # hopeless to try and find the right homebrew command
            self.skipTest("Can't start Python properly on OS/X + homebrew")
        else:
            # Source checkout: CellProfiler.py lives two directories above
            # this test module; run it with the current interpreter.
            test_dir = os.path.dirname(__file__)
            cellprofiler_dir = os.path.dirname(test_dir)
            root_dir = os.path.dirname(cellprofiler_dir)
            cellprofiler_path = os.path.join(root_dir, "CellProfiler.py")
            self.assertTrue(os.path.isfile(cellprofiler_path))
            args = [sys.executable, cellprofiler_path,
                    "--do-not-build", "--do-not-fetch"] + list(args)
            return subprocess.check_output(args, cwd=root_dir)

    def test_01_01_html(self):
        '''--html should write the manual, rooted at index.html.'''
        path = tempfile.mkdtemp()
        try:
            self.run_cellprofiler("--html", "-o", path)
            filenames = os.listdir(path)
            self.assertTrue("index.html" in filenames)
        finally:
            shutil.rmtree(path)

    @unittest.skipIf(hasattr(sys, "frozen"),
                     "Code statistics are not available in frozen-mode")
    def test_01_02_code_statistics(self):
        '''--code-statistics should report module/setting/line counts.'''
        # Note: previously this saved sys.stdout into an unused local and
        # seeked a freshly constructed StringIO back to 0; both were dead
        # code and have been removed.
        fake_stdout = StringIO(
            self.run_cellprofiler("--code-statistics"))
        found_module_stats = False
        found_setting_stats = False
        found_lines_of_code = False
        for line in fake_stdout.readlines():
            if line.startswith("# of built-in modules"):
                found_module_stats = True
            elif line.startswith("# of settings"):
                found_setting_stats = True
            elif line.startswith("# of lines of code"):
                found_lines_of_code = True
        self.assertTrue(found_module_stats)
        self.assertTrue(found_setting_stats)
        self.assertTrue(found_lines_of_code)

    def test_01_03_version(self):
        '''--version output must agree with cellprofiler.utilities.version.'''
        import cellprofiler.utilities.version as V
        output = self.run_cellprofiler("--version")
        # Output is "Key value" lines; fold them into a dict.
        version = dict([tuple(line.strip().split(" "))
                        for line in output.split("\n")
                        if " " in line])
        self.assertEqual(version["CellProfiler"], V.dotted_version)
        self.assertEqual(version["Git"], V.git_hash)
        self.assertEqual(int(version["Version"][:8]),
                         int(V.version_number / 1000000))
        built = dateutil.parser.parse(version["Built"])
        self.assertLessEqual(built.date(), datetime.date.today())

    def test_02_01_run_headless(self):
        '''Headless run: execute a pipeline, then re-run from its output.'''
        output_directory = tempfile.mkdtemp()
        temp_directory = os.path.join(output_directory, "temp")
        os.mkdir(temp_directory)
        try:
            #
            # Run with a .cp file
            #
            input_directory = maybe_download_example_images(
                ["ExampleHT29"],
                ['AS_09125_050116030001_D03f00d0.tif',
                 'AS_09125_050116030001_D03f00d1.tif',
                 'AS_09125_050116030001_D03f00d2.tif',
                 'ExampleHT29.cp', 'k27IllumCorrControlv1.mat'])
            pipeline_file = os.path.join(input_directory, "ExampleHT29.cp")
            measurements_file = os.path.join(output_directory, "Measurements.h5")
            done_file = os.path.join(output_directory, "Done.txt")
            self.run_cellprofiler("-c", "-r",
                                  "-i", input_directory,
                                  "-o", output_directory,
                                  "-p", pipeline_file,
                                  "-d", done_file,
                                  "-t", temp_directory,
                                  measurements_file)
            # (The unused "import cellprofiler.preferences" that used to sit
            # here was removed.)
            self.assertTrue(os.path.exists(measurements_file))
            self.assertTrue(os.path.exists(done_file))
            #
            # Re-run using the measurements file.
            #
            m2_file = os.path.join(output_directory, "M2.h5")
            self.run_cellprofiler("-c", "-r",
                                  "-i", input_directory,
                                  "-o", output_directory,
                                  "-p", measurements_file,
                                  m2_file)
            self.assertTrue(os.path.exists(m2_file))
        finally:
            shutil.rmtree(output_directory)
|
eugenehp/bootstrap | refs/heads/master | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py | 292 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
# Entries here (and in the merged path_sections set) are matched verbatim
# after IsPathSection strips any trailing '=+?!' merge operators.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Filled in at runtime from base_path_sections plus generator-provided keys
# (per the comment above base_path_sections); query via IsPathSection.
path_sections = set()

# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}

def IsPathSection(section):
  """Return True if |section| names a section that holds pathnames.

  A section holds paths if it is registered in |path_sections| or if its
  name matches the '_(dir|file|path)s?$' naming convention.
  """
  # If section ends in one of the '=+?!' characters, it's applied to a section
  # without the trailing characters.  '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  # Guard on a non-empty string: the previous slice-based test treated ''
  # as a member of any string ('' in '=+?!' is True), which looped forever
  # on an empty section name.
  while section and section[-1] in '=+?!':
    section = section[:-1]
  if section in path_sections:
    return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
  # considered PathSections. Using manual string matching since that
  # is much faster than the regexp and this can be called hundreds of
  # thousands of times so micro performance matters.
  if "_" in section:
    tail = section[-6:]
    if tail[-1] == 's':
      tail = tail[:-1]
    if tail[-5:] in ('_file', '_path'):
      return True
    return tail[-4:] == '_dir'
  return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Starts empty; per the comment above, the merged list (base keys plus any
# generator-provided keys) is installed here at runtime.
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # 'is None' rather than '== None': identity test is the correct (and
  # idiomatic) None check.  A fresh list per top-level call also avoids the
  # mutable-default-argument pitfall.
  if included is None:
    included = []
  # Already visited: cut off the recursion so the result has no duplicates.
  if build_file_path in included:
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.
  """
  # Parse with the (Python 2) 'compiler' package and unwrap the fixed
  # Module -> Stmt -> Discard(expr) shape a gyp file must have.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  # First child of a Module is its docstring slot; a gyp file has none.
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The file must consist of a single bare expression (wrapped in Discard).
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # Convert the expression tree into plain dicts/lists/constants, rejecting
  # duplicate keys and non-literal nodes.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Convert a compiler.ast literal node into plain dicts/lists/constants.

  Raises GypError on a repeated dictionary key and TypeError on any node
  kind other than Dict, List or Const.  |keypath| tracks the path from the
  root for error messages.
  """
  if isinstance(node, Dict):
    # Renamed from 'dict' to avoid shadowing the builtin.
    result = {}
    children = node.getChildren()
    # getChildren() interleaves key and value nodes: k0, v0, k1, v1, ...
    for index in range(0, len(children), 2):
      key_node = children[index]
      assert isinstance(key_node, Const)
      key = key_node.getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      child_path = list(keypath)  # Copy for descending into this node.
      child_path.append(key)
      result[key] = CheckNode(children[index + 1], child_path)
    return result
  if isinstance(node, List):
    converted = []
    for position, element in enumerate(node.getChildren()):
      child_path = list(keypath)  # Copy list.
      child_path.append(repr(position))
      converted.append(CheckNode(element, child_path))
    return converted
  if isinstance(node, Const):
    return node.getChildren()[0]
  raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                  "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge the files named by |includes| and subdict['includes'] into
  |subdict|, recording each inclusion in |aux_data| and recursing into
  nested dicts and lists."""
  includes_list = []
  # 'is not None' rather than '!= None': identity is the correct None test.
  if includes is not None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']
  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)
  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Recurse into a list looking for dicts that may carry 'includes'."""
  for element in sublist:
    # Exact type checks (not isinstance) deliberately mirror the dict walker.
    element_type = type(element)
    if element_type is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif element_type is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target's 'toolsets' list into per-toolset target copies.

  After this runs, every target carries exactly one 'toolset' key; a target
  listing N toolsets is duplicated into N targets, one per toolset.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        # Single-toolset generators always build for the 'target' toolset.
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict is reused (not copied) for the first toolset.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    # A condition entry is [expr, then_dict, (else_dict)]; recurse into the
    # dict parts, which may themselves define targets with toolsets.
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load the target build file at |build_file_path| into |data|.

  Returns False if the file was already loaded (non-parallel path only).
  When load_dependencies is False, returns (build_file_path, dependencies)
  so the caller can schedule the dependency loads itself; when True, the
  dependencies are loaded recursively here instead.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)
  # Store DEPTH for later use by generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.  Returns (path, data, dependencies) on success or
  None on failure; errors are reported on stderr rather than raised.
  """
  try:
    # Ignore SIGINT in workers: the parent handles KeyboardInterrupt and
    # terminates the pool (see LoadTargetBuildFilesParallel).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result
    (build_file_path, dependencies) = result
    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised for failures while processing input files in parallel."""
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  All mutable fields are guarded by self.condition.
  """
  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False
  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    |result| is the return value of CallLoadTargetBuildFile: either None
    (worker error) or a (path, build_file_data, dependencies) tuple.
    """
    self.condition.acquire()
    if not result:
      # The worker failed; record it and wake the main loop so it can bail.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    # Queue any dependency not seen before; 'scheduled' de-duplicates.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load |build_files| and their transitive dependencies in worker processes.

  Results are merged into |data| by LoadTargetBuildFileCallback; exits the
  process (sys.exit(1)) if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data
  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to hand out yet; wait for a callback to queue more work
        # (or to signal completion/error).
        parallel_state.condition.wait()
        continue
      dependency = parallel_state.dependencies.pop()
      parallel_state.pending += 1
      # Snapshot of the module globals the worker must mirror (applied in
      # CallLoadTargetBuildFile).
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}
      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e
  parallel_state.condition.release()
  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None
  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Return (start, end) spanning the first balanced bracket group.

  The span includes both brackets; (-1, -1) is returned when no group is
  found or the brackets are mismatched.
  """
  open_stack = []
  group_start = -1
  for position, char in enumerate(input_str):
    if char in LBRACKETS:
      open_stack.append(char)
      # Remember where the outermost group opened.
      if group_start < 0:
        group_start = position
    elif char in BRACKETS:
      # A closer with no opener, or of the wrong kind, is a mismatch.
      if not open_stack or open_stack.pop() != BRACKETS[char]:
        return (-1, -1)
      if not open_stack:
        return (group_start, position + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Hand-rolled character checks instead of a regexp: this function is
  # called a lot, and the manual tests run in about half the time.
  if type(string) is not str:
    return False
  if not string:
    return False
  if string == "0":
    return True
  # A single optional leading minus sign is allowed.
  body = string[1:] if string[0] == "-" else string
  if not body:
    return False
  # Canonical integers never have a leading zero (other than "0" itself).
  if "1" <= body[0] <= "9":
    return body.isdigit()
  return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the full substring to substitute, 'type' the
# leading operator characters, 'command_string' an optional interpreter
# name, 'is_array' a '[' marker for list contents, 'content' the payload.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Rewrite a posix-style command for the host platform.

  On Windows, 'cat' does not exist; the equivalent shell builtin is 'type'.
  |cmd| may be a string or an argv-style list; non-win32 hosts get it back
  unchanged.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    # Only the program word (element 0) can start with 'cat '.
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Variable-expansion phases handled by ExpandVariables: PHASE_EARLY expands
# '<' style variables, PHASE_LATE '>' style, and PHASE_LATELATE '^' style.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is to be expecting a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
# an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to their compiled code objects.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is one item from a 'conditions' or 'target_conditions' list:
  a flat list of alternating condition-expression strings and dicts, with
  an optional trailing false-branch dict.  Only the first expression's
  outcome determines the returned dict; the remaining items are still
  walked so that structural errors are always reported.

  Raises:
    GypError: if |condition| is not a well-formed condition list.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    # 'is None', not '== None': identity is the correct (and conventional)
    # test for the None singleton.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise.

  The condition string is first run through variable expansion, then
  compiled (with caching in |cached_conditions_asts|) and evaluated as a
  Python expression against |variables|.

  Raises:
    ValueError: if expansion yields something other than str or int.
    SyntaxError: if the expanded condition is not valid Python.
    GypError: if the condition references an undefined name.
  """
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
        'Variable expansion in this context permits str and int ' + \
        'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # NOTE: this evaluates expressions authored in GYP files.  __builtins__
    # is stripped, but conditions are still treated as trusted input.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError as e:
    # 'except ... as e' (not 'except ..., e') is valid on Python 2.6+ and
    # required on Python 3.
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError as e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Processes a 'conditions' or 'target_conditions' section in the_dict,
  depending on phase.

    early -> conditions
    late -> target_conditions
    latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if conditions_key not in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    # 'is not None': an empty dict result should still be merged.
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Adds automatic variables to |variables| from |the_dict|.

  Any keys with plain string, int, or list values in the_dict become
  automatic variables.  The variable name is the key name with a "_"
  character prepended.  |variables| is modified in place.
  """
  # items() instead of iteritems(): identical behavior on Python 2 and also
  # valid on Python 3, where iteritems() does not exist.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads |the_dict|'s "variables" dict, if any, into |variables|.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  for key, value in the_dict.get('variables', {}).items():
    # Only string, int, and list values become variables.
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # '==' (value equality), not 'is': identity comparison of string
      # literals only worked by CPython interning accident.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Arguments:
    the_dict: the dict to process; modified in place.
    phase: one of the PHASE_* constants, forwarded to expansion and
        condition processing.
    variables_in: inherited variables; copied (three times, after each stage
        that can change automatics) and never mutated.
    build_file: path of the originating build file, for error messages.
    the_dict_key: key under which the_dict lives in its parent dict, or None.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      # Reassigning an existing key during iteration is safe; no keys are
      # added or removed here.
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables and evaluates conditions for every item in the_list.

  Dict items are recursively processed (each with its own variables scope);
  str items are expanded in place, and an expansion that yields a list is
  spliced into the_list; int items are left alone.  the_list is modified in
  place.

  Raises:
    ValueError: if a string item expands to something other than a string,
        int, or list.
    TypeError: if an item is of an unsupported type.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # str(index): concatenating the bare int raised a spurious
        # "can only concatenate str" TypeError instead of this message.
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      # str(index): same concatenation bug as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    build_file_dict = data[build_file]
    for target_dict in build_file_dict.get('targets', []):
      # Qualify the name with its build file and toolset so that it is
      # unique across the whole load.
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target_dict['target_name'],
                                                  target_dict['toolset'])
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target_dict

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.

  Raises:
    GypError: if a dependency appears in an auxiliary dependency list
        ('...!' / '.../') but not in the plain 'dependencies' list.
  """
  # Dependency list keys may carry the '!' (exclusion) or '/' (regex)
  # list-modification suffixes in addition to the bare name.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  # items()/enumerate() instead of iteritems()/xrange(): same behavior on
  # Python 2, and also valid on Python 3 where the latter two are gone.
  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index, dep in enumerate(dependencies):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dep, toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the dependency in place, fully qualified.
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.

  Raises:
    GypError: if a wildcard refers to the target's own build file.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        # A wildcard has '*' in the target position, the toolset position,
        # or both.
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int(...) so that both booleans and '0'/'1'-style values work.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expansion just after the position of the removed
          # wildcard, advancing index so insertions keep their order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  result = []
  for element in l:
    if element not in seen:
      seen.add(element)
      result.append(element)
  return result
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  # items() instead of iteritems(): identical behavior on Python 2 and also
  # valid on Python 3.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        # Unify preserves order, keeping the first occurrence of each entry.
        target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
  """Returns a copy of l with every element equal to item removed."""
  # A plain comprehension replaces the original setdefault-dict trick, which
  # added no filtering value and needlessly collapsed equal elements onto a
  # single shared object.
  return [e for e in l if e != item]
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  # items() instead of iteritems() for Python 2/3 compatibility.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            # Only prune when the target explicitly opts in via the
            # prune_self_dependency variable.
            if targets[t].get('variables', {}).get('prune_self_dependency', 0):
              target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  # items() instead of iteritems() for Python 2/3 compatibility.
  for target_name, target_dict in targets.items():
    # The target's type is invariant across its dependency lists; check it
    # once here instead of once per dependency as before.
    if target_dict.get('type', None) != 'none':
      continue
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if targets[t].get('variables', {}).get('link_dependency', 0):
            target_dict[dependency_key] = \
                Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
# The dependent one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
  """Returns the dependency targets whose link_settings should be merged
  into this target.
  """
  # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
  # link_settings are propagated. So for now, we will allow it, unless the
  # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
  # False. Once chrome is fixed, we can remove this flag.
  spec = targets[self.ref]
  allow_shared = spec.get('allow_sharedlib_linksettings_propagation', True)
  return self._LinkDependenciesInternal(targets, allow_shared)
def DependenciesToLinkAgainst(self, targets):
  """Returns the list of dependency targets that are linked into this target.

  Shared libraries are included (second argument True), so that callers
  adjusting static library dependencies can link against their import libs.
  """
  return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph for |targets|.

  Returns a two-element list: a dict mapping each qualified target name to
  its DependencyGraphNode, and a flat list of target names ordered from
  dependencies to dependents (per FlattenToList).  Raises
  DependencyGraphNode.CircularException if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for
  # easy access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are
  # treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    # (An unused target_build_file computation was removed here; the build
    # file is not needed to wire up the graph.)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges the |key| settings dict from relevant dependencies into each
  target in |flat_list|.

  |key| selects both the settings section and the set of dependencies it is
  collected from: 'all_dependent_settings' (deep dependencies),
  'direct_dependent_settings' (direct and imported), or 'link_settings'
  (link dependencies).
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the set of dependencies appropriate for this settings section.
    node = dependency_nodes[target]
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        # This dependency exports no settings of this kind.
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's 'dependencies' list for linking purposes.

  Static library targets drop dependencies on other non-"hard" static
  libraries; linkable targets gain explicit dependencies on everything that
  must be linked into them.  The original list of a static library is saved
  under 'dependencies_original'.
  """
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set.  For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Preserve the pre-adjustment list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies.  If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.
      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to
      # dependencies.  e.g. If A and B depend on C and C depends on D, sort
      # them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches items whose first character (or second character, when the item is
# quoted with " or ') is one of - / $ < > ^; MakePathRelative leaves such
# items unmodified.  See the comments in MakePathRelative for the meaning of
# each character.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases the relative path |item| from |fro_file|'s directory onto
  |to_file|'s directory.

  |item| is relative to the build file dict it's coming from (|fro_file|);
  the result is relative to the build file dict it's going into (|to_file|).

  Exception: any |item| that begins with one of these special characters is
  returned without modification:
    /   already absolute (shortcut optimization; such paths would be
        returned as absolute anyway)
    $   build environment variables
    -   some build environment flags (such as -lapr-1 in a "libraries"
        section)
    < > ^  our own variable and command expansions (see ExpandVariables)
  A leading " or ' means the value is quoted, and the check applies to the
  second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item

  # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
  # temporary measure.  This should really be addressed by keeping all paths
  # in POSIX until actual project generation.
  relative_dir = gyp.common.RelativePath(os.path.dirname(fro_file),
                                         os.path.dirname(to_file))
  rebased = os.path.normpath(os.path.join(relative_dir, item))
  rebased = rebased.replace('\\', '/')
  # normpath drops a trailing slash; restore it when the input had one.
  if item[-1] == '/':
    rebased += '/'
  return rebased
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges the list |fro| into the list |to|.

  str/int items are copied over directly (rebased via MakePathRelative when
  |is_paths| is set); dicts and sublists are deep-copied via MergeDicts /
  MergeLists.  Strings not beginning with "-" (and all ints) are treated as
  singletons: they appear at most once in |to|.  |append| selects appending
  versus prepending of the incoming items.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None.  Python library objects follow this rule, so the
  # attribute doubles as a hashability test.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, test membership in the set s; otherwise fall back to a
  # linear scan of the list l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  insert_at = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  seen_hashables = set(x for x in to if is_hashable(x))

  for item in fro:
    is_singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        merged_item = MakePathRelative(to_file, fro_file, item)
      else:
        merged_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        is_singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      merged_item = {}
      MergeDicts(merged_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |merged_item| list is empty.
      merged_item = []
      MergeLists(merged_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not is_singleton or \
         not is_in_set_or_list(merged_item, seen_hashables, to):
        to.append(merged_item)
        if is_hashable(merged_item):
          seen_hashables.add(merged_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that
      # the item appears at the earliest possible position in the list.
      while is_singleton and merged_item in to:
        to.remove(merged_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(insert_at, merged_item)
      if is_hashable(merged_item):
        seen_hashables.add(merged_item)
      insert_at += 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges the dict |fro| into the dict |to|, recursively and with copy
  semantics.

  |to_file| and |fro_file| are the build files the dicts belong to; they are
  used by MakePathRelative to rebase path-valued entries.  List values are
  merged according to a policy character that may suffix the key in |fro|
  (=, +, ?); see the table in the comments below.  Raises TypeError on
  incompatible value types and GypError on conflicting list policies.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are
      # meaningless.  It's stupid to replace and append simultaneously, for
      # example.  Append and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Merges |configuration| of |target_dict|, and everything it inherits
  from, into |new_configuration_dict|.

  |visited| is the list of configurations already merged along the current
  inheritance chain; it prevents infinite recursion on inheritance loops.
  Any 'abstract' flag is stripped from the result.
  """
  # Skip if previously visited.
  if configuration in visited:
    return

  configuration_dict = target_dict['configurations'][configuration]

  # Merge every parent first so this configuration's own settings land last.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Merge this configuration itself into the new config.
  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # 'abstract' is bookkeeping only and must not survive the merge.
  if 'abstract' in new_configuration_dict:
    del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
  """Expands target-scope settings into each concrete configuration.

  Every non-abstract configuration receives a copy (via
  gyp.simple_copy.deepcopy) of the target-level settings, merged with its
  'inherit_from' chain.  The copied keys are then removed from the target
  scope, abstract configurations are dropped, and each remaining
  configuration is checked for keys that are only valid at target scope
  (raising GypError if found).
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically-first concrete (non-abstract) configuration.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        # Each regex_item is a two-element [action, pattern] pair.
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0
          # (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1
          # (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue
            # (regex searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in
    # the same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Applies ProcessListFiltersInDict to every dict in |the_list|, recursing
  through nested lists."""
  for element in the_list:
    if type(element) is dict:
      ProcessListFiltersInDict(name, element)
    elif type(element) is list:
      ProcessListFiltersInList(name, element)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  valid_types = ('executable', 'loadable_module',
                 'static_library', 'shared_library',
                 'none')
  declared_type = target_dict.get('type', None)
  if declared_type not in valid_types:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, declared_type, '/'.join(valid_types)))
  # standalone_static_library only makes sense on static_library targets.
  if (target_dict.get('standalone_static_library', 0) and
      declared_type != 'static_library'):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             declared_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track each rule_name and extension seen so far to detect conflicts.
  rules_by_name = {}
  rules_by_extension = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rules_by_name:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rules_by_name[rule_name] = rule

    # Extensions may be written with or without the leading dot.
    extension = rule['extension']
    if extension.startswith('.'):
      extension = extension[1:]
    if extension in rules_by_extension:
      raise GypError('extension %s associated with multiple rules, '
                     'target %s rules %s and %s' %
                     (extension, target,
                      rules_by_extension[extension]['rule_name'],
                      rule_name))
    rules_by_extension[extension] = rule

    # rule_sources is computed below; it must not be supplied as input.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source whose extension matches this rule, scanning
    # 'sources' plus any generator-specific extra source lists.
    matching_sources = []
    for source_key in ['sources'] + extra_sources_for_rules:
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == extension:
          matching_sources.append(source)

    if matching_sources:
      rule['rule_sources'] = matching_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the optional 'run_as' section of a target spec.

  'run_as' must be a dict containing a list-valued 'action'; it may also
  contain a string 'working_directory' and a dict 'environment'.  Raises
  GypError on any violation; does nothing when 'run_as' is absent.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # Nothing to validate.
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Each action must carry an 'action_name', an 'inputs' key (an explicitly
  empty list is accepted, a missing key is not), and a non-empty first
  command element when an 'action' command is given.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.
  """
  # Iterate over a snapshot of the items so keys can be deleted and
  # re-inserted (as strings) while looping; there's no need to revisit
  # reinserted keys because their values are already converted.
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)

    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.
  """
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root target name to fully-qualified target names.
  qualified_root_targets = []
  for root_target in root_targets:
    root_target = root_target.strip()
    matches = gyp.common.FindQualifiedTargets(root_target, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root_target)
    qualified_root_targets.extend(matches)

  # Keep each root target plus everything it transitively depends on.
  wanted_targets = {}
  for qualified_target in qualified_root_targets:
    wanted_targets[qualified_target] = targets[qualified_target]
    for dependency in dependency_nodes[qualified_target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if 'targets' not in data[build_file]:
      continue
    kept_targets = []
    for target_spec in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target_spec['target_name'],
                                                  target_spec['toolset'])
      if qualified_name in wanted_targets:
        kept_targets.append(target_spec)
    data[build_file]['targets'] = kept_targets

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Maps 'subdirectory:target_name' to the gyp file that first used it.
  # (The local is named gyp_file rather than gyp to avoid shadowing the
  # gyp module.)
  seen = {}
  for target in targets:
    # Split 'path/to/file.gyp:target_name' into its path and target name.
    path, name = target.rsplit(':', 1)
    # Split 'path/to/file.gyp' into directory and gyp file name.
    subdir, gyp_file = os.path.split(path)
    # Report the current directory as '.' so the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in seen:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, seen[key]))
    seen[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Configure this module's globals from |generator_input_info|.

  Merges the generator-specific settings into the module-level defaults
  so subsequent loading/processing honors the active generator.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  # Copy the default list so the module-level default is never mutated.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel, root_targets):
  """Load, process and validate every target reachable from |build_files|.

  Returns [flat_list, targets, data]: the dependency-ordered list of
  fully-qualified target names, the dict of target dicts keyed by qualified
  name, and the per-build-file |data| dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Pull the dependency-related keys into a scratch dict so the filters
    # only see those sections, then write the filtered results back.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
|
zakandrewking/cobrapy | refs/heads/master | cobra/solvers/esolver.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from fractions import Fraction
from os import devnull, unlink
from os.path import isfile
from subprocess import CalledProcessError, check_call, check_output
from tempfile import NamedTemporaryFile
from six.moves import zip
from cobra.core.solution import LegacySolution
from cobra.solvers import cglpk
from cobra.solvers.wrappers import *
# detect paths to system calls for esolver and gzip
# This runs at import time: if either binary is missing, importing this
# module raises RuntimeError, so the solver is never half-configured.
with open(devnull, "w") as DEVNULL:
    try:
        ESOLVER_COMMAND = check_output(["which", "esolver"],
                                       stderr=DEVNULL).strip()
        __esolver_version__ = check_output(["esolver", "-v"], stderr=DEVNULL)
    except CalledProcessError:
        raise RuntimeError("esolver command not found")
    try:
        GZIP_COMMAND = check_output(["which", "gzip"], stderr=DEVNULL).strip()
    except CalledProcessError:
        raise RuntimeError("gzip command not found")
# Remove the module-level handle so it is not mistaken for the per-solve
# DEVNULL used inside Esolver.solve_problem.
del DEVNULL
solver_name = "esolver"
class Esolver(cglpk.GLP):
    """An LP solved exactly through the external QSopt_ex "esolver" command.

    The LP is stored using a GLPK object, and written out to an
    LP file which is then solved by the esolver command.  The solution
    (primal and dual values) is parsed back from the file esolver writes,
    either as floats or, if ``rational_solution`` is set, as exact
    Fractions.
    """

    def __init__(self, cobra_model=None):
        cglpk.GLP.__init__(self, cobra_model)
        self.solution_filepath = None   # path of the last solution file
        self.basis_filepath = None      # basis file used to warm-start solves
        self.rational_solution = False  # if True, values are Fraction, not float
        self.verbose = False            # if True, echo the esolver command line
        self.clean_up = True            # clean up files

    def _clean(self, filename):
        """remove old files"""
        if self.clean_up and filename is not None and isfile(filename):
            unlink(filename)

    def set_parameter(self, parameter_name, value):
        """Set a solver attribute by name.

        Raises ValueError for names that are not existing attributes.
        """
        if parameter_name == "GLP":
            raise Exception("can not be set this way")
        if parameter_name == "objective_sense":
            self.set_objective_sense(value)
            # Bug fix: return once the sense has been applied.  Previously
            # control fell through to the hasattr() check below, which could
            # raise even though the objective sense was already set.
            return
        if not hasattr(self, parameter_name):
            raise ValueError("Unknown parameter '%s'" % parameter_name)
        setattr(self, parameter_name, value)

    def solve_problem(self, **solver_parameters):
        """Write the LP out to a file and run esolver on it."""
        if "objective_sense" in solver_parameters:
            self.set_objective_sense(solver_parameters.pop("objective_sense"))
        for key, value in solver_parameters.items():
            self.set_parameter(key, value)
        # remove the old solution file
        self._clean(self.solution_filepath)
        with NamedTemporaryFile(suffix=".lp", delete=False) as f:
            lp_filepath = f.name
        self.write(lp_filepath)
        existing_basis = self.basis_filepath
        with NamedTemporaryFile(suffix=".bas", delete=False) as f:
            self.basis_filepath = f.name
        # delete defaults to True here: only the unique name is wanted,
        # esolver creates the solution file itself.
        with NamedTemporaryFile(suffix=".sol") as f:
            self.solution_filepath = f.name
        command = [ESOLVER_COMMAND, "-b", self.basis_filepath,
                   "-O", self.solution_filepath]
        # Warm-start from the previous basis when one exists.
        if existing_basis is not None and isfile(existing_basis):
            command.extend(["-B", existing_basis])
        command.extend(["-L", lp_filepath])
        command_kwargs = {}
        if self.verbose:
            print(" ".join(command))
            DEVNULL = None
        else:
            DEVNULL = open(devnull, 'wb')
            command_kwargs["stdout"] = DEVNULL
            command_kwargs["stderr"] = DEVNULL
        try:
            check_call(command, **command_kwargs)
            failed = False
        except CalledProcessError:
            failed = True
        if failed:
            # Keep the previous basis instead of the failed run's file.
            self.basis_filepath = existing_basis
            existing_basis = None
            # Sometimes on failure a solution isn't written out
            if not isfile(self.solution_filepath):
                with open(self.solution_filepath, "w") as outfile:
                    outfile.write("=infeasible\n")
        elif isfile(self.solution_filepath + ".gz"):
            # the solution may be written out compressed
            check_call([GZIP_COMMAND, "-d", self.solution_filepath + ".gz"])
        if DEVNULL is not None:
            DEVNULL.close()
        self._clean(lp_filepath)
        self._clean(existing_basis)  # replaced with the new basis

    def get_status(self):
        """Return the solver status string from the solution file."""
        with open(self.solution_filepath) as infile:
            return infile.readline().split("=")[1].strip().lower()

    def _format(self, value):
        """convert a string value into either a fraction or float"""
        value = Fraction(value)
        return value if self.rational_solution else float(value)

    def get_objective_value(self):
        """Return the objective value; raises if the solve was not optimal."""
        with open(self.solution_filepath) as infile:
            status = infile.readline().split("=")[1].strip().lower()
            if status != "optimal":
                raise RuntimeError("status not optimal")
            infile.readline()
            return self._format(infile.readline().split("=")[1].strip())

    def format_solution(self, cobra_model):
        """Parse the esolver solution file into a LegacySolution."""
        m = cobra_model
        solution = LegacySolution(None)
        with open(self.solution_filepath) as infile:
            solution.status = infile.readline().split("=")[1].strip().lower()
            if solution.status != "optimal":
                return solution
            infile.readline()
            solution.f = self._format(Fraction(infile.readline()
                                               .split("=")[1].strip()))
            infile.readline()
            # Primal values: lines of "name = value" until a section header.
            value_dict = {}
            for line in infile:
                if line.endswith(":\n"):
                    break
                varname, value = line.split("=")
                value_dict[varname.strip()] = self._format(value.strip())
            # Dual values follow in the same format.
            dual_dict = {}
            for line in infile:
                if line.endswith(":\n"):
                    break
                varname, value = line.split("=")
                dual_dict[varname.strip()] = self._format(value.strip())
        # Variables are named x_1..x_n (reactions) and r_1..r_m (metabolites);
        # anything esolver omitted is treated as 0.
        solution.x = [value_dict.get("x_%d" % (i + 1), 0)
                      for i in range(len(m.reactions))]
        solution.x_dict = {r.id: v for r, v in zip(m.reactions, solution.x)}
        solution.y = [dual_dict.get("r_%d" % (i + 1), 0)
                      for i in range(len(m.metabolites))]
        solution.y_dict = {m.id: v for m, v in zip(m.metabolites, solution.y)}
        return solution
# Module-level wrappers for the classmethods, so callers can use
# esolver.create_problem(...) / esolver.solve(...) without naming the class.
create_problem = Esolver.create_problem
solve = Esolver.solve
|
christianurich/VIBe2UrbanSim | refs/heads/master | 3rdparty/opus/src/opus_gui/scenarios_manager/__init__.py | 538 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
|
afonsinoguimaraes/repository.magellan | refs/heads/master | plugin.video.SportsDevil/lib/downloader.py | 25 | # -*- coding: utf-8 -*-
import common
import urllib
import os.path
import xbmc, xbmcgui
class Downloader(object):
    """Downloads video files with an XBMC progress dialog, or hands the
    URL off to the JDownloader plugin."""
    def __init__(self):
        # Progress/confirmation dialog; (re)created when needed.
        self.pDialog = None
    def downloadWithJDownloader(self, url, title):
        """Send |url| to the JDownloader plugin instead of downloading here."""
        common.runPlugin('plugin://plugin.program.jdownloader/?action=addlink&url=' + url)
        common.showNotification('Sent to JDownloader:')
    def downloadMovie(self, url, path, title, extension):
        """Download |url| into |path| as |title| + |extension|.

        Returns the final file path on success, or None on bad arguments,
        failure, or user cancellation.
        """
        if not os.path.exists(path):
            common.log('Path does not exist')
            return None
        if title == '':
            common.log('No title given')
            return None
        file_path = xbmc.makeLegalFilename(os.path.join(path, title + extension))
        file_path = urllib.unquote_plus(file_path)
        # Overwrite existing file?
        if os.path.isfile(file_path):
            self.pDialog = xbmcgui.Dialog()
            if not common.ask('File already exists. Overwrite?\n' + os.path.basename(file_path)):
                # User declined to overwrite: prompt for a new title via the
                # on-screen keyboard and rebuild the destination path.
                title = common.showOSK(urllib.unquote_plus(title), common.translate(30102))
                if not title:
                    return None
                file_path = xbmc.makeLegalFilename(os.path.join(path, title + extension))
                file_path = urllib.unquote_plus(file_path)
        success = self.__download(url, file_path)
        if success:
            return file_path
        else:
            return None
    def __download(self, url, file_path):
        """Fetch |url| into |file_path| with a progress dialog.

        Returns True on success, False on IOError or user cancellation.
        """
        try:
            # Setup progress dialog and download
            self.pDialog = xbmcgui.DialogProgress()
            self.pDialog.create('SportsDevil', common.translate(30050), common.translate(30051))
            urllib.urlretrieve(url, file_path, self.video_report_hook)
            self.pDialog.close()
            return True
        except IOError:
            self.pDialog.close()
            common.showError(common.translate(30053))
        except KeyboardInterrupt:
            # Raised by video_report_hook when the user cancels the dialog.
            self.pDialog.close()
        return False
    def video_report_hook(self, count, blocksize, totalsize):
        """urlretrieve reporthook: update progress and honor cancellation."""
        percent = int(float(count * blocksize * 100) / totalsize)
        self.pDialog.update(percent, common.translate(30050), common.translate(30051))
        if self.pDialog.iscanceled():
            # Abort the transfer; caught in __download above.
            raise KeyboardInterrupt
|
BlakeTeam/VHDLCodeGenerator | refs/heads/master | blocks/Standard Library/Multiplexer.py | 1 | #-------------------------------------------------------------------------------
# PROJECT: VHDL Code Generator
# NAME: Dynamic Multiplexer
#
# LICENSE: GNU-GPL V3
#-------------------------------------------------------------------------------
# Plugin metadata read by the block loader: marks this module as a block
# and names its block class and its configuration-window class.
__isBlock__ = True
__className__ = "Multiplexer"
__win__ = "MuxWindow"
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic
from lib.Block import *
class Multiplexer(Block):
    """ MULTIPLEXER

    Generates VHDL concurrent statements for an N-input multiplexer with a
    configurable input width, default output symbol, and an optional enable
    input (tri-stating the output when disabled).
    """
    # TODO: Specifications of multiplexer (Documentation)
    def __init__(self,system,numInput,sizeInput,defaultOutput='Z',enabler=True,enablerActiveSymbol = '0'):
        """
        :param system: owning system the block is registered with
        :param numInput: Total of multiplexed inputs
        :param sizeInput: Size (bit width) of each input
        :stdLogic defaultOutput: It only can be 0/1/Z
        :param enabler: whether to add an enable (EN) input
        :bit enablerActiveSymbol: It only can be 0/1. No hi Z available
        """
        # Repeat the default symbol across the full output width.
        self.defaultOutput = defaultOutput*sizeInput
        self.defaultOutput = self.defaultOutput.upper()
        self.enablerActiveSymbol = enablerActiveSymbol
        self.enabler = enabler
        self.numMuxIn = numInput
        self.selBits = len(bin(numInput - 1)) - 2 # bits needed for the input selector
        self.name = "Multiplexer"
        self.HiZ = "Z"*sizeInput
        # Ports: numInput data inputs, one selector, plus EN if enabled.
        input_vector = [sizeInput]*self.numMuxIn + [self.selBits] + ([1] if enabler else [])
        output_vector = [sizeInput]
        super().__init__(input_vector,output_vector,system,self.name)
        # Settings port names
        self.setInputName("SELECT",self.numMuxIn)
        if self.enabler == True:
            self.setInputName("EN",self.numMuxIn + 1)
        self.setOutputName("out",0)
        # Internal signal holding the selected input before the EN gate.
        self.variables = [("CHOSEN",sizeInput)]
    def generate(self):
        """Return the VHDL text implementing this multiplexer."""
        filetext = ""
        if self.enabler == False:
            # No enable: drive the output directly from the selected input.
            filetext += "%s <= "%(self.getOutputSignalName(0))
            for i in range(self.numMuxIn):
                selbinary = bin(i)[2:]
                filetext += "%s when (%s = %s) else\n"%(self.getInputSignalName(i),self.getInputSignalName(self.numMuxIn),"'"+selbinary+"'" if self.getInputSignalSize(self.numMuxIn) == 1 else '"'+selbinary+'"')
            filetext += "%s when others;\n"%(("'"+self.defaultOutput+"'") if (len(self.defaultOutput) == 1) else ('"'+self.defaultOutput+'"'))
        else:
            # With enable: select into CHOSEN, then gate CHOSEN onto the
            # output (high-impedance when EN is inactive).
            filetext += "%s <= "%(self.name + "__" + self.variables[0][0])
            for i in range(self.numMuxIn):
                selbinary = bin(i)[2:]
                filetext += "%s when (%s = %s) else\n"%(self.getInputSignalName(i),self.getInputSignalName(self.numMuxIn),"'"+selbinary+"'" if self.getInputSignalSize(self.numMuxIn) == 1 else '"'+selbinary+'"')
            filetext += "%s when others;\n"%("'"+self.defaultOutput+"'" if len(self.defaultOutput) == 1 else '"'+self.defaultOutput+'"')
            filetext += "%s <= %s when %s = %s else\n"%(self.getOutputSignalName(0),self.getVariableSignalName(0),self.getInputSignalName(self.numMuxIn + 1),"'"+self.enablerActiveSymbol+"'")
            filetext += "%s when others;\n"%(("'"+self.HiZ+"'") if (len(self.HiZ) == 1) else ('"'+self.HiZ+'"'))
        return filetext
class MuxWindow(QWidget):
    """Configuration dialog for creating a new Multiplexer block."""
    # Emitted with [numInput, sizeInput, defaultOutput, enabler, activeSymbol]
    # when the user accepts the dialog.
    accept = pyqtSignal(list)
    def __init__(self,parent = None):
        super().__init__()
        self.ui = uic.loadUi("blocks\\Standard Library\\Multiplexer.ui",self)
        self.ui.acceptButton.clicked.connect(self.accepted)
    def accepted(self):
        """Collect the dialog values, emit them, and close the window."""
        numInput = self.ui.numInput.value()
        sizeInput = self.ui.sizeInput.value()
        includeEnabler = self.ui.enabler.isChecked()
        if includeEnabler:
            activeSymbol = '0' if self.ui.symb0.isChecked() else '1'
        else:
            activeSymbol = None
        defaultOutput = '0' if self.ui.defOut0.isChecked() else ('1' if self.ui.defOut1.isChecked() else 'Z')
        self.accept.emit([numInput,sizeInput,defaultOutput,includeEnabler,activeSymbol])
        self.close()
|
BeiLuoShiMen/nupic | refs/heads/master | tests/swarming/__init__.py | 175 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
|
openshine/ModemManager | refs/heads/master | test/enable.py | 3 | #!/usr/bin/python
# -*- Mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details:
#
# Copyright (C) 2009 - 2010 Red Hat, Inc.
#
import sys, dbus
# D-Bus names for the ModemManager service.
MM_DBUS_SERVICE='org.freedesktop.ModemManager'
MM_DBUS_PATH='/org/freedesktop/ModemManager'
MM_DBUS_INTERFACE_MODEM='org.freedesktop.ModemManager.Modem'
bus = dbus.SystemBus()
# The modem may be given as a full D-Bus object path or a bare modem index.
objpath = sys.argv[1]
if objpath[:1] != '/':
    objpath = "/org/freedesktop/ModemManager/Modems/" + str(objpath)
proxy = bus.get_object(MM_DBUS_SERVICE, objpath)
modem = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM)
# Ask ModemManager to enable the modem.
modem.Enable (True)
|
thePugalist/MozDef | refs/heads/master | alerts/bro_notice.py | 12 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez averez@mozilla.com
from lib.alerttask import AlertTask
class AlertBroNotice(AlertTask):
    """Raise a MozDef alert for each Bro notice event from the last 30
    minutes, using filters imported from a Kibana dashboard."""
    def main(self):
        # look for events in last 30 mins
        date_timedelta = dict(minutes=30)
        # Configure filters by importing a kibana dashboard
        self.filtersFromKibanaDash('bro_notice_dashboard.json', date_timedelta)
        # Search events
        self.searchEventsSimple()
        self.walkEvents()
    # Set alert properties
    def onEvent(self, event):
        category = 'bro'
        tags = ['bro']
        severity = 'NOTICE'
        # the summary of the alert is the one of the event
        summary = event['_source']['summary']
        # Create the alert object based on these properties
        return self.createAlertDict(summary, category, tags, [event], severity)
Semi-global/edx-platform | refs/heads/master | lms/djangoapps/notifier_api/tests.py | 115 | import itertools
import ddt
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from django_comment_common.models import Role, Permission
from lang_pref import LANGUAGE_KEY
from notification_prefs import NOTIFICATION_PREF_KEY
from notifier_api.views import NotifierUsersViewSet
from opaque_keys.edx.locator import CourseLocator
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.tests.factories import UserPreferenceFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@override_settings(EDX_API_KEY="test_api_key")
class NotifierUsersViewSetTest(UrlResetMixin, ModuleStoreTestCase):
    """Tests for the notifier user API list and detail endpoints.

    Covers API-key auth, basic user info, per-course cohort/moderator info,
    preference serialization, pagination, and query counts.
    """
    def setUp(self):
        super(NotifierUsersViewSetTest, self).setUp()
        self.courses = []
        self.cohorts = []
        self.user = UserFactory()
        self.notification_pref = UserPreferenceFactory(
            user=self.user,
            key=NOTIFICATION_PREF_KEY,
            value="notification pref test value"
        )
        self.list_view = NotifierUsersViewSet.as_view({"get": "list"})
        self.detail_view = NotifierUsersViewSet.as_view({"get": "retrieve"})
    def _set_up_course(self, is_course_cohorted, is_user_cohorted, is_moderator):
        # Create a course with the requested cohort configuration, enroll
        # self.user, and optionally cohort them and/or grant Moderator.
        cohort_config = {"cohorted": True} if is_course_cohorted else {}
        course = CourseFactory(
            number=("TestCourse{}".format(len(self.courses))),
            cohort_config=cohort_config
        )
        self.courses.append(course)
        CourseEnrollmentFactory(user=self.user, course_id=course.id)
        if is_user_cohorted:
            cohort = CourseUserGroup.objects.create(
                name="Test Cohort",
                course_id=course.id,
                group_type=CourseUserGroup.COHORT
            )
            cohort.users.add(self.user)
            self.cohorts.append(cohort)
        if is_moderator:
            moderator_perm, _ = Permission.objects.get_or_create(name="see_all_cohorts")
            moderator_role = Role.objects.create(name="Moderator", course_id=course.id)
            moderator_role.permissions.add(moderator_perm)
            self.user.roles.add(moderator_role)
    def _assert_basic_user_info_correct(self, user, result_user):
        # Shared assertions on the id/email/name fields of a serialized user.
        self.assertEqual(result_user["id"], user.id)
        self.assertEqual(result_user["email"], user.email)
        self.assertEqual(result_user["name"], user.profile.name)
    def test_without_api_key(self):
        # Requests lacking the X-EDX-API-KEY header must be rejected.
        request = RequestFactory().get("dummy")
        for view in [self.list_view, self.detail_view]:
            response = view(request)
            self.assertEqual(response.status_code, 403)
    # Detail view tests
    def _make_detail_request(self):
        request = RequestFactory().get("dummy", HTTP_X_EDX_API_KEY=settings.EDX_API_KEY)
        return self.detail_view(
            request,
            **{NotifierUsersViewSet.lookup_field: str(self.user.id)}
        )
    def _get_detail(self):
        # Fetch the detail endpoint and verify the response envelope.
        response = self._make_detail_request()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            set(response.data.keys()),
            {"id", "email", "name", "preferences", "course_info"}
        )
        return response.data
    def test_detail_invalid_user(self):
        # Users without the notification preference are not in the queryset.
        UserPreference.objects.all().delete()
        response = self._make_detail_request()
        self.assertEqual(response.status_code, 404)
    def test_basic_user_info(self):
        result = self._get_detail()
        self._assert_basic_user_info_correct(self.user, result)
    def test_course_info(self):
        # Exercise all combinations of course cohorting / user cohorting /
        # moderator status and check the computed per-course info.
        expected_course_info = {}
        for is_course_cohorted, is_user_cohorted, is_moderator in (
            itertools.product([True, False], [True, False], [True, False])
        ):
            self._set_up_course(is_course_cohorted, is_user_cohorted, is_moderator)
            expected_course_info[unicode(self.courses[-1].id)] = {
                "cohort_id": self.cohorts[-1].id if is_user_cohorted else None,
                "see_all_cohorts": is_moderator or not is_course_cohorted
            }
        result = self._get_detail()
        self.assertEqual(result["course_info"], expected_course_info)
    def test_course_info_unenrolled(self):
        self._set_up_course(False, False, False)
        course_id = self.courses[0].id
        CourseEnrollment.unenroll(self.user, course_id)
        result = self._get_detail()
        self.assertNotIn(unicode(course_id), result["course_info"])
    def test_course_info_no_enrollments(self):
        result = self._get_detail()
        self.assertEqual(result["course_info"], {})
    def test_course_info_non_existent_course_enrollment(self):
        # Enrollments pointing at courses missing from the modulestore
        # should be silently skipped.
        CourseEnrollmentFactory(
            user=self.user,
            course_id=CourseLocator(org="dummy", course="dummy", run="non_existent")
        )
        result = self._get_detail()
        self.assertEqual(result["course_info"], {})
    def test_preferences(self):
        # Only the notification and language preferences are exposed.
        lang_pref = UserPreferenceFactory(
            user=self.user,
            key=LANGUAGE_KEY,
            value="language pref test value"
        )
        UserPreferenceFactory(user=self.user, key="non_included_key")
        result = self._get_detail()
        self.assertEqual(
            result["preferences"],
            {
                NOTIFICATION_PREF_KEY: self.notification_pref.value,
                LANGUAGE_KEY: lang_pref.value,
            }
        )
    # List view tests
    def _make_list_request(self, page, page_size):
        request = RequestFactory().get(
            "dummy",
            {"page": page, "page_size": page_size},
            HTTP_X_EDX_API_KEY=settings.EDX_API_KEY
        )
        return self.list_view(request)
    def _get_list(self, page=1, page_size=None):
        # Fetch one page of the list endpoint and verify the envelope.
        response = self._make_list_request(page, page_size)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            set(response.data.keys()),
            {"count", "next", "previous", "results"}
        )
        return response.data["results"]
    def test_no_users(self):
        UserPreference.objects.all().delete()
        results = self._get_list()
        self.assertEqual(len(results), 0)
    def test_multiple_users(self):
        other_user = UserFactory()
        other_notification_pref = UserPreferenceFactory(
            user=other_user,
            key=NOTIFICATION_PREF_KEY,
            value="other value"
        )
        self._set_up_course(is_course_cohorted=True, is_user_cohorted=True, is_moderator=False)
        self._set_up_course(is_course_cohorted=False, is_user_cohorted=False, is_moderator=False)
        # Users have different sets of enrollments
        CourseEnrollmentFactory(user=other_user, course_id=self.courses[0].id)
        result_map = {result["id"]: result for result in self._get_list()}
        self.assertEqual(set(result_map.keys()), {self.user.id, other_user.id})
        for user in [self.user, other_user]:
            self._assert_basic_user_info_correct(user, result_map[user.id])
        self.assertEqual(
            result_map[self.user.id]["preferences"],
            {NOTIFICATION_PREF_KEY: self.notification_pref.value}
        )
        self.assertEqual(
            result_map[other_user.id]["preferences"],
            {NOTIFICATION_PREF_KEY: other_notification_pref.value}
        )
        self.assertEqual(
            result_map[self.user.id]["course_info"],
            {
                unicode(self.courses[0].id): {
                    "cohort_id": self.cohorts[0].id,
                    "see_all_cohorts": False,
                },
                unicode(self.courses[1].id): {
                    "cohort_id": None,
                    "see_all_cohorts": True,
                },
            }
        )
        self.assertEqual(
            result_map[other_user.id]["course_info"],
            {
                unicode(self.courses[0].id): {
                    "cohort_id": None,
                    "see_all_cohorts": False,
                },
            }
        )
    @ddt.data(
        3, # Factor of num of results
        5, # Non-factor of num of results
        12, # Num of results
        15 # More than num of results
    )
    def test_pagination(self, page_size):
        num_users = 12
        users = [self.user]
        while len(users) < num_users:
            new_user = UserFactory()
            users.append(new_user)
            UserPreferenceFactory(user=new_user, key=NOTIFICATION_PREF_KEY)
        # Integer division under Python 2: number of pages rounded up.
        num_pages = (num_users - 1) / page_size + 1
        result_list = []
        for i in range(1, num_pages + 1):
            result_list.extend(self._get_list(page=i, page_size=page_size))
        result_map = {result["id"]: result for result in result_list}
        self.assertEqual(len(result_list), num_users)
        for user in users:
            self._assert_basic_user_info_correct(user, result_map[user.id])
        # One page past the end must 404.
        self.assertEqual(
            self._make_list_request(page=(num_pages + 1), page_size=page_size).status_code,
            404
        )
    def test_db_access(self):
        for _ in range(10):
            new_user = UserFactory()
            UserPreferenceFactory(user=new_user, key=NOTIFICATION_PREF_KEY)
        # The number of queries is one for the users plus one for each prefetch
        # in NotifierUsersViewSet (roles__permissions does one for each table).
        with self.assertNumQueries(6):
            self._get_list()
|
bwsblake/lettercounter | refs/heads/master | django-norel-env/lib/python2.7/site-packages/django/utils/unittest/util.py | 751 | """Various utility functions."""
# Marks this module's frames for omission from unittest failure tracebacks.
__unittest = True
# Maximum repr length before safe_repr truncates (when short=True).
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
    """repr() that never raises; truncated to _MAX_LENGTH when short."""
    try:
        result = repr(obj)
    except Exception:
        # Fall back to the base object repr if the type's __repr__ is broken.
        result = object.__repr__(obj)
    if short and len(result) >= _MAX_LENGTH:
        return result[:_MAX_LENGTH] + ' [truncated]...'
    return result
def safe_str(obj):
    """str() that never raises, falling back to the base object __str__."""
    try:
        result = str(obj)
    except Exception:
        result = object.__str__(obj)
    return result
def strclass(cls):
    """Return the dotted 'module.ClassName' for the given class."""
    module, name = cls.__module__, cls.__name__
    return "%s.%s" % (module, name)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    missing = []
    unexpected = []
    exp_idx = act_idx = 0
    # Classic merge walk over the two sorted lists; an IndexError from
    # either side means that list is exhausted and the remainder of the
    # other belongs wholly to its output bucket.
    while True:
        try:
            exp_item = expected[exp_idx]
            act_item = actual[act_idx]
            if exp_item < act_item:
                missing.append(exp_item)
                exp_idx += 1
                while expected[exp_idx] == exp_item:
                    exp_idx += 1
            elif exp_item > act_item:
                unexpected.append(act_item)
                act_idx += 1
                while actual[act_idx] == act_item:
                    act_idx += 1
            else:
                # Equal items: skip the duplicates on both sides.  The
                # finally clause guarantees the actual side advances even
                # if the expected side runs off the end.
                exp_idx += 1
                try:
                    while expected[exp_idx] == exp_item:
                        exp_idx += 1
                finally:
                    act_idx += 1
                    while actual[act_idx] == act_item:
                        act_idx += 1
        except IndexError:
            missing.extend(expected[exp_idx:])
            unexpected.extend(actual[act_idx:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    Note: both input lists are consumed/mutated.  As it does a linear
    search per item (remove) it has O(n*n) performance.
    """
    missing = []
    unexpected = []
    while expected:
        candidate = expected.pop()
        try:
            actual.remove(candidate)
        except ValueError:
            missing.append(candidate)
        if ignore_duplicate:
            # Strip any remaining copies of this item from both lists.
            for remaining in expected, actual:
                try:
                    while True:
                        remaining.remove(candidate)
                except ValueError:
                    pass
    if ignore_duplicate:
        # Drain what's left of actual, deduplicating as we go.
        while actual:
            candidate = actual.pop()
            unexpected.append(candidate)
            try:
                while True:
                    actual.remove(candidate)
            except ValueError:
                pass
        return missing, unexpected
    # anything left in actual is unexpected
    return missing, actual
|
hanzz/qsimkit | refs/heads/master | 3rdparty/pythonqt21-qt462/examples/PyCustomMetaTypeExample/example.py | 3 | from PythonQt.example import CustomObject
# create a new CustomObject instance (type exported by PythonQt.example)
custom = CustomObject("John","Doe")
# print the methods available on the wrapped object
print dir(custom)
# set a new first and last name
custom.setFirstName("Mike")
custom.setLastName("Michels")
# read the name back and print it
print custom.firstName() + " " + custom.lastName();
|
gohin/django | refs/heads/master | tests/forms_tests/views.py | 452 | from django import forms
from django.views.generic.edit import UpdateView
from .models import Article
class ArticleForm(forms.ModelForm):
    """ModelForm for Article that preserves whitespace in the content field."""
    # strip=False keeps leading/trailing whitespace the user entered.
    content = forms.CharField(strip=False, widget=forms.Textarea)
    class Meta:
        model = Article
        fields = '__all__'
class ArticleFormView(UpdateView):
    """UpdateView editing an Article via ArticleForm; redirects to '/' on success."""
    model = Article
    success_url = '/'
    form_class = ArticleForm
|
eliben/code-for-blog | refs/heads/master | 2017/parsing-rst/rst-link-check.py | 1 | # Simple link checker for reStructuredText (.rst) files.
#
# Run with Python 3.4+
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import argparse
import sys
import urllib.error
import urllib.request
import docutils.frontend
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
def check_link(uri):
    """Check a single URI, printing 'OK' or an error description.

    Best-effort: all failures are reported and swallowed so that one bad
    link never aborts the whole run.
    """
    print('... Checking {0}...'.format(uri), end='')
    try:
        # Use the response as a context manager so its socket is closed
        # promptly; the original leaked one connection per checked link.
        with urllib.request.urlopen(uri, timeout=2.0):
            pass
        print('OK')
    except urllib.error.URLError as e:
        print('ERROR: unable to open --', e.reason)
    except Exception as e:
        # Deliberate catch-all: a link checker should report, not crash.
        print('ERROR: exception while opening --', e)
class LinkCheckerVisitor(docutils.nodes.GenericNodeVisitor):
    """Docutils node visitor that runs check_link on every reference node."""
    def visit_reference(self, node):
        # Catch reference nodes for link-checking; 'refuri' holds the target.
        check_link(node['refuri'])
    def default_visit(self, node):
        # Pass all other nodes through untouched.
        pass
def check_links_in_rst(fileobj):
    """Parse *fileobj* as reStructuredText and link-check every reference."""
    # Parse the file into a document with the rst parser.
    # NOTE(review): docutils.frontend.OptionParser is deprecated in newer
    # docutils releases — confirm the pinned docutils version before upgrading.
    default_settings = docutils.frontend.OptionParser(
        components=(docutils.parsers.rst.Parser,)).get_default_values()
    document = docutils.utils.new_document(fileobj.name, default_settings)
    parser = docutils.parsers.rst.Parser()
    parser.parse(fileobj.read(), document)
    # Visit the parsed document with our link-checking visitor.
    visitor = LinkCheckerVisitor(document)
    document.walk(visitor)
def main():
    """Read an .rst file named on the command line (or stdin) and check its links."""
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
    options = parser.parse_args()
    print('Reading', options.infile.name)
    check_links_in_rst(options.infile)
if __name__ == '__main__':
main()
|
keerts/home-assistant | refs/heads/dev | tests/components/notify/test_apns.py | 6 | """The tests for the APNS component."""
import os
import unittest
from unittest.mock import patch
from unittest.mock import Mock
from apns2.errors import Unregistered
import homeassistant.components.notify as notify
from homeassistant.bootstrap import setup_component
from homeassistant.components.notify.apns import ApnsNotificationService
from homeassistant.config import load_yaml_config_file
from homeassistant.core import State
from tests.common import assert_setup_component, get_test_home_assistant
# Minimal apns notify-platform configuration shared by most tests below.
CONFIG = {
    notify.DOMAIN: {
        'platform': 'apns',
        'name': 'test_app',
        'topic': 'testapp.appname',
        'cert_file': 'test_app.pem'
    }
}
class TestApns(unittest.TestCase):
    """Test the APNS component."""
    def setUp(self): # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def tearDown(self): # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    @patch('os.path.isfile', Mock(return_value=True))
    @patch('os.access', Mock(return_value=True))
    def _setup_notify(self):
        # Certificate-file checks are patched out so setup succeeds without
        # a real 'test_app.pem' on disk.
        with assert_setup_component(1) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, CONFIG)
        assert handle_config[notify.DOMAIN]
    @patch('os.path.isfile', return_value=True)
    @patch('os.access', return_value=True)
    def test_apns_setup_full(self, mock_access, mock_isfile):
        """Test setup with all data."""
        config = {
            'notify': {
                'platform': 'apns',
                'name': 'test_app',
                'sandbox': 'True',
                'topic': 'testapp.appname',
                'cert_file': 'test_app.pem'
            }
        }
        with assert_setup_component(1) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert handle_config[notify.DOMAIN]
    def test_apns_setup_missing_name(self):
        """Test setup with missing name."""
        config = {
            'notify': {
                'platform': 'apns',
                'topic': 'testapp.appname',
                'cert_file': 'test_app.pem',
            }
        }
        with assert_setup_component(0) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert not handle_config[notify.DOMAIN]
    def test_apns_setup_missing_certificate(self):
        """Test setup with missing certificate."""
        config = {
            'notify': {
                'platform': 'apns',
                'name': 'test_app',
                'topic': 'testapp.appname',
            }
        }
        with assert_setup_component(0) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert not handle_config[notify.DOMAIN]
    def test_apns_setup_missing_topic(self):
        """Test setup with missing topic."""
        config = {
            'notify': {
                'platform': 'apns',
                'name': 'test_app',
                'cert_file': 'test_app.pem',
            }
        }
        with assert_setup_component(0) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert not handle_config[notify.DOMAIN]
    def test_register_new_device(self):
        """Test registering a new device with a name."""
        # The platform persists registered devices in a per-app YAML file.
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('5678: {name: test device 2}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(notify.DOMAIN,
                                                'apns_test_app',
                                                {'push_id': '1234',
                                                 'name': 'test device'},
                                                blocking=True))
        devices = {str(key): value for (key, value) in
                   load_yaml_config_file(devices_path).items()}
        test_device_1 = devices.get('1234')
        test_device_2 = devices.get('5678')
        self.assertIsNotNone(test_device_1)
        self.assertIsNotNone(test_device_2)
        self.assertEqual('test device', test_device_1.get('name'))
        os.remove(devices_path)
    def test_register_device_without_name(self):
        """Test registering a device without a name."""
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('5678: {name: test device 2}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(notify.DOMAIN, 'apns_test_app',
                                                {'push_id': '1234'},
                                                blocking=True))
        devices = {str(key): value for (key, value) in
                   load_yaml_config_file(devices_path).items()}
        test_device = devices.get('1234')
        self.assertIsNotNone(test_device)
        self.assertIsNone(test_device.get('name'))
        os.remove(devices_path)
    def test_update_existing_device(self):
        """Test updating an existing device."""
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1}\n')
            out.write('5678: {name: test device 2}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(notify.DOMAIN,
                                                'apns_test_app',
                                                {'push_id': '1234',
                                                 'name': 'updated device 1'},
                                                blocking=True))
        devices = {str(key): value for (key, value) in
                   load_yaml_config_file(devices_path).items()}
        test_device_1 = devices.get('1234')
        test_device_2 = devices.get('5678')
        self.assertIsNotNone(test_device_1)
        self.assertIsNotNone(test_device_2)
        self.assertEqual('updated device 1', test_device_1.get('name'))
        os.remove(devices_path)
    def test_update_existing_device_with_tracking_id(self):
        """Test updating an existing device that has a tracking id."""
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1, '
                      'tracking_device_id: tracking123}\n')
            out.write('5678: {name: test device 2, '
                      'tracking_device_id: tracking456}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(notify.DOMAIN,
                                                'apns_test_app',
                                                {'push_id': '1234',
                                                 'name': 'updated device 1'},
                                                blocking=True))
        devices = {str(key): value for (key, value) in
                   load_yaml_config_file(devices_path).items()}
        test_device_1 = devices.get('1234')
        test_device_2 = devices.get('5678')
        self.assertIsNotNone(test_device_1)
        self.assertIsNotNone(test_device_2)
        # Updating the name must not clobber the stored tracking ids.
        self.assertEqual('tracking123',
                         test_device_1.get('tracking_device_id'))
        self.assertEqual('tracking456',
                         test_device_2.get('tracking_device_id'))
        os.remove(devices_path)
    @patch('apns2.client.APNsClient')
    def test_send(self, mock_client):
        """Test sending a notification with message data."""
        send = mock_client.return_value.send_notification
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(
            'notify', 'test_app',
            {'message': 'Hello', 'data': {
                'badge': 1,
                'sound': 'test.mp3',
                'category': 'testing'}},
            blocking=True))
        self.assertTrue(send.called)
        self.assertEqual(1, len(send.mock_calls))
        # send_notification(target, payload): verify both positional args.
        target = send.mock_calls[0][1][0]
        payload = send.mock_calls[0][1][1]
        self.assertEqual('1234', target)
        self.assertEqual('Hello', payload.alert)
        self.assertEqual(1, payload.badge)
        self.assertEqual('test.mp3', payload.sound)
        self.assertEqual('testing', payload.category)
    @patch('apns2.client.APNsClient')
    def test_send_when_disabled(self, mock_client):
        """Test that a disabled device is skipped when sending."""
        send = mock_client.return_value.send_notification
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1, disabled: True}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call(
            'notify', 'test_app',
            {'message': 'Hello', 'data': {
                'badge': 1,
                'sound': 'test.mp3',
                'category': 'testing'}},
            blocking=True))
        self.assertFalse(send.called)
    @patch('apns2.client.APNsClient')
    def test_send_with_state(self, mock_client):
        """Test that 'target' limits sending to devices in that tracker state."""
        send = mock_client.return_value.send_notification
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1, '
                      'tracking_device_id: tracking123}\n')
            out.write('5678: {name: test device 2, '
                      'tracking_device_id: tracking456}\n')
        # Construct the service directly to drive its state listener by hand.
        notify_service = ApnsNotificationService(
            self.hass,
            'test_app',
            'testapp.appname',
            False,
            'test_app.pem'
        )
        notify_service.device_state_changed_listener(
            'device_tracker.tracking456',
            State('device_tracker.tracking456', None),
            State('device_tracker.tracking456', 'home'))
        self.hass.block_till_done()
        notify_service.send_message(message='Hello', target='home')
        self.assertTrue(send.called)
        self.assertEqual(1, len(send.mock_calls))
        target = send.mock_calls[0][1][0]
        payload = send.mock_calls[0][1][1]
        # Only the device whose tracker is 'home' receives the message.
        self.assertEqual('5678', target)
        self.assertEqual('Hello', payload.alert)
    @patch('apns2.client.APNsClient')
    def test_disable_when_unregistered(self, mock_client):
        """Test disabling a device when it is unregistered."""
        send = mock_client.return_value.send_notification
        send.side_effect = Unregistered()
        devices_path = self.hass.config.path('test_app_apns.yaml')
        with open(devices_path, 'w+') as out:
            out.write('1234: {name: test device 1}\n')
        self._setup_notify()
        self.assertTrue(self.hass.services.call('notify', 'test_app',
                                                {'message': 'Hello'},
                                                blocking=True))
        devices = {str(key): value for (key, value) in
                   load_yaml_config_file(devices_path).items()}
        test_device_1 = devices.get('1234')
        self.assertIsNotNone(test_device_1)
        # An Unregistered APNs error must flag the device as disabled on disk.
        self.assertEqual(True, test_device_1.get('disabled'))
        os.remove(devices_path)
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.2/tests/regressiontests/templates/loaders.py | 39 | """
Test cases for the template loaders
Note: This test requires setuptools!
"""
from django.conf import settings
if __name__ == '__main__':
settings.configure()
import unittest
import sys
import pkg_resources
import imp
import StringIO
import os.path
import warnings
from django.template import TemplateDoesNotExist, Context
from django.template.loaders.eggs import load_template_source as lts_egg
from django.template.loaders.eggs import Loader as EggLoader
from django.template import loader
from django.test.utils import get_warnings_state, restore_warnings_state
# Mock classes and objects for pkg_resources functions.
class MockProvider(pkg_resources.NullProvider):
    """pkg_resources provider that serves resources from a mock egg module.

    Resources come from the module's ``_resources`` dict (name -> file-like).
    """
    def __init__(self, module):
        pkg_resources.NullProvider.__init__(self, module)
        self.module = module
    def _has(self, path):
        return path in self.module._resources
    def _isdir(self,path):
        # The mock egg is flat: no resource is ever a directory.
        return False
    def get_resource_stream(self, manager, resource_name):
        return self.module._resources[resource_name]
    def _get(self, path):
        return self.module._resources[path].read()
class MockLoader(object):
    # Marker loader type: registered with pkg_resources so that modules
    # carrying this __loader__ are served by MockProvider (see test setUp).
    pass
def create_egg(name, resources):
    """
    Build a mock egg module and register it in sys.modules.

    name: The name of the module.
    resources: A dictionary of resources. Keys are the names and values the data.
    """
    module = imp.new_module(name)
    module.__loader__ = MockLoader()
    module._resources = resources
    sys.modules[name] = module
class DeprecatedEggLoaderTest(unittest.TestCase):
    "Test the deprecated load_template_source interface to the egg loader"
    def setUp(self):
        # Route MockLoader-backed modules through MockProvider and build
        # two fake eggs: one empty, one with two template resources.
        pkg_resources._provider_factories[MockLoader] = MockProvider
        self.empty_egg = create_egg("egg_empty", {})
        self.egg_1 = create_egg("egg_1", {
            os.path.normcase('templates/y.html') : StringIO.StringIO("y"),
            os.path.normcase('templates/x.txt') : StringIO.StringIO("x"),
        })
        self._old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = []
        # Silence the PendingDeprecationWarning the legacy API emits.
        self._warnings_state = get_warnings_state()
        warnings.simplefilter("ignore", PendingDeprecationWarning)
    def tearDown(self):
        settings.INSTALLED_APPS = self._old_installed_apps
        restore_warnings_state(self._warnings_state)
    def test_existing(self):
        "A template can be loaded from an egg"
        settings.INSTALLED_APPS = ['egg_1']
        contents, template_name = lts_egg("y.html")
        self.assertEqual(contents, "y")
        self.assertEqual(template_name, "egg:egg_1:templates/y.html")
class EggLoaderTest(unittest.TestCase):
    """Tests for the class-based egg template loader."""
    def setUp(self):
        # Route MockLoader-backed modules through MockProvider and build
        # two fake eggs: one empty, one with two template resources.
        pkg_resources._provider_factories[MockLoader] = MockProvider
        self.empty_egg = create_egg("egg_empty", {})
        self.egg_1 = create_egg("egg_1", {
            os.path.normcase('templates/y.html') : StringIO.StringIO("y"),
            os.path.normcase('templates/x.txt') : StringIO.StringIO("x"),
        })
        self._old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = []
    def tearDown(self):
        settings.INSTALLED_APPS = self._old_installed_apps
    def test_empty(self):
        "Loading any template on an empty egg should fail"
        settings.INSTALLED_APPS = ['egg_empty']
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "not-existing.html")
    def test_non_existing(self):
        "Template loading fails if the template is not in the egg"
        settings.INSTALLED_APPS = ['egg_1']
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "not-existing.html")
    def test_existing(self):
        "A template can be loaded from an egg"
        settings.INSTALLED_APPS = ['egg_1']
        egg_loader = EggLoader()
        contents, template_name = egg_loader.load_template_source("y.html")
        self.assertEqual(contents, "y")
        self.assertEqual(template_name, "egg:egg_1:templates/y.html")
    def test_not_installed(self):
        "Loading an existent template from an egg not included in INSTALLED_APPS should fail"
        settings.INSTALLED_APPS = []
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "y.html")
class CachedLoader(unittest.TestCase):
    """Tests for the cached template loader wrapper."""
    def setUp(self):
        self.old_TEMPLATE_LOADERS = settings.TEMPLATE_LOADERS
        # Wrap the filesystem loader in the caching loader.
        settings.TEMPLATE_LOADERS = (
            ('django.template.loaders.cached.Loader', (
                'django.template.loaders.filesystem.Loader',
            )
            ),
        )
    def tearDown(self):
        settings.TEMPLATE_LOADERS = self.old_TEMPLATE_LOADERS
    def test_templatedir_caching(self):
        "Check that the template directories form part of the template cache key. Refs #13573"
        # Retrieve a template specifying a template directory to check
        t1, name = loader.find_template('test.html', (os.path.join(os.path.dirname(__file__), 'templates', 'first'),))
        # Now retrieve the same template name, but from a different directory
        t2, name = loader.find_template('test.html', (os.path.join(os.path.dirname(__file__), 'templates', 'second'),))
        # The two templates should not have the same content
        self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
if __name__ == "__main__":
unittest.main()
|
turbomanage/training-data-analyst | refs/heads/master | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1_modules/rfc4210.py | 7 | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# Certificate Management Protocol structures as per RFC4210
#
# Based on Alex Railean's work
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc2314
from pyasn1_modules import rfc2459
from pyasn1_modules import rfc2511
# Effectively-unbounded upper limit used in SIZE (1..MAX) constraints below.
MAX = float('inf')
class KeyIdentifier(univ.OctetString):
    """KeyIdentifier ::= OCTET STRING"""
    pass
class CMPCertificate(rfc2459.Certificate):
    """CMPCertificate ::= Certificate"""
    pass
class OOBCert(CMPCertificate):
    """OOBCert ::= CMPCertificate"""
    pass
class CertAnnContent(CMPCertificate):
    """CertAnnContent ::= CMPCertificate"""
    pass
class PKIFreeText(univ.SequenceOf):
    """
    PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
    """
    componentType = char.UTF8String()
    # At least one element is required by the SIZE constraint.
    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class PollRepContent(univ.SequenceOf):
    """
    PollRepContent ::= SEQUENCE OF SEQUENCE {
        certReqId              INTEGER,
        checkAfter             INTEGER,  -- time in seconds
        reason                 PKIFreeText OPTIONAL
    }
    """
    # Inner anonymous SEQUENCE modelled as a nested class.
    class CertReq(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('certReqId', univ.Integer()),
            namedtype.NamedType('checkAfter', univ.Integer()),
            namedtype.OptionalNamedType('reason', PKIFreeText())
        )
    componentType = CertReq()
class PollReqContent(univ.SequenceOf):
    """
    PollReqContent ::= SEQUENCE OF SEQUENCE {
        certReqId              INTEGER
    }
    """
    # Inner anonymous SEQUENCE modelled as a nested class.
    class CertReq(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('certReqId', univ.Integer())
        )
    componentType = CertReq()
class InfoTypeAndValue(univ.Sequence):
    """
    InfoTypeAndValue ::= SEQUENCE {
        infoType               OBJECT IDENTIFIER,
        infoValue              ANY DEFINED BY infoType  OPTIONAL
    }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('infoType', univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('infoValue', univ.Any())
    )
class GenRepContent(univ.SequenceOf):
    """GenRepContent ::= SEQUENCE OF InfoTypeAndValue"""
    componentType = InfoTypeAndValue()
class GenMsgContent(univ.SequenceOf):
    """GenMsgContent ::= SEQUENCE OF InfoTypeAndValue"""
    componentType = InfoTypeAndValue()
class PKIConfirmContent(univ.Null):
    """PKIConfirmContent ::= NULL"""
    pass
class CRLAnnContent(univ.SequenceOf):
    """CRLAnnContent ::= SEQUENCE OF CertificateList"""
    componentType = rfc2459.CertificateList()
class CAKeyUpdAnnContent(univ.Sequence):
    """
    CAKeyUpdAnnContent ::= SEQUENCE {
        oldWithNew   CMPCertificate,
        newWithOld   CMPCertificate,
        newWithNew   CMPCertificate
    }
    """
    # Cross-certified old/new CA key pair announcement.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oldWithNew', CMPCertificate()),
        namedtype.NamedType('newWithOld', CMPCertificate()),
        namedtype.NamedType('newWithNew', CMPCertificate())
    )
class RevDetails(univ.Sequence):
    """
    RevDetails ::= SEQUENCE {
        certDetails         CertTemplate,
        crlEntryDetails     Extensions       OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
        namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
    )
class RevReqContent(univ.SequenceOf):
    """RevReqContent ::= SEQUENCE OF RevDetails"""
    componentType = RevDetails()
class CertOrEncCert(univ.Choice):
    """
    CertOrEncCert ::= CHOICE {
        certificate     [0] CMPCertificate,
        encryptedCert   [1] EncryptedValue
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certificate', CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
class CertifiedKeyPair(univ.Sequence):
    """
    CertifiedKeyPair ::= SEQUENCE {
        certOrEncCert       CertOrEncCert,
        privateKey      [0] EncryptedValue      OPTIONAL,
        publicationInfo [1] PKIPublicationInfo  OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certOrEncCert', CertOrEncCert()),
        namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
class POPODecKeyRespContent(univ.SequenceOf):
    """POPODecKeyRespContent ::= SEQUENCE OF INTEGER"""
    componentType = univ.Integer()
class Challenge(univ.Sequence):
    """
    Challenge ::= SEQUENCE {
        owf                 AlgorithmIdentifier  OPTIONAL,
        witness             OCTET STRING,
        challenge           OCTET STRING
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('witness', univ.OctetString()),
        namedtype.NamedType('challenge', univ.OctetString())
    )
class PKIStatus(univ.Integer):
    """
    PKIStatus ::= INTEGER {
        accepted                (0),
        grantedWithMods        (1),
        rejection              (2),
        waiting                (3),
        revocationWarning      (4),
        revocationNotification (5),
        keyUpdateWarning       (6)
    }
    """
    namedValues = namedval.NamedValues(
        ('accepted', 0),
        ('grantedWithMods', 1),
        ('rejection', 2),
        ('waiting', 3),
        ('revocationWarning', 4),
        ('revocationNotification', 5),
        ('keyUpdateWarning', 6)
    )
class PKIFailureInfo(univ.BitString):
    """
    PKIFailureInfo ::= BIT STRING {
        badAlg              (0),
        badMessageCheck     (1),
        badRequest          (2),
        badTime             (3),
        badCertId           (4),
        badDataFormat       (5),
        wrongAuthority      (6),
        incorrectData       (7),
        missingTimeStamp    (8),
        badPOP              (9),
        certRevoked         (10),
        certConfirmed       (11),
        wrongIntegrity      (12),
        badRecipientNonce   (13),
        timeNotAvailable    (14),
        unacceptedPolicy    (15),
        unacceptedExtension (16),
        addInfoNotAvailable (17),
        badSenderNonce      (18),
        badCertTemplate     (19),
        signerNotTrusted    (20),
        transactionIdInUse  (21),
        unsupportedVersion  (22),
        notAuthorized       (23),
        systemUnavail       (24),
        systemFailure       (25),
        duplicateCertReq    (26)
    }
    """
    namedValues = namedval.NamedValues(
        ('badAlg', 0),
        ('badMessageCheck', 1),
        ('badRequest', 2),
        ('badTime', 3),
        ('badCertId', 4),
        ('badDataFormat', 5),
        ('wrongAuthority', 6),
        ('incorrectData', 7),
        ('missingTimeStamp', 8),
        ('badPOP', 9),
        ('certRevoked', 10),
        ('certConfirmed', 11),
        ('wrongIntegrity', 12),
        ('badRecipientNonce', 13),
        ('timeNotAvailable', 14),
        ('unacceptedPolicy', 15),
        ('unacceptedExtension', 16),
        ('addInfoNotAvailable', 17),
        ('badSenderNonce', 18),
        ('badCertTemplate', 19),
        ('signerNotTrusted', 20),
        ('transactionIdInUse', 21),
        ('unsupportedVersion', 22),
        ('notAuthorized', 23),
        ('systemUnavail', 24),
        ('systemFailure', 25),
        ('duplicateCertReq', 26)
    )
class PKIStatusInfo(univ.Sequence):
    """
    PKIStatusInfo ::= SEQUENCE {
        status        PKIStatus,
        statusString  PKIFreeText     OPTIONAL,
        failInfo      PKIFailureInfo  OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.OptionalNamedType('statusString', PKIFreeText()),
        namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
    )
class ErrorMsgContent(univ.Sequence):
    """
    ErrorMsgContent ::= SEQUENCE {
        pKIStatusInfo          PKIStatusInfo,
        errorCode              INTEGER           OPTIONAL,
        -- implementation-specific error codes
        errorDetails           PKIFreeText       OPTIONAL
        -- implementation-specific error details
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
        namedtype.OptionalNamedType('errorCode', univ.Integer()),
        namedtype.OptionalNamedType('errorDetails', PKIFreeText())
    )
class CertStatus(univ.Sequence):
    """
    CertStatus ::= SEQUENCE {
       certHash    OCTET STRING,
       certReqId   INTEGER,
       statusInfo  PKIStatusInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certHash', univ.OctetString()),
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
    )
class CertConfirmContent(univ.SequenceOf):
    """CertConfirmContent ::= SEQUENCE OF CertStatus"""
    componentType = CertStatus()
class RevAnnContent(univ.Sequence):
    """
    RevAnnContent ::= SEQUENCE {
        status              PKIStatus,
        certId              CertId,
        willBeRevokedAt     GeneralizedTime,
        badSinceDate        GeneralizedTime,
        crlDetails          Extensions  OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.NamedType('certId', rfc2511.CertId()),
        namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
        namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
        namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
    )
class RevRepContent(univ.Sequence):
    """
    RevRepContent ::= SEQUENCE {
        status       SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
        revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
                                                OPTIONAL,
        crls     [1] SEQUENCE SIZE (1..MAX) OF CertificateList
                                                OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'status', univ.SequenceOf(
                componentType=PKIStatusInfo(),
                sizeSpec=constraint.ValueSizeConstraint(1, MAX)
            )
        ),
        namedtype.OptionalNamedType(
            'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype(
                sizeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.OptionalNamedType(
            'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype(
                sizeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        )
    )
class KeyRecRepContent(univ.Sequence):
    """
    KeyRecRepContent ::= SEQUENCE {
        status          PKIStatusInfo,
        newSigCert  [0] CMPCertificate OPTIONAL,
        caCerts     [1] SEQUENCE SIZE (1..MAX) OF
                                     CMPCertificate OPTIONAL,
        keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
                                     CertifiedKeyPair OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType(
            'newSigCert', CMPCertificate().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.OptionalNamedType(
            'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
                sizeSpec=constraint.ValueSizeConstraint(1, MAX)
            )
        ),
        namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
            sizeSpec=constraint.ValueSizeConstraint(1, MAX))
        )
    )
class CertResponse(univ.Sequence):
    """
    CertResponse ::= SEQUENCE {
        certReqId           INTEGER,
        status              PKIStatusInfo,
        certifiedKeyPair    CertifiedKeyPair    OPTIONAL,
        rspInfo             OCTET STRING        OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
        namedtype.OptionalNamedType('rspInfo', univ.OctetString())
    )
class CertRepMessage(univ.Sequence):
    """
    CertRepMessage ::= SEQUENCE {
        caPubs   [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
                     OPTIONAL,
        response     SEQUENCE OF CertResponse
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType(
            'caPubs', univ.SequenceOf(
                componentType=CMPCertificate()
            ).subtype(sizeSpec=constraint.ValueSizeConstraint(1, MAX),
                      explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
        ),
        namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse()))
    )
class POPODecKeyChallContent(univ.SequenceOf):
    """POPODecKeyChallContent ::= SEQUENCE OF Challenge"""
    componentType = Challenge()
class OOBCertHash(univ.Sequence):
    """
    OOBCertHash ::= SEQUENCE {
        hashAlg     [0] AlgorithmIdentifier     OPTIONAL,
        certId      [1] CertId                  OPTIONAL,
        hashVal         BIT STRING
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType(
            'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
        ),
        namedtype.OptionalNamedType(
            'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
        ),
        namedtype.NamedType('hashVal', univ.BitString())
    )
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
    """
    NestedMessageContent ::= PKIMessages
    """
    # univ.Any() because PKIMessage cannot be referenced before its own
    # definition (pyasn1 cannot express the recursive schema directly).
    componentType = univ.Any()
class DHBMParameter(univ.Sequence):
    """
    DHBMParameter ::= SEQUENCE {
        owf                 AlgorithmIdentifier,
        -- AlgId for a One-Way Function (SHA-1 recommended)
        mac                 AlgorithmIdentifier
        -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
    }   -- or HMAC [RFC2104, RFC2202])
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )
# id-DHBasedMac OBJECT IDENTIFIER (RFC 4210)
id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
class PBMParameter(univ.Sequence):
    """
    PBMParameter ::= SEQUENCE {
        salt                OCTET STRING,
        owf                 AlgorithmIdentifier,
        iterationCount      INTEGER,
        mac                 AlgorithmIdentifier
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            # Salt is constrained to at most 128 octets.
            'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128))
        ),
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('iterationCount', univ.Integer()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )
# id-PasswordBasedMac OBJECT IDENTIFIER (RFC 4210)
id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
class PKIProtection(univ.BitString):
    """PKIProtection ::= BIT STRING"""
    pass
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
# Pre-tagged instance used for the PKIBody CHOICE alternative [20] below.
nestedMessageContent = NestedMessageContent().subtype(
    explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
class PKIBody(univ.Choice):
    """
    PKIBody ::= CHOICE {       -- message-specific body elements
        ir       [0]  CertReqMessages,        --Initialization Request
        ip       [1]  CertRepMessage,         --Initialization Response
        cr       [2]  CertReqMessages,        --Certification Request
        cp       [3]  CertRepMessage,         --Certification Response
        p10cr    [4]  CertificationRequest,   --imported from [PKCS10]
        popdecc  [5]  POPODecKeyChallContent, --pop Challenge
        popdecr  [6]  POPODecKeyRespContent,  --pop Response
        kur      [7]  CertReqMessages,        --Key Update Request
        kup      [8]  CertRepMessage,         --Key Update Response
        krr      [9]  CertReqMessages,        --Key Recovery Request
        krp      [10] KeyRecRepContent,       --Key Recovery Response
        rr       [11] RevReqContent,          --Revocation Request
        rp       [12] RevRepContent,          --Revocation Response
        ccr      [13] CertReqMessages,        --Cross-Cert. Request
        ccp      [14] CertRepMessage,         --Cross-Cert. Response
        ckuann   [15] CAKeyUpdAnnContent,     --CA Key Update Ann.
        cann     [16] CertAnnContent,         --Certificate Ann.
        rann     [17] RevAnnContent,          --Revocation Ann.
        crlann   [18] CRLAnnContent,          --CRL Announcement
        pkiconf  [19] PKIConfirmContent,      --Confirmation
        nested   [20] NestedMessageContent,   --Nested Message
        genm     [21] GenMsgContent,          --General Message
        genp     [22] GenRepContent,          --General Response
        error    [23] ErrorMsgContent,        --Error Message
        certConf [24] CertConfirmContent,     --Certificate confirm
        pollReq  [25] PollReqContent,         --Polling request
        pollRep  [26] PollRepContent          --Polling response
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'ir', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.NamedType(
            'ip', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        ),
        namedtype.NamedType(
            'cr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
            )
        ),
        namedtype.NamedType(
            'cp', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
            )
        ),
        namedtype.NamedType(
            'p10cr', rfc2314.CertificationRequest().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
            )
        ),
        namedtype.NamedType(
            'popdecc', POPODecKeyChallContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
            )
        ),
        namedtype.NamedType(
            'popdecr', POPODecKeyRespContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
            )
        ),
        namedtype.NamedType(
            'kur', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
            )
        ),
        namedtype.NamedType(
            'kup', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
            )
        ),
        namedtype.NamedType(
            'krr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
            )
        ),
        namedtype.NamedType(
            'krp', KeyRecRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
            )
        ),
        namedtype.NamedType(
            'rr', RevReqContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
            )
        ),
        namedtype.NamedType(
            'rp', RevRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
            )
        ),
        namedtype.NamedType(
            'ccr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
            )
        ),
        namedtype.NamedType(
            'ccp', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
            )
        ),
        namedtype.NamedType(
            'ckuann', CAKeyUpdAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
            )
        ),
        namedtype.NamedType(
            'cann', CertAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
            )
        ),
        namedtype.NamedType(
            'rann', RevAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
            )
        ),
        namedtype.NamedType(
            'crlann', CRLAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
            )
        ),
        namedtype.NamedType(
            'pkiconf', PKIConfirmContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
            )
        ),
        namedtype.NamedType(
            'nested', nestedMessageContent
        ),
        # namedtype.NamedType('nested', NestedMessageContent().subtype(
        #    explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
        #    )
        # ),
        namedtype.NamedType(
            'genm', GenMsgContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
            )
        ),
        # NOTE(review): the RFC names choice [22] 'genp'; this component is
        # registered as 'gen'.  Renaming would break callers indexing
        # body['gen'], so the name is left as-is — confirm against upstream
        # pyasn1-modules before changing.
        namedtype.NamedType(
            'gen', GenRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
            )
        ),
        namedtype.NamedType(
            'error', ErrorMsgContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
            )
        ),
        namedtype.NamedType(
            'certConf', CertConfirmContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
            )
        ),
        namedtype.NamedType(
            'pollReq', PollReqContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
            )
        ),
        namedtype.NamedType(
            'pollRep', PollRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
            )
        )
    )
class PKIHeader(univ.Sequence):
    """Header carried by every CMP PKIMessage (RFC 4210, section 5.1.1).

    PKIHeader ::= SEQUENCE {
    pvno INTEGER { cmp1999(1), cmp2000(2) },
    sender GeneralName,
    recipient GeneralName,
    messageTime [0] GeneralizedTime OPTIONAL,
    protectionAlg [1] AlgorithmIdentifier OPTIONAL,
    senderKID [2] KeyIdentifier OPTIONAL,
    recipKID [3] KeyIdentifier OPTIONAL,
    transactionID [4] OCTET STRING OPTIONAL,
    senderNonce [5] OCTET STRING OPTIONAL,
    recipNonce [6] OCTET STRING OPTIONAL,
    freeText [7] PKIFreeText OPTIONAL,
    generalInfo [8] SEQUENCE SIZE (1..MAX) OF
    InfoTypeAndValue OPTIONAL
    }
    """
    # The explicit context tags [0]..[8] below must match the ASN.1 module
    # exactly: pyasn1 relies on them to tell the OPTIONAL fields apart when
    # decoding BER/DER input.
    componentType = namedtype.NamedTypes(
        # Protocol version: only cmp1999(1) and cmp2000(2) are defined.
        namedtype.NamedType(
            'pvno', univ.Integer(
                namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
            )
        ),
        namedtype.NamedType('sender', rfc2459.GeneralName()),
        namedtype.NamedType('recipient', rfc2459.GeneralName()),
        namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
        namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        # transactionID / senderNonce / recipNonce tie requests to responses
        # and are plain OCTET STRINGs here, per the ASN.1 above.
        namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
        namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
        # NOTE(review): the ASN.1 tags the SEQUENCE OF itself with [8], but
        # here the explicitTag and size constraint are applied to the
        # SequenceOf's componentType (each InfoTypeAndValue element) instead
        # of to the SequenceOf — confirm against pyasn1 decoding behavior
        # before changing, as PKIMessage below uses the other arrangement.
        namedtype.OptionalNamedType('generalInfo',
                                    univ.SequenceOf(
                                        componentType=InfoTypeAndValue().subtype(
                                            sizeSpec=constraint.ValueSizeConstraint(1, MAX),
                                            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)
                                        )
                                    )
                                    )
    )
class ProtectedPart(univ.Sequence):
    """The portion of a PKIMessage covered by the protection computation.

    ProtectedPart ::= SEQUENCE {
    header PKIHeader,
    body PKIBody
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        # NOTE(review): the ASN.1 above names this field 'body', but the
        # component is registered as 'infoValue'. Renaming it would break any
        # caller using getComponentByName('infoValue'), so the mismatch is
        # documented rather than fixed here.
        namedtype.NamedType('infoValue', PKIBody())
    )
class PKIMessage(univ.Sequence):
    """Top-level CMP message: header, body, optional protection and certs.

    PKIMessage ::= SEQUENCE {
    header PKIHeader,
    body PKIBody,
    protection [0] PKIProtection OPTIONAL,
    extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
    OPTIONAL
    }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('body', PKIBody()),
        # [0] protection: computed over ProtectedPart (header + body).
        namedtype.OptionalNamedType('protection', PKIProtection().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        # [1] extraCerts: here the size constraint and explicit tag are
        # applied to the SequenceOf itself (unlike PKIHeader.generalInfo).
        namedtype.OptionalNamedType('extraCerts',
                                    univ.SequenceOf(
                                        componentType=CMPCertificate()
                                    ).subtype(
                                        sizeSpec=constraint.ValueSizeConstraint(1, MAX),
                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
                                    )
                                    )
    )
class PKIMessages(univ.SequenceOf):
    """A non-empty collection of PKIMessage values.

    PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
    """
    # Tighten the inherited size spec to require at least one element.
    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
    componentType = PKIMessage()
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
# Both the type and the pre-tagged instance (embedded earlier as the
# 'nested' alternative of PKIBody) must be patched, because they were
# created before PKIMessages existed.
# NOTE(review): this pokes the private pyasn1 attribute '_componentType';
# verify it still works against the installed pyasn1 version, since private
# internals may change between releases.
NestedMessageContent._componentType = PKIMessages()
nestedMessageContent._componentType = PKIMessages()
|
Nebucatnetzer/tamagotchi | refs/heads/master | pygame/lib/python3.4/site-packages/faker/providers/address/sk_SK/__init__.py | 19 | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ('{{city_name}}', )
street_name_formats = ('{{street_name}}', )
street_address_formats = ('{{street_name}} {{building_number}}', )
address_formats = ('{{street_address}}\n{{postcode}} {{city}}', )
building_number_formats = ('####', '###', '##', '#', '#/#')
street_suffixes_long = ('ulica', )
street_suffixes_short = ('ul.', )
postcode_formats = ('### ##', )
cities = (
'Ábelová', 'Abovce', 'Abrahám', 'Abrahámovce', 'Abrahámovce',
'Abramová', 'Abranovce', 'Adidovce', 'Alekšince', 'Andovce',
'Andrejová', 'Ardanovce', 'Ardovo', 'Arnutovce', 'Báb', 'Babie',
'Babín', 'Babiná', 'Babindol', 'Babinec', 'Bacúch', 'Bacúrov', 'Báč',
'Bačka', 'Bačkov', 'Bačkovík', 'Badín', 'Baďan', 'Báhoň', 'Bajany',
'Bajč', 'Bajerov', 'Bajerovce', 'Bajka', 'Bajtava', 'Baka', 'Baláže',
'Baldovce', 'Balog nad Ipľom', 'Baloň', 'Banka', 'Bánov',
'Bánovce nad Bebravou', 'Bánovce nad Ondavou', 'Banská Belá',
'Banská Štiavnica', 'Banská Bystrica', 'Banské', 'Banský Studenec',
'Baňa', 'Bara', 'Barca', 'Bartošovce', 'Bardoňovo',
'Bartošova Lehôtka', 'Bardejov', 'Baška', 'Baškovce', 'Baškovce',
'Bašovce', 'Batizovce', 'Bátorová', 'Bátka', 'Bátorove Kosihy',
'Bátovce', 'Beharovce', 'Beckov', 'Becherov', 'Belá', 'Belá',
'Belá - Dulice', 'Belá nad Cirochou', 'Beladice', 'Belejovce', 'Belín',
'Belina', 'Belince', 'Bellova Ves', 'Beloveža', 'Beluj', 'Beluša',
'Belža', 'Beniakovce', 'Benice', 'Benkovce', 'Beňadiková',
'Beňadikovce', 'Beňadovo', 'Beňatina', 'Beňuš', 'Bernolákovo',
'Bertotovce', 'Beša', 'Beša', 'Bešeňov', 'Bešeňová', 'Betlanovce',
'Betliar', 'Bežovce', 'Bidovce', 'Biel', 'Bielovce', 'Biely Kostol',
'Bijacovce', 'Bílkove Humence', 'Bíňa', 'Bíňovce', 'Biskupice',
'Biskupová', 'Bitarová', 'Blahová', 'Blatná na Ostrove',
'Blatná Polianka', 'Blatné', 'Blatné Remety', 'Blatné Revištia',
'Blatnica', 'Blažice', 'Blažovce', 'Blesovce', 'Blhovce', 'Bobot',
'Bobrov', 'Bobrovček', 'Bobrovec', 'Bobrovník', 'Bočiar', 'Bodíky',
'Bodiná', 'Bodorová', 'Bodovce', 'Bodružal', 'Bodza',
'Bodzianske Lúky', 'Bogliarka', 'Bohdanovce', 'Bohdanovce nad Trnavou',
'Boheľov', 'Bohunice', 'Bohunice', 'Bohúňovo', 'Bojná', 'Bojnice',
'Bojničky', 'Boldog', 'Boleráz', 'Bolešov', 'Boliarov', 'Boľ',
'Boľkovce', 'Borcová', 'Borčany', 'Borčice', 'Borinka', 'Borová',
'Borovce', 'Borský Mikuláš', 'Borský Svätý Jur', 'Borša', 'Bory',
'Bošáca', 'Bošany', 'Bottovo', 'Boťany', 'Bôrka', 'Bracovce', 'Branč',
'Branovo', 'Bratislava', 'Okres Bratislava II', 'Okres Bratislava III',
'Okres Bratislava IV', 'Okres Bratislava V', 'Braväcovo', 'Brdárka',
'Brehov', 'Brehy', 'Brekov', 'Brestov', 'Brestov',
'Brestov nad Laborcom', 'Brestovany', 'Brestovec', 'Brestovec',
'Bretejovce', 'Bretka', 'Breza', 'Brezany', 'Brezina', 'Breziny',
'Breznica', 'Breznička', 'Breznička', 'Brezno', 'Brezolupy', 'Brezov',
'Brezová pod Bradlom', 'Brezovec', 'Brezovica', 'Brezovica',
'Brezovička', 'Brezovka', 'Brežany', 'Brhlovce', 'Brieštie', 'Brodské',
'Brodzany', 'Brunovce', 'Brusnica', 'Brusník', 'Brusno', 'Brutovce',
'Bruty', 'Brvnište', 'Brzotín', 'Buclovany', 'Búč', 'Bučany', 'Budča',
'Budikovany', 'Budimír', 'Budiná', 'Budince', 'Budiš', 'Budkovce',
'Budmerice', 'Buglovce', 'Buková', 'Bukovce', 'Bukovec', 'Bukovec',
'Bukovina', 'Bulhary', 'Bunetice', 'Bunkovce', 'Bušince', 'Bušovce',
'Buzica', 'Buzitka', 'Bystrá', 'Bystrá', 'Bystrany', 'Bystré',
'Bystričany', 'Bystrička', 'Byšta', 'Bytča', 'Bzenica', 'Bzenov',
'Bzince pod Javorinou', 'Bziny', 'Bzovík', 'Bzovská Lehôtka', 'Bžany',
'Cabaj - Čápor', 'Cabov', 'Cakov', 'Cejkov', 'Cernina', 'Cerová',
'Cerovo', 'Cestice', 'Cífer', 'Cigeľ', 'Cigeľka', 'Cigla', 'Cimenná',
'Cinobaňa', 'Čabalovce', 'Čabiny', 'Čabradský Vrbovok', 'Čadca',
'Čachtice', 'Čajkov', 'Čaka', 'Čakajovce', 'Čakanovce', 'Čakanovce',
'Čakany', 'Čaklov', 'Čalovec', 'Čamovce', 'Čaňa', 'Čaradice', 'Čáry',
'Častá', 'Častkov', 'Častkovce', 'Čata', 'Čataj', 'Čavoj', 'Čebovce',
'Čečehov', 'Čečejovce', 'Čechy', 'Čechynce', 'Čekovce', 'Čeláre',
'Čelkova Lehota', 'Čelovce', 'Čelovce', 'Čeľadice', 'Čeľadince',
'Čeľovce', 'Čenkovce', 'Čerenčany', 'Čereňany', 'Čerhov', 'Čerín',
'Čermany', 'Černík', 'Černina', 'Černochov', 'Čertižné',
'Červená Voda', 'Červenica', 'Červenica pri Sabinove', 'Červeník',
'Červený Hrádok', 'Červený Kameň', 'Červený Kláštor', 'Červeňany',
'České Brezovo', 'Čičarovce', 'Čičava', 'Čičmany', 'Číčov', 'Čierna',
'Čierna Lehota', 'Čierna Lehota', 'Čierna nad Tisou', 'Čierna Voda',
'Čierne', 'Čierne Kľačany', 'Čierne nad Topľou', 'Čierne Pole',
'Čierny Balog', 'Čierny Brod', 'Čierny Potok', 'Čifáre',
'Čiližská Radvaň', 'Čimhová', 'Čirč', 'Číž', 'Čižatice', 'Čoltovo',
'Čremošné', 'Čučma', 'Čukalovce', 'Dačov Lom', 'Daletice', 'Danišovce',
'Dargov', 'Davidov', 'Debraď', 'Dedačov', 'Dedina Mládeže', 'Dedinka',
'Dedinky', 'Dechtice', 'Dekýš', 'Demandice', 'Demänovská Dolina',
'Demjata', 'Detrík', 'Detva', 'Detvianska Huta', 'Devičany', 'Devičie',
'Dežerice', 'Diaková', 'Diakovce', 'Diviacka Nová Ves',
'Diviaky nad Nitricou', 'Divín', 'Divina', 'Divinka', 'Dlhá',
'Dlhá nad Kysucou', 'Dlhá nad Oravou', 'Dlhá nad Váhom', 'Dlhá Ves',
'Dlhé Klčovo', 'Dlhé nad Cirochou', 'Dlhé Pole', 'Dlhé Stráže',
'Dlhoňa', 'Dlžín', 'Dobrá', 'Dobrá Niva', 'Dobrá Voda', 'Dobroč',
'Dobrohošť', 'Dobroslava', 'Dobšiná', 'Dohňany', 'Dojč', 'Dolinka',
'Dolná Breznica', 'Dolná Krupá', 'Dolná Lehota', 'Dolná Mariková',
'Dolná Mičiná', 'Dolná Poruba', 'Dolná Seč', 'Dolná Streda',
'Dolná Strehová', 'Dolná Súča', 'Dolná Tižina', 'Dolná Trnávka',
'Dolná Ves', 'Dolná Ždaňa', 'Dolné Dubové', 'Dolné Kočkovce',
'Dolné Lefantovce', 'Dolné Lovčice', 'Dolné Mladonice',
'Dolné Naštice', 'Dolné Obdokovce', 'Dolné Orešany', 'Dolné Otrokovce',
'Dolné Plachtince', 'Dolné Saliby', 'Dolné Semerovce', 'Dolné Srnie',
'Dolné Strháre', 'Dolné Trhovište', 'Dolné Vestenice', 'Dolné Zahorany',
'Dolné Zelenice', 'Dolný Badín', 'Dolný Bar', 'Dolný Harmanec',
'Dolný Hričov', 'Dolný Chotár', 'Dolný Kalník', 'Dolný Kubín',
'Dolný Lieskov', 'Dolný Lopašov', 'Dolný Ohaj', 'Dolný Pial',
'Dolný Štál', 'Dolný Vadičov', 'Doľany', 'Doľany', 'Domadice',
'Domaníky', 'Domaniža', 'Domaňovce', 'Donovaly', 'Drábsko', 'Drahňov',
'Drahovce', 'Dravce', 'Dražice', 'Dražkovce', 'Drážovce', 'Drienčany',
'Drienica', 'Drienov', 'Drienovec', 'Drienovo', 'Drienovská Nová Ves',
'Drietoma', 'Drnava', 'Drňa', 'Družstevná pri Hornáde', 'Drženice',
'Držkovce', 'Dubinné', 'Dubnica nad Váhom', 'Dubnička', 'Dubník',
'Dubno', 'Dubodiel', 'Dubová', 'Dubová', 'Dubovany', 'Dubovce',
'Dubové', 'Dubové', 'Dubovec', 'Dubovica', 'Dúbrava', 'Dúbrava',
'Dúbrava', 'Dúbravica', 'Dúbravka', 'Dúbravy', 'Ducové', 'Dudince',
'Dukovce', 'Dulov', 'Dulova Ves', 'Dulovce', 'Dulovo',
'Dunajská Lužná', 'Dunajov', 'Dunajská Streda', 'Dunajský Klátov',
'Duplín', 'Dvorany nad Nitrou', 'Dvorec', 'Dvorianky', 'Dvorníky',
'Dvorníky - Včeláre', 'Dvory nad Žitavou', 'Ďačov', 'Ďanová',
'Ďapalovce', 'Ďubákovo', 'Ďurčiná', 'Ďurďoš', 'Ďurďošík', 'Ďurďové',
'Ďurkov', 'Ďurková', 'Ďurkovce', 'Egreš', 'Fačkov', 'Falkušovce',
'Farná', 'Fekišovce', 'Figa', 'Fijaš', 'Fiľakovo', 'Fiľakovské Kováče',
'Fintice', 'Folkušová', 'Forbasy', 'Frička', 'Fričkovce', 'Fričovce',
'Fulianka', 'Gabčíkovo', 'Gaboltov', 'Gajary', 'Galanta', 'Galovany',
'Gánovce', 'Gáň', 'Gbelce', 'Gbely', 'Gbeľany', 'Geča', 'Gelnica',
'Gemer', 'Gemerček', 'Gemerská Hôrka', 'Gemerská Panica',
'Gemerská Poloma', 'Gemerská Ves', 'Gemerské Dechtáre',
'Gemerské Michalovce', 'Gemerské Teplice', 'Gemerský Jablonec',
'Gemerský Sad', 'Geraltov', 'Gerlachov', 'Gerlachov', 'Giglovce',
'Giraltovce', 'Girovce', 'Glabušovce', 'Gočaltovo', 'Gočovo',
'Golianovo', 'Gortva', 'Gôtovany', 'Granč - Petrovce',
'Gregorova Vieska', 'Gregorovce', 'Gribov', 'Gruzovce', 'Gyňov',
'Habovka', 'Habura', 'Hačava', 'Háj', 'Háj', 'Hajná Nová Ves',
'Hajnáčka', 'Hájske', 'Hajtovka', 'Haláčovce', 'Halič', 'Haligovce',
'Haluzice', 'Hamuliakovo', 'Handlová', 'Hanigovce', 'Haniska',
'Haniska', 'Hanková', 'Hankovce', 'Hankovce', 'Hanušovce nad Topľou',
'Harakovce', 'Harhaj', 'Harichovce', 'Harmanec', 'Hatalov', 'Hatné',
'Havaj', 'Havka', 'Havranec', 'Hažín', 'Hažín nad Cirochou', 'Hažlín',
'Helcmanovce', 'Heľpa', 'Henckovce', 'Henclová', 'Hencovce',
'Hendrichovce', 'Herľany', 'Hermanovce', 'Hermanovce nad Topľou',
'Hertník', 'Hervartov', 'Hiadeľ', 'Hincovce', 'Hladovka', 'Hlboké',
'Hliník nad Hronom', 'Hlinné', 'Hlivištia', 'Hlohovec', 'Hniezdne',
'Hnilčík', 'Hnilec', 'Hnojné', 'Hnúšťa', 'Hodejov', 'Hodejovec',
'Hodkovce', 'Hodruša - Hámre', 'Hokovce', 'Holčíkovce', 'Holiare',
'Holice', 'Holíč', 'Holiša', 'Holumnica', 'Honce', 'Hontianska Vrbica',
'Hontianske Moravce', 'Hontianske Nemce', 'Hontianske Tesáre',
'Hontianske Trsťany', 'Horná Breznica', 'Horná Kráľová', 'Horná Krupá',
'Horná Lehota', 'Horná Lehota', 'Horná Mariková', 'Horná Mičiná',
'Horná Poruba', 'Horná Potôň', 'Horná Seč', 'Horná Streda',
'Horná Strehová', 'Horná Súča', 'Horná Štubňa', 'Horná Ves',
'Horná Ves', 'Horná Ždaňa', 'Horné Dubové', 'Horné Hámre',
'Horné Chlebany', 'Horné Lefantovce', 'Horné Mladonice', 'Horné Mýto',
'Horné Naštice', 'Horné Obdokovce', 'Horné Orešany', 'Horné Otrokovce',
'Horné Plachtince', 'Horné Pršany', 'Horné Saliby', 'Horné Semerovce',
'Horné Srnie', 'Horné Strháre', 'Horné Štitáre', 'Horné Trhovište',
'Horné Turovce', 'Horné Vestenice', 'Horné Zahorany', 'Horné Zelenice',
'Horný Badín', 'Horný Bar', 'Horný Hričov', 'Horný Kalník',
'Horný Lieskov', 'Horný Pial', 'Horný Tisovník', 'Horný Vadičov',
'Horňa', 'Horňany', 'Horovce', 'Horovce', 'Hoste', 'Hostice', 'Hostie',
'Hostišovce', 'Hostovice', 'Hosťová', 'Hosťovce', 'Hosťovce',
'Hozelec', 'Hôrka', 'Hôrka nad Váhom', 'Hôrky', 'Hrabičov', 'Hrabkov',
'Hrabová Roztoka', 'Hrabovčík', 'Hrabovec', 'Hrabovec nad Laborcom',
'Hrabské', 'Hrabušice', 'Hradisko', 'Hradište', 'Hradište',
'Hradište pod Vrátnom', 'Hrádok', 'Hrachovište', 'Hrachovo',
'Hraničné', 'Hranovnica', 'Hraň', 'Hrašné', 'Hrašovík', 'Hrčeľ',
'Hrhov', 'Hriadky', 'Hričovské Podhradie', 'Hriňová', 'Hrišovce',
'Hrkovce', 'Hrlica', 'Hrnčiarovce nad Parnou', 'Hrnčiarska Ves',
'Hrnčiarske Zalužany', 'Hrochoť', 'Hromoš', 'Hronec', 'Hronovce',
'Hronsek', 'Hronská Breznica', 'Hronská Dúbrava', 'Hronské Kľačany',
'Hronské Kosihy', 'Hronský Beňadik', 'Hrubá Borša', 'Hruboňovo',
'Hrubov', 'Hrubý Šúr', 'Hrušov', 'Hrušov', 'Hrušovany', 'Hrušovo',
'Hruštín', 'Hubice', 'Hubina', 'Hubošovce', 'Hubová', 'Hubovo',
'Hucín', 'Hudcovce', 'Hul', 'Humenné', 'Huncovce', 'Hunkovce',
'Hurbanova Ves', 'Hurbanovo', 'Husák', 'Husiná', 'Hutka', 'Huty',
'Hviezdoslavov', 'Hvozdnica', 'Hybe', 'Hýľov', 'Chanava', 'Chlebnice',
'Chlmec', 'Chľaba', 'Chmeľnica', 'Chmeľov', 'Chmeľová', 'Chmeľovec',
'Chminianska Nová Ves', 'Chminianske Jakubovany', 'Chmiňany', 'Choča',
'Chocholná - Velčice', 'Choňkovce', 'Chorvátsky Grob', 'Chorváty',
'Chotča', 'Chotín', 'Chrabrany', 'Chrámec', 'Chrastince', 'Chrastné',
'Chrasť nad Hornádom', 'Chrenovec - Brusno', 'Chropov', 'Chrťany',
'Chtelnica', 'Chudá Lehota', 'Chvalová', 'Chvojnica', 'Chvojnica',
'Chynorany', 'Chyžné', 'Igram', 'Ihľany', 'Ihráč', 'Ilava', 'Iliašovce',
'Ilija', 'Imeľ', 'Inovce', 'Iňa', 'Iňačovce', 'Ipeľské Predmostie',
'Ipeľské Úľany', 'Ipeľský Sokolec', 'Istebné', 'Ivachnová', 'Ivančiná',
'Ivanice', 'Ivanka pri Dunaji', 'Ivanka pri Nitre', 'Ivanovce', 'Iža',
'Ižipovce', 'Ižkovce', 'Jablonec', 'Jablonica', 'Jablonka', 'Jablonov',
'Jablonov nad Turňou', 'Jablonové', 'Jablonové', 'Jabloň', 'Jabloňovce',
'Jacovce', 'Jahodná', 'Jaklovce', 'Jakovany', 'Jakubany', 'Jakubov',
'Jakubova Voľa', 'Jakubovany', 'Jakubovany', 'Jakušovce', 'Jalová',
'Jalovec', 'Jalovec', 'Jalšové', 'Jalšovík', 'Jamník', 'Jamník',
'Janice', 'Janík', 'Janíky', 'Jankovce', 'Janov', 'Janova Lehota',
'Janovce', 'Jánovce', 'Jánovce', 'Janovík', 'Jarabá', 'Jarabina',
'Jarok', 'Jarovnice', 'Jasenica', 'Jasenie', 'Jasenov', 'Jasenov',
'Jasenová', 'Jasenovce', 'Jasenové', 'Jasenovo', 'Jaslovské Bohunice',
'Jasov', 'Jasová', 'Jastrabá', 'Jastrabie nad Topľou',
'Jastrabie pri Michalovciach', 'Jatov', 'Javorina (vojenský obvod)',
'Jazernica', 'Jedlinka', 'Jedľové Kostoľany', 'Jelenec', 'Jelka',
'Jelšava', 'Jelšovce', 'Jelšovec', 'Jenkovce', 'Jesenské', 'Jesenské',
'Jestice', 'Ješkova Ves', 'Jezersko', 'Jovice', 'Jovsa',
'Jur nad Hronom', 'Jurkova Voľa', 'Jurová', 'Jurské', 'Juskova Voľa',
'Kačanov', 'Kajal', 'Kalameny', 'Kalinkovo', 'Kalinov', 'Kalinovo',
'Kalná nad Hronom', 'Kalná Roztoka', 'Kálnica', 'Kalnište', 'Kalonda',
'Kalša', 'Kaloša', 'Kaluža', 'Kaľamenová', 'Kaľava', 'Kamanová',
'Kamenec pod Vtáčnikom', 'Kamenica', 'Kamenica nad Cirochou',
'Kamenica nad Hronom', 'Kameničany', 'Kameničná', 'Kamenín',
'Kamenná Poruba', 'Kamenná Poruba', 'Kamenné Kosihy', 'Kamenný Most',
'Kameňany', 'Kamienka', 'Kamienka', 'Kanianka', 'Kapišová', 'Kaplna',
'Kapušany', 'Kapušianske Kľačany', 'Karlová', 'Karná', 'Kašov',
'Kátlovce', 'Kátov', 'Kazimír', 'Kecerovce', 'Kecerovský Lipovec',
'Kečkovce', 'Kečovo', 'Kechnec', 'Kendice', 'Kesovce', 'Keť',
'Kežmarok', 'Kiarov', 'Kladzany', 'Klasov', 'Kláštor pod Znievom',
'Klátova Nová Ves', 'Klčov', 'Klenov', 'Klenová', 'Klenovec',
'Kleňany', 'Klieština', 'Klin', 'Klin nad Bodrogom', 'Klížska Nemá',
'Klokoč', 'Klokočov', 'Klokočov', 'Klubina', 'Kluknava', 'Kľačany',
'Kľače', 'Kľačno', 'Kľak', 'Kľúčovec', 'Kľušov', 'Kmeťovo',
'Kobeliarovo', 'Kobylnice', 'Kobyly', 'Koceľovce', 'Kociha',
'Kocurany', 'Kočín - Lančár', 'Kočovce', 'Kochanovce', 'Kochanovce',
'Kojatice', 'Kojšov', 'Kokava nad Rimavicou', 'Kokošovce',
'Kokšov - Bakša', 'Kolačkov', 'Kolačno', 'Koláre', 'Kolárovice',
'Kolárovo', 'Kolbasov', 'Kolbovce', 'Kolibabovce', 'Kolinovce',
'Kolíňany', 'Kolonica', 'Kolta', 'Komárany', 'Komárno', 'Komárov',
'Komárovce', 'Komjatice', 'Komjatná', 'Komoča', 'Koniarovce',
'Konrádovce', 'Konská', 'Konská', 'Koňuš', 'Kopčany', 'Kopernica',
'Koplotovce', 'Koprivnica', 'Kordíky', 'Korejovce', 'Korňa', 'Koromľa',
'Korunková', 'Korytárky', 'Korytné', 'Kosihovce', 'Kosihy nad Ipľom',
'Kosorín', 'Kostolec', 'Kostolište', 'Kostolná pri Dunaji',
'Kostolná Ves', 'Kostolná - Záriečie', 'Kostolné', 'Kostolné Kračany',
'Kostoľany pod Tribečom', 'Koš', 'Košariská', 'Košarovce', 'Košeca',
'Košecké Podhradie', 'Košice', 'Okres Košice II', 'Okres Košice III',
'Okres Košice IV', 'Košická Belá', 'Košická Polianka',
'Košické Oľšany', 'Košický Klečenov', 'Koškovce', 'Košolná', 'Košúty',
'Košťany nad Turcom', 'Kotešová', 'Kotmanová', 'Kotrčiná Lúčka',
'Kováčová', 'Kováčová', 'Kováčovce', 'Koválov', 'Koválovec', 'Kovarce',
'Kozárovce', 'Kozelník', 'Kozí Vrbovok', 'Kožany', 'Kožuchov',
'Kožuchovce', 'Kračúnovce', 'Krahule', 'Krajná Bystrá', 'Krajná Poľana',
'Krajná Porúbka', 'Krajné', 'Krajné Čierno', 'Krakovany', 'Králiky',
'Kráľ', 'Kráľov Brod', 'Kráľova Lehota', 'Kráľová nad Váhom',
'Kráľová pri Senci', 'Kraľovany', 'Kráľovce', 'Kráľovce - Krnišov',
'Kráľovičove Kračany', 'Kráľovský Chlmec', 'Kraskovo', 'Krásna Lúka',
'Krásna Ves', 'Krásno', 'Krásno nad Kysucou', 'Krásnohorská Dlhá Lúka',
'Krásnohorské Podhradie', 'Krásnovce', 'Krásny Brod', 'Krasňany',
'Kravany', 'Kravany', 'Kravany nad Dunajom', 'Krčava', 'Kremná',
'Kremnica', 'Kremnické Bane', 'Kristy', 'Krišľovce',
'Krišovská Liesková', 'Krivá', 'Krivany', 'Kriváň', 'Krivé',
'Krivoklát', 'Krivosúd - Bodovka', 'Kríže', 'Krížová Ves', 'Krížovany',
'Križovany nad Dudváhom', 'Krná', 'Krnča', 'Krokava', 'Krompachy',
'Krpeľany', 'Krškany', 'Krtovce', 'Kručov', 'Krupina', 'Krušetnica',
'Krušinec', 'Krušovce', 'Kružlov', 'Kružlová', 'Kružná', 'Kružno',
'Kšinná', 'Kubáňovo', 'Kučín', 'Kučín', 'Kuchyňa', 'Kuklov', 'Kuková',
'Kukučínov', 'Kunerad', 'Kunešov', 'Kunova Teplica', 'Kuraľany',
'Kurima', 'Kurimany', 'Kurimka', 'Kurov', 'Kusín', 'Kútniky', 'Kúty',
'Kuzmice', 'Kuzmice', 'Kvačany', 'Kvačany', 'Kvakovce', 'Kvašov',
'Kvetoslavov', 'Kyjatice', 'Kyjov', 'Kynceľová', 'Kysak', 'Kyselica',
'Kysta', 'Kysucké Nové Mesto', 'Kysucký Lieskovec', 'Láb', 'Lackov',
'Lacková', 'Lada', 'Ladce', 'Ladice', 'Ladmovce', 'Ladomerská Vieska',
'Ladomirov', 'Ladomirová', 'Ladzany', 'Lakšárska Nová Ves', 'Lascov',
'Laskár', 'Lastomír', 'Lastovce', 'Laškovce', 'Látky', 'Lazany',
'Lazisko', 'Lazy pod Makytou', 'Lažany', 'Lednica', 'Lednické Rovne',
'Legnava', 'Lehnice', 'Lehota', 'Lehota nad Rimavicou',
'Lehota pod Vtáčnikom', 'Lehôtka', 'Lehôtka pod Brehmi', 'Lechnica',
'Lekárovce', 'Leles', 'Leľa', 'Lemešany', 'Lenartov', 'Lenartovce',
'Lendak', 'Lenka', 'Lentvora', 'Leopoldov', 'Lesenice', 'Lesíček',
'Lesné', 'Lesnica', 'Leštiny', 'Lešť (vojenský obvod)', 'Letanovce',
'Letničie', 'Leváre', 'Levice', 'Levkuška', 'Levoča', 'Ležiachov',
'Libichava', 'Licince', 'Ličartovce', 'Liesek', 'Lieskovany',
'Lieskovec', 'Lieskovec', 'Liešno', 'Liešťany', 'Lietava',
'Lietavská Lúčka', 'Lietavská Svinná - Babkov', 'Likavka', 'Limbach',
'Lipany', 'Lipník', 'Lipníky', 'Lipová', 'Lipová', 'Lipovany',
'Lipovce', 'Lipové', 'Lipovec', 'Lipovec', 'Lipovník', 'Lipovník',
'Liptovská Anna', 'Liptovská Kokava', 'Liptovská Lúžna',
'Liptovská Osada', 'Liptovská Porúbka', 'Liptovská Sielnica',
'Liptovská Štiavnica', 'Liptovská Teplá', 'Liptovská Teplička',
'Liptovské Beharovce', 'Liptovské Kľačany', 'Liptovské Matiašovce',
'Liptovské Revúce', 'Liptovské Sliače', 'Liptovský Hrádok',
'Liptovský Ján', 'Liptovský Michal', 'Liptovský Mikuláš',
'Liptovský Ondrej', 'Liptovský Peter', 'Liptovský Trnovec', 'Lisková',
'Lišov', 'Litava', 'Litmanová', 'Livina', 'Livinské Opatovce', 'Livov',
'Livovská Huta', 'Lodno', 'Lok', 'Lokca', 'Lom nad Rimavicou', 'Lomná',
'Lomné', 'Lomnička', 'Lontov', 'Lopašov', 'Lopúchov', 'Lopušné Pažite',
'Lošonec', 'Lovce', 'Lovča', 'Lovčica - Trubín', 'Lovinobaňa',
'Lozorno', 'Ložín', 'Lubeník', 'Lubina', 'Lúč na Ostrove', 'Lučatín',
'Lučenec', 'Lúčina', 'Lučivná', 'Lúčka', 'Lúčka', 'Lúčka', 'Lúčka',
'Lúčky', 'Lúčky', 'Lúčky', 'Lúčnica nad Žitavou', 'Ludanice',
'Ludrová', 'Luhyňa', 'Lúka', 'Lukačovce', 'Lukáčovce', 'Lukavica',
'Lukavica', 'Lukov', 'Lukovištia', 'Lúky', 'Lula', 'Lupoč', 'Lutila',
'Lutiše', 'Lužany', 'Lužany pri Topli', 'Lužianky', 'Lysá pod Makytou',
'Lysica', 'Ľubá', 'Ľubela', 'Ľubica', 'Ľubietová', 'Ľubiša', 'Ľubochňa',
'Ľuboreč', 'Ľuboriečka', 'Ľubotice', 'Ľubotín', 'Ľubovec', 'Ľudovítová',
'Ľutina', 'Ľutov', 'Macov', 'Mad', 'Madunice', 'Magnezitovce',
'Machulince', 'Majcichov', 'Majere', 'Majerovce', 'Makov', 'Makovce',
'Malacky', 'Malachov', 'Malá Čalomija', 'Malá Čausa', 'Malá Čierna',
'Malá Domaša', 'Malá Franková', 'Malá Hradná', 'Malá Ida',
'Malá Lehota', 'Malá Lodina', 'Malá nad Hronom', 'Malá Poľana',
'Malá Tŕňa', 'Málaš', 'Malatiná', 'Malatíny', 'Malcov', 'Malčice',
'Malé Borové', 'Malé Dvorníky', 'Malé Chyndice', 'Malé Hoste',
'Malé Kosihy', 'Malé Kozmálovce', 'Malé Kršteňany', 'Malé Lednice',
'Malé Leváre', 'Malé Ludince', 'Malé Ozorovce', 'Malé Raškovce',
'Malé Ripňany', 'Malé Straciny', 'Malé Trakany', 'Malé Uherce',
'Malé Vozokany', 'Malé Zálužie', 'Malé Zlievce', 'Málinec', 'Malinová',
'Malinovo', 'Malužiná', 'Malý Cetín', 'Malý Čepčín', 'Malý Horeš',
'Malý Kamenec', 'Malý Krtíš', 'Malý Lapáš', 'Malý Lipník',
'Malý Slavkov', 'Malý Slivník', 'Malý Šariš', 'Malženice', 'Mankovce',
'Maňa', 'Marcelová', 'Margecany', 'Marhaň', 'Marianka', 'Markovce',
'Markuška', 'Markušovce', 'Maršová - Rašov', 'Martin',
'Martin nad Žitavou', 'Martinček', 'Martinová', 'Martovce', 'Mašková',
'Maškovce', 'Matejovce nad Hornádom', 'Matiaška', 'Matiašovce',
'Matovce', 'Matúškovo', 'Matysová', 'Maťovské Vojkovce', 'Medovarce',
'Medvedie', 'Medveďov', 'Medzany', 'Medzev', 'Medzianky', 'Medzibrod',
'Medzibrodie nad Oravou', 'Medzilaborce', 'Melčice - Lieskové', 'Melek',
'Meliata', 'Mengusovce', 'Merašice', 'Merník', 'Mestečko', 'Mestisko',
'Mičakovce', 'Mierovo', 'Miezgovce', 'Michajlov', 'Michal na Ostrove',
'Michal nad Žitavou', 'Michalková', 'Michalok', 'Michalová',
'Michalovce', 'Michaľany', 'Miklušovce', 'Miková', 'Mikulášová',
'Mikušovce', 'Mikušovce', 'Milhosť', 'Miloslavov', 'Milpoš', 'Miňovce',
'Mirkovce', 'Miroľa', 'Mládzovo', 'Mlynárovce', 'Mlynčeky', 'Mlynica',
'Mlynky', 'Mníchova Lehota', 'Mníšek nad Hnilcom',
'Mníšek nad Popradom', 'Moča', 'Močenok', 'Močiar', 'Modra',
'Modra nad Cirochou', 'Modrany', 'Modrová', 'Modrovka', 'Modrý Kameň',
'Mojmírovce', 'Mojš', 'Mojtín', 'Mojzesovo', 'Mokrá Lúka', 'Mokrance',
'Mokroluh', 'Mokrý Háj', 'Moldava nad Bodvou', 'Moravany',
'Moravany nad Váhom', 'Moravské Lieskové', 'Moravský Svätý Ján',
'Most pri Bratislave', 'Mostová', 'Moškovec', 'Mošovce', 'Moštenica',
'Mošurov', 'Motešice', 'Motyčky', 'Môlča', 'Mrázovce', 'Mučín',
'Mudroňovo', 'Mudrovce', 'Muľa', 'Muráň', 'Muránska Dlhá Lúka',
'Muránska Huta', 'Muránska Lehota', 'Muránska Zdychava', 'Mútne',
'Mužla', 'Myjava', 'Myslina', 'Mýtna', 'Mýtne Ludany',
'Mýto pod Ďumbierom', 'Nacina Ves', 'Nadlice', 'Naháč', 'Nálepkovo',
'Námestovo', 'Nána', 'Nandraž', 'Necpaly', 'Nedanovce', 'Nedašovce',
'Neded', 'Nededza', 'Nedožery - Brezany', 'Nechválova Polianka',
'Nemce', 'Nemcovce', 'Nemcovce', 'Nemčice', 'Nemčiňany', 'Nemecká',
'Nemečky', 'Nemešany', 'Nemšová', 'Nenince', 'Neporadza', 'Neporadza',
'Nesvady', 'Nesluša', 'Neverice', 'Nevidzany', 'Nevidzany', 'Nevoľné',
'Nezbudská Lúčka', 'Nimnica', 'Nitra', 'Nitra nad Ipľom',
'Nitrianska Blatnica', 'Nitrianska Streda', 'Nitrianske Hrnčiarovce',
'Nitrianske Pravno', 'Nitrianske Rudno', 'Nitrianske Sučany', 'Nitrica',
'Nižná', 'Nižná', 'Nižná Boca', 'Nižná Hutka', 'Nižná Jablonka',
'Nižná Jedľová', 'Nižná Kamenica', 'Nižná Myšľa', 'Nižná Olšava',
'Nižná Pisaná', 'Nižná Polianka', 'Nižná Rybnica', 'Nižná Sitnica',
'Nižná Slaná', 'Nižná Voľa', 'Nižné Ladičkovce', 'Nižné Nemecké',
'Nižné Repaše', 'Nižné Ružbachy', 'Nižný Čaj', 'Nižný Hrabovec',
'Nižný Hrušov', 'Nižný Klátov', 'Nižný Komárnik', 'Nižný Kručov',
'Nižný Lánec', 'Nižný Mirošov', 'Nižný Orlík', 'Nižný Skálnik',
'Nižný Slavkov', 'Nižný Tvarožec', 'Nižný Žipov', 'Nolčovo', 'Norovce',
'Nová Baňa', 'Nová Bašta', 'Nová Bošáca', 'Nová Bystrica',
'Nová Dedina', 'Nová Dedinka', 'Nová Dubnica', 'Nová Kelča',
'Nová Lehota', 'Nová Lesná', 'Nová Ľubovňa', 'Nová Polhora',
'Nová Polianka', 'Nová Sedlica', 'Nová Ves', 'Nová Ves nad Váhom',
'Nová Ves nad Žitavou', 'Nová Vieska', 'Nováčany', 'Nováky', 'Nové Hony',
'Nové Mesto nad Váhom', 'Nové Sady', 'Nové Zámky', 'Novosad', 'Novoť',
'Nový Ruskov', 'Nový Salaš', 'Nový Tekov', 'Nový Život', 'Nýrovce',
'Ňagov', 'Ňárad', 'Obeckov', 'Obišovce', 'Oborín', 'Obručné', 'Obyce',
'Očkov', 'Očová', 'Odorín', 'Ohrady', 'Ohradzany', 'Ochodnica',
'Ochtiná', 'Okoč', 'Okoličná na Ostrove', 'Okrúhle', 'Okružná',
'Olcnava', 'Olejníkov', 'Olešná', 'Olováry', 'Olšovany', 'Oľdza',
'Oľka', 'Oľšavce', 'Oľšavica', 'Oľšavka', 'Oľšavka', 'Oľšinkov',
'Oľšov', 'Omastiná', 'Omšenie', 'Ondavka', 'Ondavské Matiašovce',
'Ondrašovce', 'Ondrašová', 'Ondrejovce', 'Opátka', 'Opatovce',
'Opatovce nad Nitrou', 'Opatovská Nová Ves', 'Opava', 'Opiná', 'Opoj',
'Oponice', 'Oravce', 'Orávka', 'Oravská Jasenica', 'Oravská Lesná',
'Oravská Polhora', 'Oravská Poruba', 'Oravský Biely Potok',
'Oravský Podzámok', 'Ordzovany', 'Orechová', 'Orechová Potôň',
'Oravské Veselé', 'Oreské', 'Oreské', 'Orešany', 'Orlov', 'Orovnica',
'Ortuťová', 'Osádka', 'Osadné', 'Osikov', 'Oslany', 'Osrblie',
'Ostrá Lúka', 'Ostratice', 'Ostrov', 'Ostrov', 'Ostrovany',
'Ostrý Grúň', 'Osturňa', 'Osuské', 'Oščadnica', 'Otrhánky', 'Otročok',
'Ovčiarsko', 'Ovčie', 'Ozdín', 'Ožďany', 'Pača', 'Padáň', 'Padarovce',
'Pakostov', 'Palárikovo', 'Palín', 'Palota', 'Panické Dravce', 'Paňa',
'Paňovce', 'Papín', 'Papradno', 'Parchovany', 'Parihuzovce', 'Párnica',
'Partizánska Ľupča', 'Partizánske', 'Pastovce', 'Pastuchov', 'Pašková',
'Paština Závada', 'Pata', 'Pataš', 'Pavčina Lehota', 'Pavlice',
'Pavlová', 'Pavlova Ves', 'Pavlovce', 'Pavlovce', 'Pavlovce nad Uhom',
'Pavľany', 'Pažiť', 'Pčoliné', 'Pečenice', 'Pečeňady', 'Pečeňany',
'Pečovská Nová Ves', 'Peder', 'Perín - Chym', 'Pernek', 'Petkovce',
'Petrikovce', 'Petrová', 'Petrova Lehota', 'Petrova Ves', 'Petrovany',
'Petrovce', 'Petrovce', 'Petrovce', 'Petrovce nad Laborcom',
'Petrovice', 'Petrovo', 'Pezinok', 'Piešťany', 'Pichne', 'Píla',
'Píla', 'Píla', 'Pinciná', 'Pinkovce', 'Piskorovce', 'Pitelová',
'Plášťovce', 'Plavé Vozokany', 'Plavecké Podhradie', 'Plavecký Mikuláš',
'Plavecký Peter', 'Plavecký Štvrtok', 'Plaveč', 'Plavnica',
'Plechotice', 'Pleš', 'Plešivec', 'Plevník - Drienové', 'Pliešovce',
'Ploské', 'Ploské', 'Pobedim', 'Počarová', 'Počúvadlo', 'Podbiel',
'Podbranč', 'Podbrezová', 'Podhájska', 'Podhorany', 'Podhorany',
'Podhorany', 'Podhorie', 'Podhorie', 'Podhoroď', 'Podhradie',
'Podhradie', 'Podhradie', 'Podhradík', 'Podkonice', 'Podkriváň',
'Podkylava', 'Podlužany', 'Podlužany', 'Podolie', 'Podolínec',
'Podrečany', 'Podskalie', 'Podtureň', 'Podvysoká', 'Podzámčok',
'Pohorelá', 'Pohranice', 'Pohronská Polhora', 'Pohronský Bukovec',
'Pohronský Ruskov', 'Pochabany', 'Pokryváč', 'Poliakovce', 'Polianka',
'Polichno', 'Polina', 'Poloma', 'Polomka', 'Poltár', 'Poluvsie',
'Poľanovce', 'Poľany', 'Poľný Kesov', 'Pongrácovce', 'Poniky',
'Poprad', 'Poproč', 'Poproč', 'Popudinské Močidľany', 'Poráč',
'Poriadie', 'Porostov', 'Poruba', 'Poruba pod Vihorlatom', 'Porúbka',
'Porúbka', 'Porúbka', 'Porúbka', 'Poša', 'Potok', 'Potok', 'Potoky',
'Potôčky', 'Potvorice', 'Považany', 'Považská Bystrica', 'Povina',
'Povoda', 'Povrazník', 'Pozba', 'Pozdišovce', 'Pôtor', 'Praha',
'Prakovce', 'Prašice', 'Prašník', 'Pravenec', 'Pravica', 'Pravotice',
'Práznovce', 'Prečín', 'Predajná', 'Predmier', 'Prenčov', 'Preseľany',
'Prestavlky', 'Prešov', 'Príbelce', 'Pribeník', 'Pribeta', 'Pribiš',
'Príbovce', 'Pribylina', 'Priechod', 'Priekopa', 'Priepasné',
'Prietrž', 'Prietržka', 'Prievaly', 'Prievidza', 'Prihradzany',
'Príkra', 'Príslop', 'Prituľany', 'Proč', 'Prochot', 'Prosačov',
'Prosiek', 'Prša', 'Pruské', 'Prusy', 'Pružina', 'Pstriná', 'Ptičie',
'Ptrukša', 'Pucov', 'Púchov', 'Pukanec', 'Pusté Čemerné', 'Pusté Pole',
'Pusté Sady', 'Pusté Úľany', 'Pušovce', 'Rabča', 'Rabčice', 'Rad',
'Radatice', 'Radava', 'Radimov', 'Radnovce', 'Radobica', 'Radoľa',
'Radoma', 'Radošina', 'Radošovce', 'Radošovce', 'Radôstka',
'Radvanovce', 'Radvaň nad Dunajom', 'Radvaň nad Laborcom', 'Radzovce',
'Rafajovce', 'Rajčany', 'Rajec', 'Rajecká Lesná', 'Rajecké Teplice',
'Rákoš', 'Rákoš', 'Raková', 'Rakovčík', 'Rakovec nad Ondavou',
'Rakovice', 'Rakovnica', 'Rakovo', 'Rakša', 'Rakúsy', 'Rakytník',
'Rankovce', 'Rapovce', 'Raslavice', 'Rastislavice', 'Rašice', 'Ratka',
'Ratková', 'Ratkovce', 'Ratkovo', 'Ratkovská Lehota', 'Ratkovská Suchá',
'Ratkovské Bystré', 'Ratnovce', 'Ratvaj', 'Ráztočno', 'Ráztoka',
'Ražňany', 'Reca', 'Regetovka', 'Rejdová', 'Reľov', 'Remeniny',
'Remetské Hámre', 'Renčišov', 'Repejov', 'Repište', 'Rešica', 'Rešov',
'Revúca', 'Revúcka Lehota', 'Riečka', 'Riečka', 'Richnava', 'Richvald',
'Rimavská Baňa', 'Rimavská Seč', 'Rimavská Sobota', 'Rimavské Brezovo',
'Rimavské Janovce', 'Rimavské Zalužany', 'Rohov', 'Rohovce', 'Rohožník',
'Rohožník', 'Rochovce', 'Rokycany', 'Rokytov', 'Rokytov pri Humennom',
'Rokytovce', 'Rosina', 'Roškovce', 'Roštár', 'Rovensko', 'Rovinka',
'Rovné', 'Rovné', 'Rovné', 'Rovňany', 'Rozhanovce', 'Rozložná',
'Roztoky', 'Rožkovany', 'Rožňava', 'Rožňavské Bystré', 'Rúbaň',
'Rudina', 'Rudinka', 'Rudinská', 'Rudlov', 'Rudná', 'Rudnianska Lehota',
'Rudník', 'Rudník', 'Rudno', 'Rudno nad Hronom', 'Rudňany', 'Rumanová',
'Rumince', 'Runina', 'Ruská', 'Ruská Bystrá', 'Ruská Kajňa',
'Ruská Nová Ves', 'Ruská Poruba', 'Ruská Volová', 'Ruská Voľa',
'Ruská Voľa nad Popradom', 'Ruskov', 'Ruskovce', 'Ruskovce',
'Ruský Hrabovec', 'Ruský Potok', 'Ružiná', 'Ružindol', 'Ružomberok',
'Rybany', 'Rybky', 'Rybník', 'Rybník', 'Rykynčice', 'Sabinov',
'Sačurov', 'Sádočné', 'Sady nad Torysou', 'Salka', 'Santovka', 'Sap',
'Sása', 'Sása', 'Sasinkovo', 'Sazdice', 'Sebedín - Bečov', 'Sebedražie',
'Sebechleby', 'Seč', 'Sečianky', 'Sečovce', 'Sečovská Polianka',
'Sedliacka Dubová', 'Sedliská', 'Sedmerovec', 'Sejkov', 'Sekule',
'Selce', 'Selce', 'Selce', 'Selec', 'Selice', 'Seľany', 'Semerovo',
'Senec', 'Seniakovce', 'Senica', 'Senné', 'Senné', 'Senohrad', 'Seňa',
'Sereď', 'Sielnica', 'Sihelné', 'Sihla', 'Sikenica', 'Sikenička',
'Siladice', 'Silica', 'Silická Brezová', 'Silická Jablonica', 'Sirk',
'Sirník', 'Skačany', 'Skalica', 'Skalité', 'Skalka nad Váhom', 'Skároš',
'Skerešovo', 'Sklabiná', 'Sklabinský Podzámok', 'Sklabiňa', 'Sklené',
'Sklené Teplice', 'Skrabské', 'Skýcov', 'Sládkovičovo', 'Slančík',
'Slanec', 'Slanská Huta', 'Slanské Nové Mesto', 'Slaská', 'Slatina',
'Slatina nad Bebravou', 'Slatinka nad Bebravou', 'Slatinské Lazy',
'Slatvina', 'Slavec', 'Slavkovce', 'Slavnica', 'Slavoška', 'Slavošovce',
'Slepčany', 'Sliač', 'Sliepkovce', 'Slizké', 'Slivník', 'Slopná',
'Slovany', 'Slovenská Kajňa', 'Slovenská Ľupča', 'Slovenská Nová Ves',
'Slovenská Ves', 'Slovenská Volová', 'Slovenské Ďarmoty',
'Slovenské Kľačany', 'Slovenské Krivé', 'Slovenské Nové Mesto',
'Slovenské Pravno', 'Slovenský Grob', 'Slovinky', 'Sľažany', 'Smilno',
'Smižany', 'Smolenice', 'Smolinské', 'Smolnícka Huta', 'Smolník',
'Smrdáky', 'Smrečany', 'Snakov', 'Snežnica', 'Snina', 'Socovce',
'Soblahov', 'Soboš', 'Sobotište', 'Sobrance', 'Sokolce', 'Sokolovce',
'Sokoľ', 'Sokoľany', 'Solčany', 'Solčianky', 'Sološnica', 'Soľ',
'Soľnička', 'Soľník', 'Somotor', 'Sopkovce', 'Spišská Belá',
'Spišská Nová Ves', 'Spišská Stará Ves', 'Spišská Teplica',
'Spišské Bystré', 'Spišské Hanušovce', 'Spišské Podhradie',
'Spišské Tomášovce', 'Spišské Vlachy', 'Spišský Hrhov', 'Spišský Hrušov',
'Spišský Štiavnik', 'Spišský Štvrtok', 'Stakčín', 'Stakčínska Roztoka',
'Stanča', 'Stankovany', 'Stankovce', 'Stará Bašta', 'Stará Bystrica',
'Stará Halič', 'Stará Huta', 'Stará Kremnička', 'Stará Lehota',
'Stará Lesná', 'Stará Ľubovňa', 'Stará Myjava', 'Stará Turá',
'Stará Voda', 'Staré', 'Staré Hory', 'Starina', 'Starý Hrádok',
'Starý Tekov', 'Staškov', 'Staškovce', 'Stebnícka Huta', 'Stebník',
'Stožok', 'Stráne pod Tatrami', 'Stránska', 'Stránske', 'Stráňany',
'Stráňavy', 'Stratená', 'Stráža', 'Strážne', 'Strážske', 'Strečno',
'Streda nad Bodrogom', 'Stredné Plachtince', 'Strekov', 'Strelníky',
'Stretava', 'Stretavka', 'Streženice', 'Strihovce', 'Stročín',
'Stropkov', 'Studená', 'Studenec', 'Studienka', 'Stuľany', 'Stupava',
'Stupné', 'Sučany', 'Sudince', 'Súdovce', 'Suchá Dolina', 'Suchá Hora',
'Suchá nad Parnou', 'Sucháň', 'Suché', 'Suché Brezovo', 'Suchohrad',
'Sukov', 'Sulín', 'Súlovce', 'Súľov - Hradná', 'Sušany', 'Sútor',
'Svätá Mária', 'Svätoplukovo', 'Svätuš', 'Svätuše', 'Svätý Anton',
'Svätý Jur', 'Svätý Kríž', 'Svätý Peter', 'Svederník', 'Sverepec',
'Sveržov', 'Svetlice', 'Svidnička', 'Svidník', 'Svinia', 'Svinica',
'Svinice', 'Svinná', 'Svit', 'Svodín', 'Svrbice', 'Svrčinovec', 'Šahy',
'Šajdíkove Humence', 'Šalgovce', 'Šalgočka', 'Šalov', 'Šaľa', 'Šambron',
'Šamorín', 'Šamudovce', 'Šandal', 'Šarbov', 'Šarišská Poruba',
'Šarišská Trstená', 'Šarišské Bohdanovce', 'Šarišské Čierne',
'Šarišské Dravce', 'Šarišské Jastrabie', 'Šarišské Michaľany',
'Šarišské Sokolovce', 'Šarišský Štiavnik', 'Šarkan', 'Šarovce',
'Šašová', 'Šaštín - Stráže', 'Šávoľ', 'Šelpice', 'Šemetkovce', 'Šemša',
'Šenkvice', 'Šiatorská Bukovinka', 'Šiba', 'Šíd', 'Šimonovce',
'Šindliar', 'Šintava', 'Šípkov', 'Šípkové', 'Širákov', 'Širkovce',
'Široké', 'Šišov', 'Šivetice', 'Šmigovec', 'Šoltýska', 'Šoporňa',
'Špačince', 'Špania Dolina', 'Španie Pole', 'Šrobárová', 'Štefanov',
'Štefanov nad Oravou', 'Štefanová', 'Štefanovce', 'Štefanovce',
'Štefanovičová', 'Štefurov', 'Šterusy', 'Štiavnické Bane',
'Štiavnička', 'Štiavnik', 'Štítnik', 'Štós', 'Štôla', 'Štrba',
'Štrkovec', 'Štúrovo', 'Štvrtok', 'Štvrtok na Ostrove', 'Šuľa',
'Šumiac', 'Šuňava', 'Šurany', 'Šurianky', 'Šurice', 'Šúrovce',
'Šútovo', 'Šútovce', 'Švábovce', 'Švedlár', 'Švošov', 'Tachty',
'Tajná', 'Tajov', 'Tarnov', 'Tatranská Javorina', 'Tašuľa', 'Tehla',
'Tekolďany', 'Tekovská Breznica', 'Tekovské Lužany', 'Tekovské Nemce',
'Tekovský Hrádok', 'Telgárt', 'Telince', 'Temeš', 'Teplička',
'Teplička nad Váhom', 'Tepličky', 'Teplý Vrch', 'Terany', 'Terchová',
'Teriakovce', 'Terňa', 'Tesáre', 'Tesárske Mlyňany', 'Tešedíkovo',
'Tibava', 'Tichý Potok', 'Timoradza', 'Tisinec', 'Tisovec', 'Tlmače',
'Točnica', 'Tokajík', 'Tomášikovo', 'Tomášov', 'Tomášovce',
'Tomášovce', 'Topoľa', 'Topoľčany', 'Topoľčianky', 'Topoľnica',
'Topoľníky', 'Topoľovka', 'Toporec', 'Tornaľa', 'Torysa', 'Torysky',
'Tovarné', 'Tovarnianska Polianka', 'Tovarníky', 'Tôň', 'Trakovice',
'Trávnica', 'Trávnik', 'Trebatice', 'Trebejov', 'Trebeľovce',
'Trebichava', 'Trebišov', 'Trebostovo', 'Trebušovce', 'Trenč',
'Trenčianska Teplá', 'Trenčianska Turná', 'Trenčianske Bohuslavice',
'Trenčianske Jastrabie', 'Trenčianske Mitice', 'Trenčianske Stankovce',
'Trenčianske Teplice', 'Trenčín', 'Trhová Hradská', 'Trhovište',
'Trnava', 'Trnavá Hora', 'Trnava pri Laborci', 'Trnávka', 'Trnávka',
'Trnkov', 'Trnovec', 'Trnovec nad Váhom', 'Trnovo', 'Tročany', 'Trpín',
'Trstená', 'Trstená na Ostrove', 'Trstené', 'Trstené pri Hornáde',
'Trstice', 'Trstín', 'Trsťany', 'Tŕnie', 'Tuhár', 'Tuhrina', 'Tuchyňa',
'Tulčík', 'Tupá', 'Turá', 'Turany', 'Turany nad Ondavou', 'Turcovce',
'Turček', 'Turčianky', 'Turčianska Štiavnička', 'Turčianske Jaseno',
'Turčianske Kľačany', 'Turčianske Teplice', 'Turčiansky Ďur',
'Turčiansky Peter', 'Turčok', 'Turecká', 'Tureň', 'Turie', 'Turík',
'Turnianska Nová Ves', 'Turňa nad Bodvou', 'Turová', 'Turzovka',
'Tušice', 'Tušická Nová Ves', 'Tužina', 'Tvarožná', 'Tvrdomestice',
'Tvrdošín', 'Tvrdošovce', 'Ťapešovo', 'Ubľa', 'Úbrež', 'Udavské',
'Udiča', 'Údol', 'Uhliská', 'Úhorná', 'Uhorská Ves', 'Uhorské',
'Uhrovec', 'Uhrovské Podhradie', 'Ulič', 'Uličské Krivé', 'Uloža',
'Úľany nad Žitavou', 'Unín', 'Uňatín', 'Urmince', 'Utekáč', 'Uzovce',
'Uzovská Panica', 'Uzovské Pekľany', 'Uzovský Šalgov', 'Vaďovce',
'Vagrinec', 'Váhovce', 'Vajkovce', 'Valaliky', 'Valaská',
'Valaská Belá', 'Valaská Dubová', 'Valaškovce (vojenský obvod)',
'Valča', 'Valentovce', 'Valice', 'Valkovce', 'Vaľkovňa', 'Vaniškovce',
'Vápeník', 'Varadka', 'Varechovce', 'Varhaňovce', 'Varín', 'Vasiľov',
'Vavrečka', 'Vavrinec', 'Vavrišovo', 'Važec', 'Vechec', 'Velčice',
'Veličná', 'Velušovce', 'Veľaty', 'Veľká Čausa', 'Veľká Čierna',
'Veľká Dolina', 'Veľká Franková', 'Veľká Hradná', 'Veľká Ida',
'Veľká Lesná', 'Veľká Lodina', 'Veľká Lomnica', 'Veľká Mača',
'Veľká Paka', 'Veľká Tŕňa', 'Veľké Bierovce', 'Veľké Blahovo',
'Veľké Borové', 'Veľké Držkovce', 'Veľké Dvorany', 'Veľké Dvorníky',
'Veľké Hoste', 'Veľké Chlievany', 'Veľké Chyndice', 'Veľké Kapušany',
'Veľké Kosihy', 'Veľké Kostoľany', 'Veľké Kozmálovce', 'Veľké Kršteňany',
'Veľké Leváre', 'Veľké Lovce', 'Veľké Ludince', 'Veľké Orvište',
'Veľké Ozorovce', 'Veľké Raškovce', 'Veľké Revištia', 'Veľké Ripňany',
'Veľké Rovné', 'Veľké Slemence', 'Veľké Trakany', 'Veľké Turovce',
'Veľké Uherce', 'Veľké Úľany', 'Veľké Vozokany', 'Veľké Zálužie',
'Veľkrop', 'Veľký Biel', 'Veľký Cetín', 'Veľký Čepčín', 'Veľký Ďur',
'Veľký Folkmar', 'Veľký Grob', 'Veľký Horeš', 'Veľký Kamenec',
'Veľký Klíž', 'Veľký Krtíš', 'Veľký Kýr', 'Veľký Lapáš', 'Veľký Lipník',
'Veľký Meder', 'Veľký Slavkov', 'Veľký Slivník', 'Veľký Šariš',
'Veľopolie', 'Vernár', 'Veselé', 'Veterná Poruba', 'Vieska', 'Vieska',
'Vieska nad Žitavou', 'Vikartovce', 'Vinica', 'Viničky', 'Viničné',
'Vinné', 'Vinodol', 'Vinohrady nad Váhom', 'Vinosady', 'Virt',
'Vislanka', 'Vislava', 'Visolaje', 'Višňov', 'Višňové', 'Višňové',
'Vištuk', 'Vitanová', 'Vítkovce', 'Víťaz', 'Víťazovce', 'Vlača',
'Vladiča', 'Vlachovo', 'Vlachy', 'Vlčany', 'Vlčkovce', 'Vlkas',
'Vlková', 'Vlkovce', 'Vlky', 'Voderady', 'Vojany', 'Vojčice', 'Vojka',
'Vojka nad Dunajom', 'Vojkovce', 'Vojnatina', 'Vojňany', 'Vojtovce',
'Volica', 'Volkovce', 'Voľa', 'Vozokany', 'Vozokany', 'Vráble',
'Vrádište', 'Vrakúň', 'Vranov nad Topľou', 'Vrbnica', 'Vrbov',
'Vrbovce', 'Vrbová nad Váhom', 'Vrbové', 'Vrchteplá', 'Vrícko',
'Vršatské Podhradie', 'Vrútky', 'Vtáčkovce', 'Výborná',
'Výčapy - Opatovce', 'Vydrany', 'Vydrná', 'Vydrník', 'Východná',
'Výrava', 'Vysočany', 'Vysoká', 'Vysoká', 'Vysoká nad Kysucou',
'Vysoká nad Uhom', 'Vysoká pri Morave', 'Vysoké Tatry', 'Vyškovce',
'Vyškovce nad Ipľom', 'Vyšná Boca', 'Vyšná Hutka', 'Vyšná Jablonka',
'Vyšná Jedľová', 'Vyšná Kamenica', 'Vyšná Myšľa', 'Vyšná Olšava',
'Vyšná Pisaná', 'Vyšná Polianka', 'Vyšná Rybnica', 'Vyšná Sitnica',
'Vyšná Slaná', 'Vyšná Šebastová', 'Vyšná Voľa', 'Vyšné Ladičkovce',
'Vyšné nad Hronom', 'Vyšné Nemecké', 'Vyšné Remety', 'Vyšné Repaše',
'Vyšné Ružbachy', 'Vyšný Čaj', 'Vyšný Hrabovec', 'Vyšný Hrušov',
'Vyšný Kazimír', 'Vyšný Klátov', 'Vyšný Komárnik', 'Vyšný Kručov',
'Vyšný Kubín', 'Vyšný Mirošov', 'Vyšný Orlík', 'Vyšný Slavkov',
'Vyšný Tvarožec', 'Vyšný Žipov', 'Zábiedovo', 'Záborie', 'Záborské',
'Zádiel', 'Záhor', 'Záhorie (vojenský obvod)', 'Záhorská Ves',
'Záhradné', 'Zákamenné', 'Zákopčie', 'Zalaba', 'Zálesie', 'Zálesie',
'Zalužice', 'Zamarovce', 'Zámutov', 'Záriečie', 'Záskalie', 'Zatín',
'Závada', 'Závada', 'Závadka', 'Závadka', 'Závadka', 'Zavar',
'Závažná Poruba', 'Závod', 'Zázrivá', 'Zbehňov', 'Zbehy', 'Zboj',
'Zbojné', 'Zborov', 'Zborov nad Bystricou', 'Zbrojníky',
'Zbudská Belá', 'Zbudské Dlhé', 'Zbudza', 'Zbyňov', 'Zeleneč',
'Zemianska Olča', 'Zemianske Kostoľany', 'Zemianske Podhradie',
'Zemianske Sady', 'Zemné', 'Zemplín', 'Zemplínska Nová Ves',
'Zemplínska Široká', 'Zemplínska Teplica', 'Zemplínske Hámre',
'Zemplínske Hradište', 'Zemplínske Jastrabie', 'Zemplínske Kopčany',
'Zemplínsky Branč', 'Zlatá Baňa', 'Zlatá Idka', 'Zlaté', 'Zlaté Klasy',
'Zlaté Moravce', 'Zlatná na Ostrove', 'Zlatník', 'Zlatníky', 'Zlatno',
'Zlatno', 'Zliechov', 'Zohor', 'Zubák', 'Zuberec', 'Zubné',
'Zubrohlava', 'Zvolen', 'Zvončín', 'Žabokreky', 'Žabokreky nad Nitrou',
'Žakarovce', 'Žakovce', 'Žalobín', 'Žarnov', 'Žarnovica', 'Žaškov',
'Žbince', 'Ždaňa', 'Ždiar', 'Žehňa', 'Žehra', 'Železník', 'Želiezovce',
'Želmanovce', 'Žemberovce', 'Žemliare', 'Žiar', 'Žiar',
'Žiar nad Hronom', 'Žihárec', 'Žikava', 'Žilina', 'Žipov', 'Žirany',
'Žitavany', 'Žitavce', 'Žitná - Radiša', 'Žlkovce', 'Župčany', )
streets = (
'Adámiho', 'Agátová', 'Ahoj', 'Albánska', 'Albrechtova', 'Alejová',
'Alešova', 'Alstrova', 'Alžbetínska', 'Alžbety Gwerkovej',
'Amarelková', 'Ambroseho', 'Ambrova', 'Ambrušova', 'Americká',
'Americké námestie', 'Americké námestie', 'Amurská', 'Andreja Mráza',
'Andreja Plávku', 'Andrusovova', 'Anenská', 'Anenská', 'Anízová',
'Antická', 'Antolská', 'Arménska', 'Astronomická', 'Astrová',
'Avarská', 'Azalková', 'Azovská', 'Babuškova', 'Bagarova', 'Báger',
'Bahniatková', 'Bachova', 'Bajkalská', 'Bajkalská', 'Bajkalská',
'Bajkalská', 'Bajkalská', 'Bajkalská', 'Bajzova', 'Bakošova',
'Balkánska', 'Baltská', 'Bancíkovej', 'Banícka', 'Baničova',
'Baníkova', 'Banskobystrická', 'Banšelova', 'Bardejovská', 'Bárdošova',
'Barónka', 'Bartókova', 'Bartoňova', 'Bartoškova', 'Baštová',
'Batkova', 'Bazalková', 'Bazová', 'Bazovského', 'Bažantia',
'Beblavého', 'Bebravská', 'Beckovská', 'Bedľová', 'Begóniová',
'Belániková', 'Belehradská', 'Belianska', 'Belinského', 'Bellova',
'Belopotockého', 'Beňadická', 'Bencúrova', 'Benediktiho', 'Beniakova',
'Beňovského', 'Bernolákova', 'Beskydská', 'Betliarska', 'Bezekova',
'Bezručova', 'Biela', 'Bielkova', 'Bieloruská', 'Bilíkova',
'Biskupická', 'Björnsonova', 'Blagoevova', 'Blatnická', 'Blatúchová',
'Bleduľová', 'Blumentálska', 'Blyskáčová', 'Bočná', 'Bodliaková',
'Bodrocká', 'Bodvianska', 'Bohrova', 'Bohúňova', 'Bojnická',
'Boragová', 'Borekova', 'Borievková', 'Borinská', 'Borodáčova',
'Borovicová', 'Borská', 'Bosákova', 'Boskovičova', 'Bošániho',
'Botanická', 'Bottova', 'Boženy Němcovej', 'Bôrik', 'Bradáčova',
'Bradlianska', 'Brančská', 'Bratislava-Vinohrady', 'Bratislavská',
'Bratská', 'Brečtanová', 'Brestová', 'Brezová', 'Brezovská', 'Brežná',
'Bridlicová', 'Briežky', 'Brigádnická', 'Brižitská', 'Brnianska',
'Brodná', 'Brodská', 'Brokolicová', 'Bronzová', 'Broskyňová',
'Bršlenová', 'Brumovická', 'Brusnicová', 'Břeclavská', 'Bučinová',
'Budatínska', 'Budatínska', 'Budatínska', 'Búdkova cesta',
'Budovateľská', 'Budyšínska', 'Budyšínska', 'Bujnáková', 'Buková',
'Bukovinská', 'Bukureštská', 'Bulharská', 'Bulíkova', 'Bullova',
'Burgundská', 'Buzalkova', 'Bystrého', 'Bystrická', 'BzovIcka',
'Cabanova', 'Cablkova', 'Cádrova', 'Cesta mládeže', 'Cesta mládeže',
'Cesta na Červený most', 'Cesta na Červený most', 'Cesta na Kamzík',
'Cesta na Klanec', 'Cesta na Senec', 'Cígeľská', 'Cikkerova',
'Cintorínska', 'Cintulova', 'Colnícka', 'Cukrová', 'Cyklámenová',
'Cyprichova', 'Cyprichova', 'Cyrilova', 'Čachtická', 'Čajakova',
'Čajakova', 'Čajkovského', 'Čakanková', 'Čaklovská', 'Čalovská',
'Čapajevova', 'Čapkova', 'Čárskeho', 'Čavojského', 'Čečinová',
'Čelakovského', 'Čerešňová', 'Černicová', 'Černockého', 'Černockého',
'Černyševského', 'Červená', 'Červeňákova', 'Červeňova', 'Česká',
'Československých par', 'Československých tan', 'Čiernohorská',
'Čiernovodská', 'Čierny chodník', 'Čiližská', 'Čipkárska', 'Čmelíkova',
'Čmeľovec', 'Čremchová', 'Čučoriedková', 'Čulenova',
'Daliborovo námestie', 'Damborského', 'Dankovského', 'Dargovská',
'Ďatelinová', 'Daxnerovo námestie', 'Delená', 'Delená cesta',
'Demänovská', 'Desiata', 'Detvianska', 'Devätinová', 'Deviata',
'Devínska cesta', 'Devínska cesta - kam', 'Devínske jazero', 'Dlhá',
'Dlhé diely I.', 'Dlhé diely II.', 'Dlhé diely III.', 'Dneperská',
'Dobrovičova', 'Dobrovičova', 'Dobrovského', 'Dobšinského',
'Dohnalova', 'Dohnányho', 'Doležalova', 'Dolná', 'Dolné Koruny',
'Dolnokorunská', 'Dolnozemská cesta', 'Domašská', 'Domkárska',
'Domové role', 'Donnerova', 'Donovalova', 'Donská', 'Dopravná',
'Dorastenecká', 'Dostojevského rad', 'Dr. Vladimíra Clemen',
'Dražická', 'Drevená', 'Drieňová', 'Drieňová', 'Drieňová', 'Drobného',
'Drotárska cesta', 'Drotárska cesta', 'Drotárska cesta', 'Druhá',
'Druidská', 'Družicová', 'Družobná', 'Družstevná', 'Dubnická',
'Dubová', 'Dúbravčická', 'Dúbravská cesta', 'Dudova', 'Dudvážska',
'Dulovo námestie', 'Dulovo námestie', 'Ďumbierska', 'Dunajská',
'Ďurgalova', 'Dvanásta', 'Dvojkrížna', 'Dvojkrížna',
'Dvořákovo nábrežie', 'Edisonova', 'Egrešová', 'Einsteinova',
'Eisnerova', 'Elektrárenská', 'Estónska', 'Estónska', 'Exnárova',
'F. Kostku', 'Fadruszova', 'Fajnorovo nábrežie', 'Fándlyho', 'Farebná',
'Farská', 'Farského', 'Fazuľová', 'Fedákova', 'Fedinova',
'Ferienčíkova', 'Fialkové údolie', 'Fibichova', 'Fikusová',
'Filiálne nádražie', 'Fláviovská', 'Flöglova', 'Floriánske námestie',
'Fraňa Kráľa', 'Francisciho', 'Francúzskych partizá', 'Frankovská',
'Františkánska', 'Františkánske námest', 'Františka Schmuckera',
'Furdekova', 'Furdekova', 'Furmanská', 'Furmintská', 'Gabčíkova',
'Gagarinova', 'Gagarinova', 'Gagarinova', 'Gajarská', 'Gajc', 'Gajova',
'Galaktická', 'Galandova', 'Galbavého', 'Gallayova', 'Gallova',
'Galvaniho', 'Gašparíkova', 'Gaštanová', 'Gavlovičova', 'Gbelská',
'Gelnická', 'Gemerská', 'Geologická', 'Georgínová', 'Gercenova',
'Gerulatská', 'Gessayova', 'Gettingová', 'Glavica', 'Godrova',
'Gogoľova', 'Goláňova', 'Gondova', 'Goralská', 'Gorazdova', 'Gorkého',
'Gregorovej', 'Gronárska', 'Grösslingova', 'Gruzínska', 'Gunduličova',
'Guothova', 'Gusevova', 'Haanova', 'Haburská', 'Hadia cesta',
'Hadriánová', 'Hagarova', 'Hagarova', 'Hájová', 'Halašova', 'Hálkova',
'Hálova', 'Hamuliakova', 'Hanácka', 'Handlovská', 'Hanulova',
'Hanulova', 'Hany Meličkovej', 'Hargašova', 'Harmanecká', 'Harmincova',
'Hasičská', 'Hattalova', 'Havelkova', 'Havlíčkova', 'Havrania',
'Haydnova', 'Hečkova', 'Herlianska', 'Herlianska', 'Heydukova',
'Heyrovského', 'Hlaváčikova', 'Hlavatého', 'Hlavná', 'Hlavné námestie',
'Hlbinná', 'Hlboká cesta', 'Hlboká cesta', 'Hlinická', 'Hlinická',
'Hlivová', 'Hlohová', 'Hlučínska', 'Hnilecká', 'Hodálova',
'Hodonínska', 'Hodonínska', 'Hodonínska', 'Hodžovo námestie',
'Holekova', 'Holíčska', 'Hollého', 'Holubyho', 'Homolova',
'Hontianska', 'Horárska', 'Horcová', 'Horčičná', 'Horná',
'Horná Vančurová', 'Hornádska', 'Horné Židiny', 'Horská', 'Horská',
'Horská', 'Hospodárska', 'Hrabový chodník', 'Hrad', 'Hradištná',
'Hradná', 'Hradné údolie', 'Hradská', 'Hrachová', 'Hraničiarska',
'Hraničná', 'Hraničný priechod-Ču', 'Hrdličkova', 'Hrebendova',
'Hríbová', 'Hriňovská', 'Hrobákova', 'Hrobárska', 'Hroboňova',
'Hronska', 'Hroznová', 'Hrušková', 'Hrušovská', 'Hubeného', 'Hubeného',
'Hudecova', 'Humenské námestie', 'Hummelova', 'Hurbanovo námestie',
'Hurbanovo námestie', 'Husova', 'Húščavova', 'Hutnícka', 'Hviezdna',
'Hviezdicová', 'Hviezdoslavova', 'Hviezdoslavovo námes', 'Hyacintová',
'Hybešova', 'Hydinárska', 'Hýrošova', 'Chalupkova', 'Charkovská',
'Chemická', 'Chladná', 'Chlumeckého', 'Chmeľová', 'Chorvátska',
'Chorvátska', 'Chotárna', 'Chrasťová', 'Chrenová', 'Chrobákova',
'Ihličnatá', 'Ihrisková', 'Iľjušinova', 'Ilkovičova', 'Ílová',
'Ilýrska', 'Imelová', 'Inovecká', 'Inovecká', 'Ipeľská', 'Irisová',
'Irkutská', 'Iršajská', 'Iskerníková', 'Istrijská', 'Ivana Blazeviča',
'Ivana Bukovčana', 'Ivana Horvátha', 'Ivánska cesta', 'J.C.Hronského',
'Jabloňová', 'Jačmenná', 'Jadranská', 'Jadrová', 'Jahodová',
'Jakabova', 'Jakubíkova', 'Jakubovo námestie', 'Jakubská', 'Jalovcová',
'Jamnického', 'Jána Jonáša', 'Jána Poničana', 'Jána Raka',
'Jána Smreka', 'Jána Stanislava', 'Janáčkova', 'Jančova',
'Janíkove role', 'Janka Kráľa', 'Jankolova', 'Jánošíkova', 'Jánoškova',
'Janotova', 'Janšákova', 'Jantárová', 'Jantárová', 'Jantárová cesta',
'Jarabinková', 'Jarná', 'Jaroslavova', 'Jarošova', 'Jasencová',
'Jaseňová', 'Jaskový rad', 'Jasná', 'Jasovská', 'Jastrabia', 'Jašíkova',
'Javorinská', 'Javorová', 'Jazdecká', 'Jazerná', 'Jazmínová',
'Jedenásta', 'Jedlíkova', 'Jedľová', 'Jégého', 'Jegeneš', 'Jelačičova',
'Jelenia', 'Jelšová', 'Jeséniova', 'Jesenná', 'Jesenského',
'Jesienková', 'Jiráskova', 'Jiskrova', 'Jókaiho', 'Jozefa Mikisitsa',
'Jozefa Vachovského', 'Jozefská', 'Júlová', 'Junácka', 'Jungmannova',
'Júnová', 'Jurigovo námestie', 'Jurkovičova', 'Jurovského', 'Jurská',
'Justičná', 'K horárskej studni', 'K lomu', 'K pasienkom',
'K Železnej studienke', 'Kadnárova', 'Kadnárova', 'Kadnárova',
'Kadnárova', 'Kadnárova', 'Kafendova', 'Kalinčiakova', 'Kalinová',
'Kalištná', 'Kaméliová', 'Kamenárska', 'Kamenné námestie', 'Kamilková',
'Kamilková', 'Kamzík', 'Kapicova', 'Kapitulská', 'Kapitulský dvor',
'Kaplinská', 'Kapucínska', 'Kapušianska', 'Karadžičova', 'Karadžičova',
'Karadžičova', 'Karadžičova', 'Karloveská', 'Karloveské rameno',
'Karpatská', 'Karpatské námestie', 'Kašmírska', 'Kaštielska',
'Kataríny Brúderovej', 'Kaukazská', 'Kazanská', 'Kazanská', 'Kazanská',
'Keltská', 'Kempelenova', 'Ketelec', 'Kežmarské námestie',
'Kladnianska', 'Klariská', 'Klásková', 'Kláštorská', 'Klatovská',
'Klatovská', 'Klemensova', 'Klenová', 'Klimkovičova', 'Klincová',
'Klobučnícka', 'Klokočova', 'Kľukatá', 'Kĺzavá', 'Kmeťovo námestie',
'Knižková dolina', 'Koceľova', 'Kočánkova', 'Kohútova', 'Koľajná',
'Kolárska', 'Kolískova', 'Kollárova', 'Kollárovo námestie',
'Kollárovo námestie', 'Kolmá', 'Komárňanská', 'Komárnická',
'Komárnická', 'Komárovská', 'Komenského námestie', 'Kominárska',
'Komonicová', 'Koncová', 'Koniarkova', 'Konopná', 'Konvalinková',
'Konventná', 'Kopanice', 'Kopčianska', 'Koperníkova', 'Koprivnická',
'Koprivnická', 'Koprivnická', 'Korabinského', 'Kórejská', 'Koreničova',
'Koreňová', 'Korunská', 'Korytnická', 'Kosatcová', 'Kosodrevinová',
'Kostlivého', 'Kostolná', 'Košická', 'Košická', 'Košická', 'Kovácsova',
'Kováčska', 'Kovorobotnícka', 'Kovová', 'Kozia', 'Koziarka',
'Kozičova', 'Kozmonautická', 'Kožušnícka', 'Kôprová', 'Kôstková',
'Krahulčia', 'Krajinská', 'Krajinská cesta', 'Krajná', 'Krakovská',
'Kráľovské údolie', 'Krasinského', 'Kraskova', 'Krásna',
'Krásnohorská', 'Krasovského', 'Kratiny', 'Krátka', 'Krčméryho',
'Kremeľská', 'Kremencová', 'Kremnická', 'Kresánkova', 'Kríková',
'Krivá', 'Križkova', 'Krížna', 'Krížna', 'Krížna', 'Krížna',
'Krmanova', 'Krokusová', 'Krompašská', 'Krupinská', 'Kubačova',
'Kubániho', 'Kubínska', 'Kudlákova', 'Kuklovská', 'Kúkoľová',
'Kukučínova', 'Kukuričná', 'Kulíškova', 'Kultúrna', 'Kuneradská',
'Kupeckého', 'Kúpeľná', 'Kurucova', 'Kutlíkova', 'Kútska',
'Kutuzovova', 'Kuzmányho', 'Kvačalova', 'Kvetinárska', 'Kvetná',
'Kýčerského', 'Kyjevská', 'Kysucká', 'Laborecká', 'Lackova',
'Ladislava Batthyányh', 'Ladislava Dérera', 'Ladislava Sáru', 'Ľadová',
'Ladzianskeho', 'Lachova', 'Ľaliová', 'Lamačská cesta',
'Lamačská cesta', 'Lamačská cesta', 'Lamanského', 'Landauova',
'Landererova', 'Langsfeldova', 'Ľanová', 'Laskomerského', 'Laténská',
'Latorická', 'Laučekova', 'Laurinská', 'Lazaretská', 'Lazaretská',
'Leánska', 'Lediny', 'Legerského', 'Legionárska', 'Legionárska',
'Lehotského', 'Lehotského', 'Leknová', 'Lenardova', 'Lermontovova',
'Lesná', 'Lesnícka', 'Leškova', 'Letecká', 'Letisko M.R.Štefánik',
'Letná', 'Levanduľová', 'Levárska', 'Levická', 'Levočská', 'Lidická',
'Lieskovec', 'Lieskovcová', 'Lieskovská cesta', 'Lietavská',
'Lichardova', 'Likavská', 'Limbová', 'Linzbothova', 'Lipnicová',
'Lipová', 'Lipského', 'Liptovská', 'Lisovňa', 'Listová', 'Líščie nivy',
'Líščie údolie', 'Litovská', 'Lodná', 'Lombardiniho', 'Lomnická',
'Lomonosovova', 'Longobardská', 'Lónyaiová', 'Lopenícka', 'Lotyšská',
'Lovinského', 'Lozornianská', 'Ľubietovská', 'Ľubinská', 'Ľubľanská',
'Ľubochnianska', 'Ľubovnianska', 'Ľubovníková', 'Ľudové námestie',
'Ľudovíta Fullu', 'Luhačovická', 'Lužická', 'Lúčna', 'Lužná',
'Lýcejná', 'Lykovcová', 'Lysákova', 'M. Hella', 'Madáchova', 'Maďarská',
'Magnetová', 'Magnezitová', 'Magnóliová', 'Magurská', 'Macharova',
'Máchova', 'Majakovského', 'Majerníkova', 'Majerská', 'Májkova',
'Majoránová', 'Májová', 'Maková', 'Makovického', 'Malá', 'Malagová',
'Malé pálenisko', 'Malinová', 'Malodunajská', 'Malokarpatské námest',
'Malý Draždiak', 'Malý trh', 'Mamateyova', 'Mamateyova', 'Mandľová',
'Mandľovníková', 'Mánesovo námestie', 'Margarétková', 'Marhuľová',
'Mariánska', 'Marie Curie-Sklodows', 'Márie Medveďovej', 'Markova',
'Marótyho', 'Martákovej', 'Martinčekova', 'Martinčekova',
'Martinengova', 'Martinská', 'Mateja Bela', 'Matejkova', 'Matičná',
'Mätová', 'Matúškova', 'Matúšova', 'Mečíkova', 'Medená', 'Medová',
'Medovková', 'Medzierka', 'Medzilaborecká', 'Mesačná', 'Mestská',
'Meteorová', 'Metodova', 'Mickiewiczova', 'Mierová', 'Michalská',
'Mikovíniho', 'Mikulášska', 'Milana Marečka', 'Milana Pišúta',
'Miletičova', 'Miletičova', 'Mišíkova', 'Mišíkova', 'Mišíkova',
'Mládežnícka', 'Mliekárenská', 'Mlynarovičova', 'Mlynská',
'Mlynská dolina', 'Mlynská dolina', 'Mlynská dolina', 'Mlynské luhy',
'Mlynské nivy', 'Mlynské nivy', 'Mlynské nivy', 'Mlynské nivy',
'Mlynské nivy', 'Modranská', 'Modricová', 'Modrý chodník', 'Mojmírova',
'Mokráň záhon', 'Mokrohájska cesta', 'Moldavská', 'Molecova',
'Monardová', 'Morava', 'Moravská', 'Morušova', 'Moskovská', 'Most SNP',
'Mostná', 'Mostová', 'Mošovského', 'Motýlia', 'Moyšova', 'Moyzesova',
'Mozartova', 'Mramorová', 'Mraziarenská', 'Mrázova', 'Mudrochova',
'Mudroňova', 'Mudroňova', 'Mudroňova', 'Muchovo námestie', 'Muránska',
'Murgašova', 'Murnice', 'Muškátová', 'Muštová', 'Múzejná', 'Myjavská',
'Mýtna', 'Mýtna', 'Na Baránku', 'Na barine', 'Na Brezinách',
'Na doline', 'Na grbe', 'Na Grunte', 'Na Holom vrchu', 'Na hrádzi',
'Na Hrebienku', 'Na hriadkach', 'Na Kalvárii', 'Na kaštieli',
'Na kopci', 'Na križovatkách', 'Na lánoch', 'Na medzi', 'Na mýte',
'Na pántoch', 'Na pasekách', 'Na paši', 'Na pažiti', 'Na piesku',
'Na Revíne', 'Na Riviére', 'Na rozhliadke', 'Na Sitine', 'Na skale',
'Na Slanci', 'Na Slavíne', 'Na spojke', 'Na stráni', 'Na Štyridsiatku',
'Na úvrati', 'Na varte', 'Na Vlkovkách', 'Na vrátkach', 'Na vŕšku',
'Na vyhliadke', 'Na výslní', 'Na Zlatej nohe', 'Nábělkova',
'Nábrežie arm. gen. L', 'Nábrežná', 'Nad Dunajom', 'Nad Gronárom',
'Nad jazierkom', 'Nad kúriou', 'Nad lomom', 'Nad lúčkami',
'Nad lúčkami', 'Nad ostrovom', 'Nad Sihoťou', 'Nákovná', 'Nákupná',
'Námestie 1. mája', 'Námestie 6. apríla', 'Námestie Alexandra D',
'Námestie Andreja Hli', 'Námestie Biely kríž', 'Námestie Hraničiarov',
'Námestie Jána Kostru', 'Námestie Jána Pavla', 'Námestie Ľudovíta Št',
'Námestie Martina Ben', 'Námestie Rodiny', 'Námestie slobody',
'Námestie slobody', 'Námestie SNP', 'Námestie SNP',
'Námestie sv. Františ', 'Námestie sv. Petra a', 'Narcisová',
'Nedbalova', 'Nechtíková', 'Nejedlého', 'Nekrasovova', 'Nemčíkova',
'Nerudova', 'Nevädzová', 'Nevská', 'Nezábudková', 'Nezvalova',
'Niťová', 'Nitrianska', 'Nížinná', 'Nobelova', 'Nobelovo námestie',
'Nová', 'Nová Bellova', 'Nová hora', 'Novackého', 'Nové pálenisko',
'Nové záhrady I', 'Nové záhrady II', 'Nové záhrady III',
'Nové záhrady IV', 'Nové záhrady V', 'Nové záhrady VI',
'Nové záhrady VII', 'Novinárska', 'Novobanská', 'Novodvorská',
'Novohorská', 'Novohradská', 'Novosadná', 'Novosvetská', 'Novosvetská',
'Novosvetská', 'Novoveská', 'Nový záhon', 'Obežná', 'Obchodná',
'Oblačná', 'Oblúková', 'Očovská', 'Odbojárov', 'Odborárska',
'Odborárske námestie', 'Odborárske námestie', 'Odeská', 'Ohnicová',
'Okánikova', 'Okružná', 'Olbrachtova', 'Oleandrová', 'Olejkárska',
'Olivová', 'Olšová', 'Ondavská', 'Ondrejovova', 'Ondrejská', 'Opavská',
'Opletalova', 'Oráčska', 'Oravská', 'Orechová', 'Orechová cesta',
'Orechový rad', 'Orenburská', 'Orgovánová', 'Orchideová', 'Oriešková',
'Ormisova', 'Osadná', 'Osiková', 'Oskorušová', 'Osloboditeľská',
'Ostravská', 'Ostredková', 'Ostružinová', 'Osuského', 'Osvetová',
'Otonelská', 'Ovčiarska', 'Ovocná', 'Ovručská', 'Ovsená',
'Ovsištské námestie', 'Ožvoldíkova', 'Ôsma', 'Pajštúnska', 'Palackého',
'Palárikova', 'Palárikova', 'Palinová', 'Palisády', 'Palisády',
'Palisády', 'Palkovičova', 'Palmová', 'Panenská', 'Pankúchova',
'Panónska cesta', 'Panská', 'Papánkovo námestie', 'Papraďová',
'Parcelná', 'Páričkova', 'Parková', 'Partizánska', 'Pasienková',
'Pasienky', 'Pastierska', 'Paulínyho', 'Pave Vukoviča', 'Pavla Blaha',
'Pavla Horova', 'Pavlovičova', 'Pavlovova', 'Pavlovská', 'Pažického',
'Pažítková', 'Pečnianska', 'Pekná cesta', 'Pekná cesta', 'Pekná cesta',
'Pekná vyhliadka', 'Pekníkova', 'Pernecká', 'Perličková',
'Pestovateľská', 'Petara Pasicha', 'Peterská', 'Petöfiho',
'Petržalská', 'Petúniová', 'Pezinská', 'Piata', 'Pieskovcová',
'Piesočná', 'Piešťanská', 'Pifflova', 'Pilárikova', 'Pílová',
'Píniová', 'Pionierska', 'Pionierska', 'Pivoňková', 'Plachého',
'Plachého', 'Planckova', 'Planét', 'Plánky', 'Platanová', 'Plátenícka',
'Plavecká', 'Plickova', 'Pluhová', 'Plynárenská', 'Plzenská',
'Pobrežná', 'Pod agátmi', 'Pod Bôrikom', 'Pod brehmi', 'Pod gaštanmi',
'Pod Kalváriou', 'Pod Klepáčom', 'Pod Kobylou', 'Pod Krásnou hôrkou',
'Pod lesom', 'Pod lipami', 'Pod Lipovým', 'Pod násypom',
'Pod Rovnicami', 'Pod skalou', 'Pod srdcom', 'Pod Strážami',
'Pod Vachmajstrom', 'Pod Válkom', 'Pod vinicami', 'Pod záhradami',
'Pod záhradami', 'Pod Zečákom', 'Podbeľová', 'Podbrezovská', 'Podháj',
'Podhorská', 'Podhorského', 'Podjavorinskej', 'Podkarpatská',
'Podkerepušky', 'Podkolibská', 'Podkorunská', 'Podlesná',
'Podlučinského', 'Podniková', 'Podpriehradná', 'Podtatranského',
'Podunajská', 'Podunajská', 'Podzáhradná', 'Pohánková', 'Pohraničníkov',
'Pohronská', 'Polárna', 'Polianky', 'Poľná', 'Poľnohospodárska',
'Poľný mlyn', 'Poloreckého', 'Poľská', 'Poludníková', 'Poniklecová',
'Popolná', 'Popovova', 'Popradská', 'Porubského', 'Poštová', 'Potočná',
'Považanova', 'Považská', 'Povoznícka', 'Povraznícka', 'Povraznícka',
'Požiarnická', 'Pračanská', 'Prasličková', 'Pražská', 'Pražská',
'Predstaničné námesti', 'Prepoštská', 'Prešernova', 'Prešovská',
'Prešovská', 'Prešovská', 'Pri Bielom kríži', 'Pri dvore',
'Pri Dynamitke', 'Pri Habánskom mlyne', 'Pri hradnej studni',
'Pri hrádzi', 'Pri kolíske', 'Pri kríži', 'Pri mlyne', 'Pri Rochu',
'Pri seči', 'Pri Starej Prachárni', 'Pri Starom háji',
'Pri starom letisku', 'Pri Starom Mýte', 'Pri strelnici', 'Pri Struhe',
'Pri Suchom mlyne', 'Pri Šajbách', 'Pri tehelni', 'Pri trati',
'Pri vinohradoch', 'Pri zvonici', 'Priama cesta', 'Pribylinská',
'Pribinova', 'Pribinova', 'Pribinova', 'Pribišova', 'Prídanky',
'Prídavková', 'Priečna', 'Priehradná', 'Priekopnícka', 'Priekopy',
'Priemyselná', 'Priemyselná', 'Prievozská', 'Prievozská', 'Prievozská',
'Príjazdná', 'Príkopova', 'Primaciálne námestie', 'Prímoravská',
'Prípojná', 'Prístav', 'Prístavná', 'Prokofievova', 'Prokopa Veľkého',
'Prokopova', 'Prúdová', 'Prvá', 'Prvosienková', 'Pšeničná',
'Púchovská', 'Púpavová', 'Pustá', 'Puškinova', 'Pútnická',
'Pyrenejská', 'Rácova', 'Račianska', 'Račianska', 'Račianska',
'Račianska', 'Račianska', 'Račianska', 'Račianske mýto', 'Radarová',
'Rádiová', 'Radlinského', 'Radničná', 'Radničné námestie', 'Radvanská',
'Rajčianska', 'Rajecká', 'Rajská', 'Rajtákova', 'Raketová', 'Rákosová',
'Rascová', 'Rascová', 'Rastislavova', 'Rastlinná', 'Rašelinová',
'Ráztočná', 'Rázusovo nábrežie', 'Ražná', 'Rebarborová', 'Regrútska',
'Remeselnícka', 'Repašského', 'Repíková', 'Repná', 'Rešetkova',
'Revolučná', 'Révová', 'Revúcka', 'Rezedová', 'Riazanská', 'Riazanská',
'Ribayová', 'Ríbezľová', 'Riečna', 'Rigeleho', 'Rímska', 'Rízlingová',
'Riznerova', 'Robotnícka', 'Roľnícka', 'Romanova', 'Röntgenova',
'Rosná', 'Rostovská', 'Rošického', 'Rovná', 'Rovniankova', 'Rovníková',
'Royova', 'Rozálska', 'Rozmarínová', 'Rozvodná', 'Rožňavská',
'Rožňavská', 'Rožňavská', 'Rubínová', 'Rubinsteinova',
'Rudnayovo námestie', 'Rudnícka', 'Rulandská', 'Rumančeková',
'Rumunská', 'Rusovce', 'Rusovská cesta', 'Rustaveliho', 'Ružičková',
'Ružinovská', 'Ružinovská', 'Ružinovská', 'Ružomberská',
'Ružová dolina', 'Ružová dolina', 'Rybárska brána', 'Rybné námestie',
'Rybničná', 'Rybničná', 'Rybničná', 'Rýdziková', 'Rytierska',
'Sabinovská', 'Sabinovská', 'Sad Janka Kráľa', 'Sadmelijská', 'Sadová',
'Samova', 'Saratovská', 'Sartorisova', 'Sasanková', 'Sasinkova',
'Savignonská', 'Seberíniho', 'Sečovská', 'Sedlárska', 'Sedmokrásková',
'Segnáre', 'Segnerova', 'Sekulská', 'Sekurisova', 'Sekýľska',
'Semenárska', 'Semianova', 'Semilonská', 'Senická', 'Senná',
'Septimiova', 'Schengenská', 'Schillerova', 'Schneidera -Trnavské',
'Schody pri starej vo', 'Sibírska', 'Siedma', 'Sienkiewiczova',
'Silvánska', 'Sinokvetná', 'Skalická cesta', 'Skalná', 'Skerličova',
'Sklabinská', 'Sklenárova', 'Sklenárska', 'Skoroceľová', 'Skuteckého',
'Skýcovská', 'Sládkovičova', 'Sladová', 'Slatinská', 'Slávičie údolie',
'Slavín', 'Slepá', 'Sliačska', 'Sliezska', 'Slivková', 'Sĺňavská',
'Slnečná', 'Slnečnicová', 'Slovanské nábrežie', 'Slovienska',
'Slovinec', 'Slovinská', 'Slovnaftská', 'Slovnaftská', 'Slowackého',
'Smetanova', 'Smikova', 'Smolenická', 'Smolnícka', 'Smrečianska',
'Smrečianska', 'Snežienková', 'Soferove schody', 'Socháňova',
'Sochorova', 'Sokolíkova', 'Sokolská', 'Solivarská', 'Sološnická',
'Somolického', 'Somolického', 'Sosnová', 'Sovia', 'Spádová',
'Spätná cesta', 'Spišská', 'Spojná', 'Spoločenská', 'Sputniková',
'Sreznevského', 'Srnčia', 'Stachanovská', 'Stálicová', 'Stanekova',
'Staničná', 'Stará Černicová', 'Stará Ivánska cesta', 'Stará Klenová',
'Stará Prievozská', 'Stará Stupavská', 'Stará Vajnorská',
'Stará vinárska', 'Staré Grunty', 'Staré ihrisko', 'Staré záhrady',
'Starhradská', 'Starohájska', 'Staromestská', 'Staromlynská',
'Starorímska', 'Staroturský chodník', 'Stavbárska', 'Staviteľská',
'Stepná cesta', 'Stodolova', 'Stoklasová', 'Stolárska', 'Strakova',
'Stratená', 'Strážna', 'Strážnická', 'Strážny dom', 'Strečnianska',
'Stredná', 'Strelecká', 'Strelkova', 'Strmá cesta', 'Strmé sady',
'Strmý bok', 'Strmý vŕšok', 'Strojnícka', 'Stromová', 'Stropkovská',
'Struková', 'Studená', 'Studenohorská', 'Stuhová', 'Stupavská',
'Súbežná', 'Sudová', 'Súhvezdná', 'Suchá', 'Suché mýto', 'Suchohradská',
'Súkennícka', 'Súľovská', 'Sumbalova', 'Súmračná', 'Súťažná',
'Svätého Vincenta', 'Svätoplukova', 'Svätoplukova', 'Svätovojtešská',
'Svébska', 'Svetlá', 'Svíbová', 'Svidnícka', 'Svoradova', 'Svrčia',
'Syslia', 'Šafárikovo námestie', 'Šafárikovo námestie', 'Šafránová',
'Šagátova', 'Šachorová', 'Šalátová', 'Šaldova', 'Šalviová',
'Šamorínska', 'Šancová', 'Šancová', 'Šancová', 'Šancová', 'Šándorova',
'Šarišská', 'Šášovská', 'Šaštínska', 'Ševčenkova', 'Šiesta', 'Šikmá',
'Šinkovské', 'Šintavská', 'Šípková', 'Šípová', 'Šíravská', 'Široká',
'Škarniclova', 'Školská', 'Škovránčia', 'Škultétyho', 'Šoltésovej',
'Šošovicová', 'Špieszova', 'Špitálska', 'Športová',
'Šrobárovo námestie', 'Šťastná', 'Štedrá', 'Štefana Králika',
'Štefana Králika', 'Štefana Majera', 'Štefánikova', 'Štefánikova',
'Štefánikova', 'Štefanovičova', 'Štefunkova', 'Štepná', 'Štetinova',
'Štiavnická', 'Štítová', 'Štrbská', 'Štúrova', 'Štvrtá', 'Štyndlova',
'Šulekova', 'Šulekova', 'Šulekova', 'Šumavská', 'Šuňavcova', 'Šúrska',
'Šustekova', 'Šuty', 'Švabinského', 'Švantnerova', 'Tabaková',
'Tablicova', 'Táborská', 'Tajovského', 'Talichova', 'Tallerova',
'Tatranská', 'Tavaríkova osada', 'Tbiliská', 'Tehelná', 'Tehelňa',
'Tehliarska', 'Technická', 'Tekovská', 'Tekvicová', 'Telocvičná',
'Tematínska', 'Teplická', 'Terchovská', 'Teslova', 'Tešedíkova',
'Tetmayerova', 'Thurzova', 'Tibenského', 'Tibériová', 'Tichá',
'Tilgnerova', 'Timravina', 'Tobrucká', 'Tokajícka', 'Tolstého',
'Tománkova', 'Tomanova', 'Tomášikova', 'Tomášikova', 'Tomášikova',
'Tomášikova', 'Tomášikova', 'Toplianska', 'Topoľčianska', 'Topoľová',
'Toryská', 'Továrenská', 'Trajánova', 'Tramínová', 'Tranovského',
'Trávna', 'Trebišovská', 'Trebišovská', 'Trebišovská', 'Trenčianska',
'Treskoňova', 'Tretia', 'Trhová', 'Trinásta', 'Trnavská cesta',
'Trnavská cesta', 'Trnavská cesta', 'Trnavská cesta', 'Trnavská cesta',
'Trnavské mýto', 'Trnková', 'Tŕňová', 'Trojdomy', 'Trojičné námestie',
'Trstínska', 'Tučkova', 'Tuhovská', 'Tulipánová', 'Tupého',
'Tupolevova', 'Turbínova', 'Turčianska', 'Turistická', 'Turnianska',
'Tvarožkova', 'Tylova', 'Tymiánová', 'Tyršovo nábrežie', 'Učiteľská',
'Údernícka', 'Údolná', 'Uhliská', 'Uhorková', 'Uhrova', 'Uhrovecká',
'Ukrajinská', 'Ulica 1. mája', 'Ulica 29. augusta',
'Ulica 29. augusta', 'Ulica 29. augusta', 'Ulica 29. augusta',
'Ulica 8. mája', 'Ulica Alviano', 'Ulica Imricha Karvaš',
'Ulica J. Valašťana D', 'Ulica Janka Alexyho', 'Ulica Jozefa Krónera',
'Ulica Juraja Hronca', 'Ulica Karola Adlera', 'Ulica kpt. Rašu',
'Ulica Leopoldov maje', 'Ulica Ľuda Zúbka', 'Ulica Nad Válkom',
'Ulica padlých hrdino', 'Ulica Pri gaštanovej', 'Ulica Pri pastierni',
'Ulica Pri Vápeníckom', 'Ulica Pri vodnej nád', 'Ulica svornosti',
'Ulica Viktora Tegelh', 'Úprkova', 'Úradnícka', 'Uránová', 'Urbánkova',
'Urbárska', 'Ursínyho', 'Uršulínska', 'Ušiakova', 'Úvozná', 'Uzbecká',
'Úzka', 'Úžiny', 'V záhradách', 'Vajanského nábrežie', 'Vajnorská',
'Vajnorská', 'Vajnorská', 'Vajnorská', 'Vajnorská', 'Vajnorská',
'Vajnorská', 'Vajnorská', 'Vajnorská', 'Valachovej', 'Valašská',
'Valchárska', 'Vančurova', 'Vansovej', 'Vápencová', 'Vápenka',
'Vápenná', 'Varínska', 'Varšavská', 'Varšavská', 'Vavilovova',
'Vavrinecká', 'Vavrínova', 'Vazovova', 'Vážska', 'Včelárska',
'Velehradská', 'Veľké Štepnice', 'Veltlínska', 'Vendelínska',
'Ventúrska', 'Veterná', 'Veternicová', 'Vetvárska', 'Vetvová',
'Vidlicová', 'Viedenská cesta', 'Viedenská cesta', 'Viedenská cesta',
'Vietnamská', 'Vígľašská', 'Vihorlatská', 'Viktorínova', 'Vilová',
'Viničná', 'Vínna', 'Vinohradnícka', 'Višňová', 'Víťazná', 'Vlárska',
'Vlastenecké námestie', 'Vlčie hrdlo', 'Vlčkova', 'Vlčkova', 'Vlčkova',
'Vodné elektrárne', 'Vodný vrch', 'Vosková', 'Votrubova', 'Vrábeľská',
'Vrakunská', 'Vrakunská cesta', 'Vrakunská cesta', 'Vrančovičova',
'Vranovská', 'Vrbánska', 'Vrbenského', 'Vŕbová', 'Vresová',
'Vretenová', 'Vrchná', 'Vrútocká', 'Vtáčikova', 'Vtáčnik', 'Vyhliadka',
'Vyhnianska cesta', 'Výhonská', 'Východná', 'Vysoká', 'Vysokohorská',
'Vyšehradská', 'Vyšná', 'Výtvarná', 'Vývojová', 'Wattova', 'Wilsonova',
'Wolkrova', 'Za bránou', 'Za farou', 'Za Kasárňou', 'Za mlynom',
'Za sokolovňou', 'Za Stanicou', 'Za tehelňou', 'Záborského',
'Zadunajská cesta', 'Záhorácka', 'Záhorská', 'Záhradkárska', 'Záhradná',
'Záhradnícka', 'Záhradnícka', 'Záhradnícka', 'Záhradnícka', 'Záhrady',
'Záhrebská', 'Záhrebská', 'Záhumenná', 'Záhumenská', 'Zákutie',
'Zálužická', 'Zámocká', 'Zámocké schody', 'Zámočnícka', 'Západná',
'Západný rad', 'Záporožská', 'Záruby', 'Zátišie', 'Zátureckého',
'Zavadilová', 'Závadská', 'Záveterná', 'Závodná', 'Závodníkova',
'Zbrody', 'Zdravotnícka', 'Zelená', 'Zeleninová', 'Zelenohorská',
'Zelinárska', 'Zhorínska', 'Zidiny', 'Zimná', 'Zlatá', 'Zlaté piesky',
'Zlaté schody', 'Zlatohorská', 'Znievska', 'Zohorská', 'Zochova',
'Zrinského', 'Zvolenská', 'Zvončeková', 'Žabí majer', 'Žabotova',
'Žarnovická', 'Žatevná', 'Žehrianska', 'Železná', 'Železničiarska',
'Železničná', 'Želiarska', 'Žellova', 'Žiacka', 'Žiarska', 'Židovská',
'Žihľavová', 'Žilinská', 'Žilinská', 'Žitavská', 'Žitná', 'Živnostenská',
'Žižkova', 'Žulová', 'Župné námestie', 'Borágova', 'Parenicová',
'Loparová', 'Jegnešská', 'Jonatanová', 'Monardová', 'Perličková', )
states = (
'Bratislavský kraj', 'Trnavský kraj', 'Trenčiansky kraj',
'Nitriansky kraj', 'Žilinský kraj', 'Banskobystrický kraj',
'Prešovský kraj', 'Košický kraj', )
countries = (
'Afganistan', 'Afghanistanská islamská republika', 'Ålandy',
'Albánsko', 'Albánska republika', 'Alžírsko',
'Alžírska demokratická ľudová republika', 'Americká Samoa', 'Andorra',
'Andorrské kniežatstvo', 'Angola', 'Angolská republika', 'Anguilla',
'Antarktída', 'Antigua a Barbuda', 'Argentína',
'Argentínska republika', 'Arménsko', 'Arménska republika', 'Aruba',
'Austrália', 'Rakúsko', 'Rakúska republika', 'Azerbajdžan',
'Azerbajdžanská republika', 'Bahamy', 'Bahamské spoločenstvo',
'Bahrajn', 'Bahrajnské kráľovstvo', 'Bangladéš',
'Bangladéšska ľudová republika', 'Barbados', 'Bielorusko',
'Bieloruská republika', 'Belgicko', 'Belgické kráľovstvo', 'Belize',
'Benin', 'Beninská republika', 'Bermudy', 'Bhután',
'Bhutánske kráľovstvo', 'Bolívijská republika', 'Bolívijská republika',
'Bolívia', 'Bosna a Hercegovina', 'Republika Bosny a Hercegoviny',
'Botswana', 'Botswanská republika', 'Bouvetov ostrov', 'Brazília',
'Brazílska federatívna republika', 'Britské indickooceánske územie',
'Brunejsko-darussalamský štát', 'Bulharsko', 'Bulharská republika',
'Burkina Faso', 'Burundi', 'Burundská republika', 'Kambodža',
'Kambodžské kráľovstvo', 'Kamerun', 'Kamerunská republika', 'Kanada',
'Kapverdy', 'Kapverdská republika', 'Kajmanie ostrovy',
'Stredoafrická republika', 'Čad', 'Čadská republika', 'Čile',
'Čilská republika', 'Čína', 'Čínska ľudová republika',
'Vianočný ostrov', 'Kokosové ostrovy', 'Kolumbia',
'Kolumbijská republika', 'Komory', 'Komorský zväz', 'Kongo',
'Konžská republika', 'Konžská demokratická republika',
'Cookove ostrovy', 'Kostarika', 'Kostarická republika',
'Pobrežie Slonoviny', 'Republika Pobrežia Slonoviny', 'Chorvátsko',
'Chorvátska republika', 'Kuba', 'Kubánska republika', 'Cyprus',
'Cyperská republika', 'Česká republika', 'Dánsko', 'Dánske kráľovstvo',
'Džibutsko', 'Džibutská republika', 'Dominika',
'Dominické spoločenstvo', 'Dominikánska republika', 'Ekvádor',
'Ekvádorská republika', 'Egypt', 'Egyptská arabská republika',
'Salvádor', 'Salvádorská republika', 'Rovníková Guinea',
'Republika Rovníkovej Guiney', 'Eritrea', 'Estónsko',
'Estónska republika', 'Etiópia',
'Etiópska federatívna demokratická republika', 'Falklandy (Malvíny)',
'Faerské ostrovy', 'Fidži', 'Fínsko', 'Fínska republika', 'Francúzsko',
'Francúzska republika', 'Francúzska Guyana', 'Francúzska Polynézia',
'Francúzske južné a antarktické územia', 'Gabon', 'Gabonská republika',
'Gambia', 'Gambijská republika', 'Gruzínsko', 'Nemecko',
'Nemecká spolková republika', 'Ghana', 'Ghanská republika',
'Gibraltár', 'Grécko', 'Grécka republika', 'Grónsko', 'Grenada',
'Guadeloupe', 'Guam', 'Guatemala', 'Guatemalská republika', 'Guernsey',
'Guinea', 'Guinejská republika', 'Guinea-Bissau',
'Guinejsko-bissauská republika', 'Guyana',
'Guyanská kooperatívna republika', 'Haiti', 'Haitská republika',
'Heardov ostrov', 'Svätá stolica (Vatikánsky mestský štát)',
'Honduras', 'Honduraská republika', 'Hongkong',
'Osobitná administratívna oblasť Číny Hongkong', 'Maďarsko',
'Maďarská republika', 'Island', 'Islandská republika', 'India',
'Indická republika', 'Indonézia', 'Indonézska republika',
'Iránska islamská republika', 'Iránska islamská republika', 'Irak',
'Iracká republika', 'Írsko', 'Man', 'Izrael', 'Izraelský štát',
'Taliansko', 'Talianska republika', 'Jamajka', 'Japonsko', 'Jersey',
'Jordánsko', 'Jordánske hášimovské kráľovstvo', 'Kazachstan',
'Kazašská republika', 'Keňa', 'Kenská republika', 'Kiribati',
'Kiribatská republika', 'Kórejská ľudovodemokratická republika',
'Kórejská ľudovodemokratická republika', 'Kórejská republika',
'Kuvajt', 'Kuvajtský štát', 'Kirgizsko', 'Kirgizská republika',
'Laoská ľudovodemokratická republika', 'Lotyšsko',
'Lotyšská republika', 'Libanon', 'Libanonská republika', 'Lesotho',
'Lesothské kráľovstvo', 'Libéria', 'Libérijská republika', 'Líbya',
'Lichtenštajnsko', 'Lichtenštajnské kniežatstvo', 'Litva',
'Litovská republika', 'Luxembursko', 'Luxemburské veľkovojvodstvo',
'Macao', 'Osobitná administratívna oblasť Číny Macao',
'Macedónska republika', 'Bývalá juhoslovanská republika Macedónsko',
'Madagaskar', 'Madagaskarská republika', 'Malawi',
'Malawijská republika', 'Malajzia', 'Maldivy', 'Maldivská republika',
'Mali', 'Malijská republika', 'Malta', 'Maltská republika',
'Marshallove ostrovy', 'Republika Marshallových ostrovov', 'Martinik',
'Mauritánia', 'Mauritánska islamská republika', 'Maurícius',
'Maurícijská republika', 'Mayotte', 'Mexiko', 'Spojené štáty mexické',
'Mikronézske federatívne štáty', 'Mikronézske federatívne štáty',
'Moldavská republika', 'Moldavská republika', 'Moldavsko', 'Monako',
'Monacké kniežatstvo', 'Mongolsko', 'Čierna Hora', 'Montserrat',
'Maroko', 'Marocké kráľovstvo', 'Mozambik', 'Mozambická republika',
'Mjanmarsko', 'Namíbia', 'Namíbijská republika', 'Nauru',
'Nauruská republika', 'Nepál',
'Nepálska federatívna demokratická republika', 'Holandsko',
'Holandské kráľovstvo', 'Nová Kaledónia', 'Nový Zéland', 'Nikaragua',
'Nikaragujská republika', 'Niger', 'Nigerská republika', 'Nigéria',
'Nigérijská federatívna republika', 'Niue', 'Norfolk',
'Severné Mariány', 'Spoločenstvo Severných Marián', 'Nórsko',
'Nórske kráľovstvo', 'Omán', 'Ománsky sultanát', 'Pakistan',
'Pakistanská islamská republika', 'Palau', 'Palauská republika',
'palestínske územie, Okupované', 'Okupované palestínske územie',
'Panama', 'Panamská republika', 'Papua - Nová Guinea', 'Paraguaj',
'Paraguajská republika', 'Peru', 'Peruánska republika', 'Filipíny',
'Filipínska republika', 'Pitcairnove ostrovy', 'Poľsko',
'Poľská republika', 'Portugalsko', 'Portugalská republika',
'Portoriko', 'Katar', 'Katarský štát', 'Réunion', 'Rumunsko',
'Ruská federácia', 'Rwanda', 'Rwandská republika', 'Svätý Bartolomej',
'Svätá Helena, Ascension a Tristan da Cunha', 'Svätý Krištof a Nevis',
'Svätá Lucia', 'Saint Martin', 'Saint Pierre a Miquelon',
'Svätý Vincent a Grenadíny', 'Samoa', 'Samojský nezávislý štát',
'San Maríno', 'Sanmarínska republika', 'Svätý Tomáš a Princov ostrov',
'Demokratická republika Svätého Tomáša a Princovho ostrova',
'Saudská Arábia', 'Saudskoarabské kráľovstvo', 'Senegal',
'Senegalská republika', 'Srbsko', 'Srbská republika', 'Seychely',
'Seychelská republika', 'Sierra Leone', 'Sierraleonská republika',
'Singapur', 'Singapurská republika', 'Slovensko',
'Slovenská republika', 'Slovinsko', 'Slovinská republika',
'Šalamúnove ostrovy', 'Somálsko', 'Somálska republika', 'Južná Afrika',
'Juhoafrická republika', 'Južná Georgia a Južné Sandwichove ostrovy',
'Španielsko', 'Španielske kráľovstvo', 'Srí Lanka',
'Srílanská demokratická socialistická republika', 'Sudán',
'Sudánska republika', 'Surinam', 'Surinamská republika',
'Svalbard a Jan Mayen', 'Svazijsko', 'Svazijské kráľovstvo', 'Švédsko',
'Švédske kráľovstvo', 'Švajčiarsko', 'Švajčiarska konfederácia',
'Sýrska arabská republika', 'Taiwan, provincia Číny', 'Taiwan',
'Tadžikistan', 'Tadžická republika', 'Tanzánijská zjednotená republika',
'Tanzánijská zjednotená republika', 'Thajsko', 'Thajské kráľovstvo',
'Východný Timor', 'Východotimorská demokratická republika', 'Togo',
'Togská republika', 'Tokelau', 'Tonga', 'Tongské kráľovstvo',
'Trinidad a Tobago', 'Republika Trinidadu a Tobaga', 'Tunisko',
'Tuniská republika', 'Turecko', 'Turecká republika', 'Turkménsko',
'Ostrovy Turks a Caicos', 'Tuvalu', 'Uganda', 'Ugandská republika',
'Ukrajina', 'Spojené arabské emiráty', 'Spojené kráľovstvo',
'Spojené kráľovstvo Veľkej Británie a Severného Írska',
'Spojené štáty', 'Spojené štáty americké',
'Menšie odľahlé ostrovy Spojených štátov', 'Uruguaj',
'Uruguajská východná republika', 'Uzbekistan', 'Uzbecká republika',
'Vanuatu', 'Vanuatská republika', 'Venezuelská bolívarovská republika',
'Venezuela', 'Vietnam', 'Vietnamská socialistická republika',
'Panenské ostrovy, Britské', 'Britské Panenské ostrovy',
'Panenské ostrovy, Americké', 'Panenské ostrovy Spojených štátov',
'Wallis a Futuna', 'Západná Sahara', 'Jemen', 'Jemenská republika',
'Zambia', 'Zambijská republika', 'Zimbabwe', 'Zimbabwianska republika',
'Britské antarktické územie', 'Socialistická republika Barmský zväz',
'Bieloruská sovietska socialistická republika',
'ostrovy Canton a Enderbury',
'Československo, Československá socialistická republika', 'Dahome',
'Zem kráľovnej Maud', 'Východný Timor', 'Metropolitné Francúzsko',
'Francúzske pobrežie Afarov a Isasov',
'Francúzske južné a antarktické územia',
'Nemecká demokratická republika', 'Nemecká spolková republika',
'Gilbertove a lagúnové ostrovy', 'Johnston', 'Midwajské ostrovy',
'Holandské Antily', 'neutrálne pôdy', 'Nové Hebridy',
'Poručnícke územie tichomorských ostrovov', 'Panamská republika',
'Panamské prieplavové pásmo', 'Rumunská socialistická republika',
'Svätý Krištof', 'Srbsko a Čierna Hora', 'Sikkim', 'Rodézia',
'Španielska Sahara', 'Tichomorské ostrovy pod správou USA',
'ZSSR, Zväz sovietskych socialistických republík',
'Republika Horná Volta', 'Vatikánsky mestský štát (Svätá stolica)',
'Vietnamská demokratická republika', 'Wake',
'Jemenská ľudovodemokratická republika', 'Jemenská arabská republika',
'Socialistická federatívna republika Juhoslávia', 'Zairská republika', )
@classmethod
def street_suffix_short(cls):
return cls.random_element(cls.street_suffixes_short)
@classmethod
def street_suffix_long(cls):
return cls.random_element(cls.street_suffixes_long)
@classmethod
def city_name(cls):
return cls.random_element(cls.cities)
@classmethod
def street_name(cls):
return cls.random_element(cls.streets)
@classmethod
def state(cls):
return cls.random_element(cls.states)
|
stevekuznetsov/ansible | refs/heads/devel | lib/ansible/module_utils/docker_common.py | 24 | #
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import json
import sys
import copy
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible.module_utils.six.moves.urllib.parse import urlparse
# Feature-detect docker-py.  Assume it is importable until proven otherwise;
# when the import fails, the error text is preserved so AnsibleDockerClient
# can surface it to the user instead of a bare ImportError traceback.
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_ERROR = None
try:
    from requests.exceptions import SSLError
    from docker import __version__ as docker_version
    from docker.errors import APIError, TLSParameterError, NotFound
    from docker.tls import TLSConfig
    from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
    from docker import auth
    if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
        # docker-py 2.x renamed Client to APIClient and moved the type
        # helpers; alias them so the rest of this file is version-agnostic.
        HAS_DOCKER_PY_2 = True
        from docker import APIClient as Client
        from docker.types import Ulimit, LogConfig
    else:
        from docker import Client
        from docker.utils.types import Ulimit, LogConfig
except ImportError as exc:
    # Remember why the import failed; the module fails later with this text.
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_PY = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
MIN_DOCKER_VERSION = "1.7.0"

# Connection/TLS options shared by every docker_* module; individual modules
# merge their own argument_spec on top of these (see AnsibleDockerClient).
DOCKER_COMMON_ARGS = dict(
    docker_host=dict(type='str', aliases=['docker_url']),
    tls_hostname=dict(type='str'),
    api_version=dict(type='str', aliases=['docker_api_version']),
    timeout=dict(type='int'),
    cacert_path=dict(type='str', aliases=['tls_ca_cert']),
    cert_path=dict(type='str', aliases=['tls_client_cert']),
    key_path=dict(type='str', aliases=['tls_client_key']),
    ssl_version=dict(type='str'),
    tls=dict(type='bool'),
    tls_verify=dict(type='bool'),
    debug=dict(type='bool', default=False),
    filter_logger=dict(type='bool', default=False),
)

DOCKER_MUTUALLY_EXCLUSIVE = [
    ['tls', 'tls_verify']
]

DOCKER_REQUIRED_TOGETHER = [
    ['cert_path', 'key_path']
]

DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
# Raw string: the previous plain literal contained the invalid escape
# sequence '\.' (a DeprecationWarning); the resulting pattern is unchanged.
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
    # No docker-py. Create a place holder client to allow
    # instantiation of AnsibleModule and proper error handing
    # (AnsibleDockerClient subclasses Client, so the name must exist even
    # when docker-py is absent; the real import error is reported later).
    class Client(object):
        def __init__(self, **kwargs):
            pass
class DockerBaseClass(object):
    """Minimal base for docker module helpers: a debug flag plus a logging hook."""

    def __init__(self):
        # Debug output is off unless a subclass explicitly enables it.
        self.debug = False

    def log(self, msg, pretty_print=False):
        """No-op logging hook; kept so subclasses can call self.log() freely."""
        pass
class AnsibleDockerClient(Client):
    """docker-py Client configured from AnsibleModule parameters.

    Merges the shared docker argument spec with the calling module's spec,
    verifies docker-py is importable and new enough, resolves connection and
    TLS settings (module parameters -> environment variables -> defaults),
    and offers convenience lookups for containers and images.  Any failure
    is reported through the module's fail_json.
    """

    def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
                 required_together=None, required_if=None):
        # Layer the module-specific argument spec over the common docker args.
        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if)

        # Fail with an actionable message when docker-py is missing or too old.
        if not HAS_DOCKER_PY:
            self.fail("Failed to import docker-py - %s. Try `pip install docker-py`" % HAS_DOCKER_ERROR)

        if LooseVersion(docker_version) < LooseVersion(MIN_DOCKER_VERSION):
            self.fail("Error: docker-py version is %s. Minimum version required is %s." %
                      (docker_version, MIN_DOCKER_VERSION))

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode
        self._connect_params = self._get_connect_params()

        try:
            super(AnsibleDockerClient, self).__init__(**self._connect_params)
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

    def log(self, msg, pretty_print=False):
        # Debug logging hook; intentionally a no-op in production builds.
        pass

    def fail(self, msg):
        """Abort the module run, reporting msg back to Ansible."""
        self.module.fail_json(msg=msg)

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value):
        """Resolve one option with precedence: module param -> env var -> default.

        Boolean-ish values are normalized to True/False.  For the three
        cert-related parameters the DOCKER_CERT_PATH directory from the
        environment is turned into the concrete file path.
        """
        if param_value is not None:
            # take module parameter value
            if param_value in BOOLEANS_TRUE:
                return True
            if param_value in BOOLEANS_FALSE:
                return False
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if env_value in BOOLEANS_TRUE:
                    return True
                if env_value in BOOLEANS_FALSE:
                    return False
                return env_value

        # take the default
        return default_value

    @property
    def auth_params(self):
        """Resolved connection settings as a dict.

        Precedence: module parameters -> environment variables -> defaults.
        """
        self.log('Getting credentials')

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = self.module.params.get(key)

        if self.module.params.get('use_tls'):
            # support use_tls option in docker_image.py. This will be deprecated.
            use_tls = self.module.params.get('use_tls')
            if use_tls == 'encrypt':
                params['tls'] = True
            if use_tls == 'verify':
                params['tls_verify'] = True

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
                                        DEFAULT_DOCKER_HOST),
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', 'localhost'),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        'auto'),
            cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
            ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            # FIX: the param_name argument was misspelled 'tls_verfy'.
            tls_verify=self._get_value('tls_verify', params['tls_verify'], 'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY),
            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
                                    DEFAULT_TIMEOUT_SECONDS),
        )

        if result['tls_hostname'] is None:
            # get default machine name from the url
            parsed_url = urlparse(result['docker_host'])
            if ':' in parsed_url.netloc:
                result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
            else:
                # FIX: previously assigned the whole ParseResult object
                # instead of the host portion of the URL.
                result['tls_hostname'] = parsed_url.netloc
        return result

    def _get_tls_config(self, **kwargs):
        """Build a docker TLSConfig, converting parameter errors to fail_json."""
        self.log("get_tls_config:")
        for key in kwargs:
            self.log("  %s: %s" % (key, kwargs[key]))
        try:
            tls_config = TLSConfig(**kwargs)
            return tls_config
        except TLSParameterError as exc:
            self.fail("TLS config error: %s" % exc)

    def _get_connect_params(self):
        """Translate resolved auth params into docker-py Client kwargs."""
        auth = self.auth_params

        self.log("connection params:")
        for key in auth:
            self.log("  %s: %s" % (key, auth[key]))

        if auth['tls'] or auth['tls_verify']:
            auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

        if auth['tls'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and no host verification
            tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                              verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls']:
            # TLS with no certs and not host verification
            tls_config = self._get_tls_config(verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and host verification
            if auth['cacert_path']:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  ca_cert=auth['cacert_path'],
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            else:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cacert_path']:
            # TLS with cacert only
            tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
                                              assert_hostname=auth['tls_hostname'],
                                              verify=True,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify']:
            # TLS with verify and no certs
            tls_config = self._get_tls_config(verify=True,
                                              assert_hostname=auth['tls_hostname'],
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        # No TLS
        return dict(base_url=auth['docker_host'],
                    version=auth['api_version'],
                    timeout=auth['timeout'])

    def _handle_ssl_error(self, error):
        """Turn a hostname-mismatch SSLError into a helpful failure message."""
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            # FIX: the format string has three %s placeholders but only two
            # arguments were supplied, which raised TypeError instead of
            # reporting the real problem.
            msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
                "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
                "You may also use TLS without verification by setting the tls parameter to true." \
                % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
            self.fail(msg)
        self.fail("SSL Exception: %s" % (error))

    def get_container(self, name=None):
        '''
        Lookup a container by name (with or without leading '/') or by Id
        (full or prefix) and return the inspection results, or None.
        '''
        if name is None:
            return None

        # Container names from the API carry a leading '/'.
        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'], list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is not None:
            try:
                self.log("Inspecting container Id %s" % result['Id'])
                result = self.inspect_container(container=result['Id'])
                self.log("Completed container inspection")
            except Exception as exc:
                self.fail("Error inspecting container: %s" % exc)

        return result

    def find_image(self, name, tag):
        '''
        Lookup an image and return the inspection results, or None when
        the image is not present locally.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if len(images) == 0:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # the name does not contain a registry, so let's see if docker.io works
                lookup = "docker.io/%s" % name
                self.log("Check for docker.io image: %s" % lookup)
                images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Registry returned more than one result for %s:%s" % (name, tag))

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
            return inspection

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the docker-py images method does not
        work consistently. Instead, get the result set for name and manually check if the tag
        exists.
        '''
        try:
            response = self.images(name=name)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            images = []
            for image in response:
                tags = image.get('RepoTags')
                if tags and lookup in tags:
                    images = [image]
                    break
        return images

    def pull_image(self, name, tag="latest"):
        '''
        Pull an image.  Returns (inspection result, already_up_to_date flag).
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        alreadyToLatest = False
        try:
            for line in self.pull(name, tag=tag, stream=True, decode=True):
                self.log(line, pretty_print=True)
                if line.get('status'):
                    if line.get('status').startswith('Status: Image is up to date for'):
                        alreadyToLatest = True
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" % (name,
                                                                              error_detail.get('code'),
                                                                              error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" % (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        return self.find_image(name, tag), alreadyToLatest
|
t-tran/libcloud | refs/heads/trunk | libcloud/test/compute/test_ovh.py | 15 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from mock import patch
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.ovh import OvhNodeDriver
from libcloud.test.common.test_ovh import BaseOvhMockHttp
from libcloud.test.secrets import OVH_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
class OvhMockHttp(BaseOvhMockHttp):
    """Canned HTTP responses backing the OVH rating-model driver tests."""
    fixtures = ComputeFileFixtures('ovh')

    def _respond(self, body):
        # Every mocked endpoint answers 200 OK; only the payload differs.
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _fixture(self, name):
        # 200 OK whose body is loaded from a canned JSON fixture file.
        return self._respond(self.fixtures.load(name))

    def _json_1_0_auth_time_get(self, method, url, body, headers):
        return self._fixture('auth_time_get.json')

    def _json_1_0_cloud_project_project_id_region_get(self, method, url, body, headers):
        return self._fixture('region_get.json')

    def _json_1_0_cloud_project_project_id_flavor_get(self, method, url, body, headers):
        return self._fixture('flavor_get.json')

    def _json_1_0_cloud_project_project_id_flavor_region_SBG1_get(self, method, url, body, headers):
        return self._fixture('flavor_get.json')

    def _json_1_0_cloud_project_project_id_flavor_foo_id_get(self, method, url, body, headers):
        return self._fixture('flavor_get_detail.json')

    def _json_1_0_cloud_project_project_id_image_get(self, method, url, body, headers):
        return self._fixture('image_get.json')

    def _json_1_0_cloud_project_project_id_image_foo_id_get(self, method, url, body, headers):
        return self._fixture('image_get_detail.json')

    def _json_1_0_cloud_project_project_id_sshkey_region_SBG1_get(self, method, url, body, headers):
        return self._fixture('ssh_get.json')

    def _json_1_0_cloud_project_project_id_sshkey_post(self, method, url, body, headers):
        return self._fixture('ssh_get_detail.json')

    def _json_1_0_ssh_mykey_get(self, method, url, body, headers):
        return self._fixture('ssh_get_detail.json')

    def _json_1_0_cloud_project_project_id_instance_get(self, method, url, body, headers):
        return self._fixture('instance_get.json')

    def _json_1_0_cloud_project_project_id_instance_foo_get(self, method, url, body, headers):
        return self._fixture('instance_get_detail.json')

    def _json_1_0_cloud_project_project_id_instance_foo_delete(self, method, url, body, headers):
        # Deletion succeeds with an empty body.
        return self._respond('')

    def _json_1_0_cloud_project_project_id_instance_post(self, method, url, body, headers):
        return self._fixture('instance_get_detail.json')

    def _json_1_0_cloud_project_project_id_volume_get(self, method, url, body, headers):
        return self._fixture('volume_get.json')

    def _json_1_0_cloud_project_project_id_volume_post(self, method, url, body, headers):
        return self._fixture('volume_get_detail.json')

    def _json_1_0_cloud_project_project_id_volume_foo_get(self, method, url, body, headers):
        return self._fixture('volume_get_detail.json')

    def _json_1_0_cloud_project_project_id_volume_foo_delete(self, method, url, body, headers):
        # Deletion succeeds with an empty body.
        return self._respond('')

    def _json_1_0_cloud_project_project_id_volume_foo_attach_post(self, method, url, body, headers):
        return self._fixture('volume_get_detail.json')

    def _json_1_0_cloud_project_project_id_volume_foo_detach_post(self, method, url, body, headers):
        return self._fixture('volume_get_detail.json')

    def _json_1_0_cloud_project_project_id_volume_snapshot_region_SBG_1_get(self, method, url, body, headers):
        return self._fixture('volume_snapshot_get.json')

    def _json_1_0_cloud_project_project_id_volume_snapshot_get(self, method, url, body, headers):
        return self._fixture('volume_snapshot_get.json')

    def _json_1_0_cloud_project_project_id_volume_snapshot_foo_get(self, method, url, body, headers):
        return self._fixture('volume_snapshot_get_details.json')

    def _json_1_0_cloud_project_project_id_volume_snapshot_foo_snap_delete(self, method, url, body, headers):
        # This endpoint historically replied with a None body; keep it.
        return self._respond(None)

    def _json_1_0_cloud_project_project_id_volume_foo_snapshot__post(self, method, url, body, headers):
        return self._fixture('volume_snapshot_get_details.json')
@patch('libcloud.common.ovh.OvhConnection._timedelta', 42)
class OvhTests(unittest.TestCase):
    """Unit tests for OvhNodeDriver, run against OvhMockHttp fixtures."""

    def setUp(self):
        # Route all driver HTTP traffic through the mock connection.
        OvhNodeDriver.connectionCls.conn_class = OvhMockHttp
        OvhMockHttp.type = None
        self.driver = OvhNodeDriver(*OVH_PARAMS)

    def test_list_locations(self):
        images = self.driver.list_locations()
        self.assertTrue(len(images) > 0)

    def test_list_images(self):
        images = self.driver.list_images()
        self.assertTrue(len(images) > 0)

    def test_get_image(self):
        image = self.driver.get_image('foo-id')
        self.assertEqual(image.id, 'foo-id')

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertTrue(len(sizes) > 0)

    def test_get_size(self):
        size = self.driver.ex_get_size('foo-id')
        self.assertEqual(size.id, 'foo-id')

    def test_list_key_pairs(self):
        # Bug fix: this previously called list_sizes() (copy-paste from
        # test_list_sizes) and never exercised list_key_pairs at all.
        # The location argument mirrors test_get_key_pair below — confirm
        # against the driver's list_key_pairs signature.
        location = self.driver.list_locations()[0]
        keys = self.driver.list_key_pairs(location)
        self.assertTrue(len(keys) > 0)

    def test_get_key_pair(self):
        location = self.driver.list_locations()[0]
        key = self.driver.get_key_pair('mykey', location)
        self.assertEqual(key.name, 'mykey')

    def test_import_key_pair_from_string(self):
        location = self.driver.list_locations()[0]
        key = self.driver.import_key_pair_from_string('mykey', 'material',
                                                      location)
        self.assertEqual(key.name, 'mykey')

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertTrue(len(nodes) > 0)

    def test_get_node(self):
        node = self.driver.ex_get_node('foo')
        self.assertEqual(node.name, 'test_vm')

    def test_create_node(self):
        location = self.driver.list_locations()[0]
        # Bug fix: the image was previously fetched with list_sizes(),
        # which returns NodeSize objects rather than NodeImage objects.
        image = self.driver.list_images()[0]
        size = self.driver.list_sizes(location)[0]
        node = self.driver.create_node(name='test_vm', image=image, size=size,
                                       location=location)
        self.assertEqual(node.name, 'test_vm')

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.driver.destroy_node(node)

    def test_list_volumes(self):
        volumes = self.driver.list_volumes()
        self.assertTrue(len(volumes) > 0)

    def test_get_volume(self):
        volume = self.driver.ex_get_volume('foo')
        self.assertEqual(volume.name, 'testvol')

    def test_create_volume(self):
        location = self.driver.list_locations()[0]
        volume = self.driver.create_volume(size=10, name='testvol',
                                           location=location)
        self.assertEqual(volume.name, 'testvol')

    def test_destroy_volume(self):
        volume = self.driver.list_volumes()[0]
        self.driver.destroy_volume(volume)

    def test_attach_volume(self):
        node = self.driver.list_nodes()[0]
        volume = self.driver.ex_get_volume('foo')
        response = self.driver.attach_volume(node=node, volume=volume)
        self.assertTrue(response)

    def test_detach_volume(self):
        node = self.driver.list_nodes()[0]
        volume = self.driver.ex_get_volume('foo')
        response = self.driver.detach_volume(ex_node=node, volume=volume)
        self.assertTrue(response)

    def test_ex_list_snapshots(self):
        self.driver.ex_list_snapshots()

    def test_ex_get_volume_snapshot(self):
        self.driver.ex_get_volume_snapshot('foo')

    def test_list_volume_snapshots(self):
        volume = self.driver.ex_get_volume('foo')
        self.driver.list_volume_snapshots(volume)

    def test_create_volume_snapshot(self):
        volume = self.driver.ex_get_volume('foo')
        self.driver.create_volume_snapshot(volume)

    def test_destroy_volume_snapshot(self):
        snapshot = self.driver.ex_get_volume_snapshot('foo')
        result = self.driver.destroy_volume_snapshot(snapshot)
        self.assertTrue(result)


if __name__ == '__main__':
    sys.exit(unittest.main())
|
ta2-1/pootle | refs/heads/master | pootle/core/contextmanagers.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from contextlib import contextmanager, nested
from django.dispatch import receiver
from django.dispatch.dispatcher import _make_id
from pootle.core.signals import (
create, delete, update,
update_checks, update_data, update_revisions, update_scores)
class BulkUpdated(object):
    """Mutable accumulator for the effects of one bulk operation.

    Instances collect the objects touched while per-object signals are
    suppressed, so a single combined signal per operation type can be
    emitted afterwards (see ``_callback_handler``).
    """
    # objects queued for creation
    create = None
    # queryset of objects queued for deletion (combined with ``|``)
    delete_qs = None
    delete = None
    # set of primary keys queued for deletion
    delete_ids = None
    update_qs = None
    update = None
    # dict of pk -> {field: value} pending updates
    updates = None
    # set of field names to restrict updates to
    update_fields = None
    # list of instances queued for update
    update_objects = None
    # NOTE(review): ``delete``, ``update`` and ``update_qs`` appear unused
    # by the handlers in this module — confirm before removing.
@contextmanager
def suppress_signal(signal, suppress=None):
    """Temporarily detach receivers from ``signal``.

    If ``suppress`` is given, only receivers registered for those senders
    are removed; otherwise all receivers are removed.  The original
    receivers and the sender cache are restored on exit, even on error.
    """
    handlers = signal.receivers
    receiver_cache = signal.sender_receivers_cache.copy()
    # Fix: the original assigned ``signal.receivers = []`` here and then
    # immediately overwrote it in both branches below; the dead assignment
    # has been removed.
    if suppress:
        # Receiver entries are keyed (id(receiver), id(sender)); drop the
        # ones whose sender id matches a suppressed sender.
        refs = set(_make_id(sup) for sup in suppress)
        signal.receivers = [h for h in handlers if h[0][1] not in refs]
    else:
        signal.receivers = []
    signal.sender_receivers_cache.clear()
    try:
        yield
    finally:
        signal.sender_receivers_cache = receiver_cache
        signal.receivers = handlers
@contextmanager
def keep_data(keep=True, signals=None, suppress=None):
    """Silence the given signals for the duration of the context.

    Defaults to the check/data/revision/score update signals.  When
    ``keep`` is false the context manager is a no-op.
    """
    signals = signals or (
        update_checks,
        update_data,
        update_revisions,
        update_scores)
    if not keep:
        yield
    else:
        with nested(*(suppress_signal(sig, suppress) for sig in signals)):
            yield
def _create_handler(updated, **kwargs):
to_create = kwargs.get("objects") or []
to_create += (
[kwargs.get("instance")]
if kwargs.get("instance")
else [])
if to_create:
updated.create = (updated.create or []) + to_create
def _delete_handler(updated, **kwargs):
if "objects" in kwargs:
if updated.delete_qs is None:
updated.delete_qs = kwargs["objects"]
else:
updated.delete_qs = (
updated.delete_qs
| kwargs["objects"])
if "instance" in kwargs:
if updated.delete_ids is None:
updated.delete_ids = set()
updated.delete_ids.add(kwargs["instance"].pk)
def _update_handler(updated, **kwargs):
if kwargs.get("update_fields"):
if updated.update_fields is None:
updated.update_fields = set()
# update these fields (~only)
updated.update_fields = (
updated.update_fields
| set(kwargs["update_fields"]))
if "updates" in kwargs:
# dict of pk: dict(up=date)
updated.updates = (
kwargs["updates"]
if updated.updates is None
else (updated.updates.update(kwargs["updates"])
or updated.updates))
if "objects" in kwargs:
updated.update_objects = (
kwargs["objects"]
if updated.update_objects is None
else (updated.update_objects
+ kwargs["objects"]))
if "instance" in kwargs:
updated.update_objects = (
[kwargs["instance"]]
if updated.update_objects is None
else (updated.update_objects
+ [kwargs["instance"]]))
def _callback_handler(model, updated):
    """Re-emit the accumulated bulk state as combined signals.

    Sends at most one ``delete``, one ``create`` and one ``update`` signal
    for ``model`` — in that order — built from the state collected on
    ``updated`` while the per-object signals were suppressed.
    """
    # delete
    to_delete = None
    if updated.delete_ids is not None:
        # Rebuild a queryset from the collected primary keys.
        to_delete = model.objects.filter(
            pk__in=updated.delete_ids)
    if updated.delete_qs is not None:
        # Merge with any querysets that were passed directly.
        to_delete = (
            updated.delete_qs
            if to_delete is None
            else to_delete | updated.delete_qs)
    if to_delete is not None:
        delete.send(
            model,
            objects=to_delete)
    # create
    if updated.create is not None:
        create.send(
            model,
            objects=updated.create)
    # update
    should_update = (
        updated.update_objects is not None
        or updated.updates is not None)
    if should_update:
        update.send(
            model,
            objects=updated.update_objects,
            updates=updated.updates,
            update_fields=updated.update_fields)
@contextmanager
def bulk_context(model=None, **kwargs):
    """Collect create/delete/update signals for ``model`` and replay them.

    While the context is active, signals sent for ``model`` are recorded
    on a ``BulkUpdated`` accumulator instead of being handled; on exit the
    ``callback`` handler emits them as combined bulk signals.  Custom
    ``create``/``delete``/``update``/``callback`` handlers may be supplied
    as keyword arguments.
    """
    updated = BulkUpdated()
    signals = [create, delete, update]
    create_handler = kwargs.pop("create", _create_handler)
    delete_handler = kwargs.pop("delete", _delete_handler)
    update_handler = kwargs.pop("update", _update_handler)
    callback_handler = kwargs.pop("callback", _callback_handler)
    # Suppress the existing receivers for these signals (sender ``model``)
    # and install temporary recorders in their place.
    with keep_data(signals=signals, suppress=(model, )):

        @receiver(create, sender=model)
        def handle_create(**kwargs):
            create_handler(updated, **kwargs)

        @receiver(delete, sender=model)
        def handle_delete(**kwargs):
            delete_handler(updated, **kwargs)

        @receiver(update, sender=model)
        def handle_update(**kwargs):
            update_handler(updated, **kwargs)

        yield
    # Original receivers restored; emit the combined signals.
    callback_handler(model, updated)
@contextmanager
def bulk_operations(model=None, models=None, **kwargs):
    """Nest one ``bulk_context`` per model in ``models`` (or ``model``)."""
    if models is None and model is not None:
        models = [model]
    contexts = [bulk_context(m, **kwargs) for m in models]
    with nested(*contexts):
        yield
|
jing-bao/pa-chromium | refs/heads/master | chrome/browser/ui/libgtk2ui/PRESUBMIT.py | 67 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
    """Return the try bots to use for changes under this directory."""
    return ['linux_aura']
|
jbenden/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_ami_copy.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
version_added: "2.0"
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
required: false
default: "default"
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
required: false
default: null
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
required: false
default: null
version_added: "2.2"
kms_key_id:
description:
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
required: false
default: null
version_added: "2.2"
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds. (As of 2.3 this option is deprecated. See boto3 Waiters)
required: false
default: 1200
tags:
description:
- A hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
author: "Amir Moulavi <amir.moulavi@gmail.com>, Tim C <defunct@defunct.io>"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info)
import traceback
try:
import boto
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError, WaiterError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def copy_image(module, ec2):
    """Copy an AMI from the source region and exit the module.

    module : AnsibleModule object
    ec2    : boto3 EC2 client

    Exits via ``module.exit_json`` on success (with the new ``image_id``)
    or ``module.fail_json`` on any error.
    """
    params = {'SourceRegion': module.params.get('source_region'),
              'SourceImageId': module.params.get('source_image_id'),
              'Name': module.params.get('name'),
              'Description': module.params.get('description'),
              'Encrypted': module.params.get('encrypted'),
              }
    # Only pass KmsKeyId when the user actually supplied one.
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        image_id = ec2.copy_image(**params)['ImageId']
        if module.params.get('wait'):
            ec2.get_waiter('image_available').wait(ImageIds=[image_id])
        # Fix: the original bound ``tags`` and then ignored it, re-reading
        # module.params instead; the local is now used.
        tags = module.params.get('tags')
        if tags:
            ec2.create_tags(
                Resources=[image_id],
                Tags=[{'Key': k, 'Value': v} for k, v in tags.items()]
            )
        module.exit_json(changed=True, image_id=image_id)
    except WaiterError as we:
        module.fail_json(msg='An error occurred waiting for the image to become available. (%s)' % str(we),
                         exception=traceback.format_exc())
    except ClientError as ce:
        # Fix: botocore exceptions have no ``.message`` attribute on
        # Python 3 (it raised AttributeError and masked the real error);
        # str() works on both Python 2 and 3.
        module.fail_json(msg=str(ce))
    except NoCredentialsError:
        module.fail_json(msg='Unable to authenticate, AWS credentials are invalid.')
    except Exception as e:
        module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def main():
    """Module entry point: validate arguments, build the client, copy the AMI."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(default='default'),
        description=dict(default=''),
        encrypted=dict(type='bool', default=False, required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        # Deprecated as of 2.3; waiting is delegated to boto3 waiters.
        wait_timeout=dict(default=1200),
        tags=dict(type='dict')))

    module = AnsibleModule(argument_spec=argument_spec)

    # NOTE(review): only boto3 is actually used below, yet missing plain
    # boto is treated as fatal here — confirm whether the boto check is
    # still required or is historical.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # TODO: Check botocore version
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if HAS_BOTO3:
        try:
            ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url,
                             **aws_connect_params)
        except NoRegionError:
            module.fail_json(msg='AWS Region is required')
    else:
        module.fail_json(msg='boto3 required for this module')

    copy_image(module, ec2)


if __name__ == '__main__':
    main()
|
jtorrents/networkx | refs/heads/master | networkx/algorithms/components/tests/test_attracting.py | 35 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestAttractingComponents(object):
    """Tests for attracting-component detection on small digraphs.

    NOTE(review): the assertions assume ``attracting_components`` returns a
    list of node lists (the pre-generator API) — confirm against the
    networkx version in use.
    """

    def setUp(self):
        # G1: three attracting singletons (2, 9, 10).
        self.G1 = nx.DiGraph()
        self.G1.add_edges_from([(5,11),(11,2),(11,9),(11,10),
                                (7,11),(7,8),(8,9),(3,8),(3,10)])

        # G2: a single attracting component {1, 2}.
        self.G2 = nx.DiGraph()
        self.G2.add_edges_from([(0,1),(0,2),(1,1),(1,2),(2,1)])

        # G3: two attracting components, {1, 2} and {3, 4}.
        self.G3 = nx.DiGraph()
        self.G3.add_edges_from([(0,1),(1,2),(2,1),(0,3),(3,4),(4,3)])

    def test_attracting_components(self):
        ac = nx.attracting_components(self.G1)
        assert_true([2] in ac)
        assert_true([9] in ac)
        assert_true([10] in ac)

        # Components are unordered internally; sort for stable comparison.
        ac = nx.attracting_components(self.G2)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1,2)])

        ac = nx.attracting_components(self.G3)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1,2) in ac)
        assert_true((3,4) in ac)
        assert_equal(len(ac), 2)

    def test_number_attacting_components(self):
        # NOTE(review): the "attacting" typo is in the original test name;
        # renaming would change the test id, so it is kept.
        assert_equal(len(nx.attracting_components(self.G1)), 3)
        assert_equal(len(nx.attracting_components(self.G2)), 1)
        assert_equal(len(nx.attracting_components(self.G3)), 2)

    def test_is_attracting_component(self):
        # Whole graphs are not attracting components; the {1, 2} subgraph is.
        assert_false(nx.is_attracting_component(self.G1))
        assert_false(nx.is_attracting_component(self.G2))
        assert_false(nx.is_attracting_component(self.G3))
        g2 = self.G3.subgraph([1,2])
        assert_true(nx.is_attracting_component(g2))

    def test_attracting_component_subgraphs(self):
        subgraphs = nx.attracting_component_subgraphs(self.G1)
        for subgraph in subgraphs:
            assert_equal(len(subgraph), 1)

        self.G2.add_edge(1,2,eattr='red') # test attrs copied to subgraphs
        self.G2.node[2]['nattr']='blue'
        self.G2.graph['gattr']='green'
        subgraphs = nx.attracting_component_subgraphs(self.G2)
        assert_equal(len(subgraphs), 1)
        SG2=subgraphs[0]
        assert_true(1 in SG2)
        assert_true(2 in SG2)
        # Edge, node and graph attributes must be copied into the subgraph.
        assert_equal(SG2[1][2]['eattr'],'red')
        assert_equal(SG2.node[2]['nattr'],'blue')
        assert_equal(SG2.graph['gattr'],'green')

        # Mutating the copy must not leak back into the original graph.
        SG2.add_edge(1,2,eattr='blue')
        assert_equal(SG2[1][2]['eattr'],'blue')
        assert_equal(self.G2[1][2]['eattr'],'red')
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/modules/database/influxdb/influxdb_database.py | 46 | #!/usr/bin/python
# (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_database
short_description: Manage InfluxDB databases
description:
- Manage InfluxDB databases.
version_added: 2.1
author: "Kamil Szczygiel (@kamsz)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
- requests
options:
database_name:
description:
- Name of the database.
required: true
state:
description:
- Determines if the database should be created or destroyed.
choices: [ present, absent ]
default: present
extends_documentation_fragment: influxdb
'''
EXAMPLES = '''
# Example influxdb_database command from Ansible Playbooks
- name: Create database
influxdb_database:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
- name: Destroy database
influxdb_database:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
state: absent
- name: Create database using custom credentials
influxdb_database:
hostname: "{{influxdb_ip_address}}"
username: "{{influxdb_username}}"
password: "{{influxdb_password}}"
database_name: "{{influxdb_database_name}}"
ssl: yes
validate_certs: yes
'''
RETURN = '''
# only defaults
'''
try:
import requests.exceptions
from influxdb import exceptions
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.influxdb import InfluxDb
def find_database(module, client, database_name):
    """Return the database dict named *database_name*, or None if absent.

    Connection failures are reported through ``module.fail_json``.
    """
    try:
        for candidate in client.get_list_database():
            if candidate['name'] == database_name:
                return candidate
    except requests.exceptions.ConnectionError as e:
        module.fail_json(msg=str(e))
    return None
def create_database(module, client, database_name):
    """Create *database_name* (skipped in check mode) and exit changed."""
    if not module.check_mode:
        try:
            client.create_database(database_name)
        except requests.exceptions.ConnectionError as e:
            module.fail_json(msg=str(e))
    # Reported as changed even in check mode, matching module semantics.
    module.exit_json(changed=True)
def drop_database(module, client, database_name):
    """Drop *database_name* (skipped in check mode) and exit changed."""
    if not module.check_mode:
        try:
            client.drop_database(database_name)
        except exceptions.InfluxDBClientError as e:
            module.fail_json(msg=e.content)
    module.exit_json(changed=True)
def main():
    """Module entry point: ensure the InfluxDB database is present or absent."""
    argument_spec = InfluxDb.influxdb_argument_spec()
    argument_spec.update(
        database_name=dict(required=True, type='str'),
        state=dict(default='present', type='str', choices=['present', 'absent'])
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    state = module.params['state']

    influxdb = InfluxDb(module)
    client = influxdb.connect_to_influxdb()
    database_name = influxdb.database_name
    database = find_database(module, client, database_name)

    # Every branch below terminates the module via exit_json/fail_json.
    if state == 'present':
        if database:
            module.exit_json(changed=False)
        else:
            create_database(module, client, database_name)

    if state == 'absent':
        if database:
            drop_database(module, client, database_name)
        else:
            module.exit_json(changed=False)


if __name__ == '__main__':
    main()
|
caphrim007/ansible | refs/heads/devel | lib/ansible/modules/storage/infinidat/infini_host.py | 70 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_host
version_added: 2.3
short_description: Create, Delete and Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Host Name
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
wwns:
description:
- List of wwns of the host
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Create new new host
infini_host:
name: foo.example.com
user: admin
password: secret
system: ibox001
- name: Make sure host bar is available with wwn ports
infini_host:
name: bar.example.com
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
system: ibox01
user: admin
password: secret
- name: Map host foo.example.com to volume bar
infini_host:
name: foo.example.com
volume: bar
system: ibox01
user: admin
password: secret
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_host(module, system):
    """Return the Infinibox host matching the requested name, or None."""
    wanted = module.params['name']
    for candidate in system.hosts.to_list():
        if candidate.get_name() == wanted:
            return candidate
    return None
@api_wrapper
def create_host(module, system):
    """Create a host (skipped in check mode), attach WWN ports and map the
    requested volume if given, then exit reporting a change."""
    changed = True
    if not module.check_mode:
        host = system.hosts.create(name=module.params['name'])
        wwns = module.params['wwns']
        if wwns:
            for wwn in wwns:
                host.add_fc_port(wwn)
        volume_name = module.params['volume']
        if volume_name:
            host.map_volume(system.volumes.get(name=volume_name))
    module.exit_json(changed=changed)
@api_wrapper
def update_host(module, host):
    """Placeholder update path: an existing host is reported unchanged."""
    module.exit_json(changed=False)
@api_wrapper
def delete_host(module, host):
    """Delete the host (skipped in check mode) and exit reporting a change."""
    if not module.check_mode:
        host.delete()
    module.exit_json(changed=True)
def main():
    """Module entry point: dispatch to create/update/delete for the host."""
    argument_spec = infinibox_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            wwns=dict(type='list'),
            volume=dict()
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_INFINISDK:
        module.fail_json(msg='infinisdk is required for this module')

    state = module.params['state']
    system = get_system(module)
    host = get_host(module, system)

    if module.params['volume']:
        try:
            system.volumes.get(name=module.params['volume'])
        except Exception:
            # Fix: was a bare ``except:``, which also swallowed SystemExit
            # and KeyboardInterrupt; narrowed to Exception.
            module.fail_json(msg='Volume {} not found'.format(module.params['volume']))

    if host and state == 'present':
        update_host(module, host)
    elif host and state == 'absent':
        delete_host(module, host)
    elif host is None and state == 'absent':
        module.exit_json(changed=False)
    else:
        create_host(module, system)


if __name__ == '__main__':
    main()
|
sdague/home-assistant | refs/heads/dev | homeassistant/components/kiwi/lock.py | 14 | """Support for the KIWI.KI lock platform."""
import logging
from kiwiki import KiwiClient, KiwiException
import voluptuous as vol
from homeassistant.components.lock import PLATFORM_SCHEMA, LockEntity
from homeassistant.const import (
ATTR_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_PASSWORD,
CONF_USERNAME,
STATE_LOCKED,
STATE_UNLOCKED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
ATTR_TYPE = "hardware_type"
ATTR_PERMISSION = "permission"
ATTR_CAN_INVITE = "can_invite_others"
UNLOCK_MAINTAIN_TIME = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the KIWI lock platform."""
    try:
        client = KiwiClient(config[CONF_USERNAME], config[CONF_PASSWORD])
    except KiwiException as exc:
        _LOGGER.error(exc)
        return

    locks = client.get_locks()
    if not locks:
        # No locks found; abort setup routine.
        _LOGGER.info("No KIWI locks found in your account")
        return

    add_entities([KiwiLock(lock, client) for lock in locks], True)
class KiwiLock(LockEntity):
    """Representation of a Kiwi lock."""

    def __init__(self, kiwi_lock, client):
        """Initialize the lock.

        kiwi_lock: raw lock dict from the KIWI API.
        client: KiwiClient used to open the door.
        """
        self._sensor = kiwi_lock
        self._client = client
        self.lock_id = kiwi_lock["sensor_id"]
        # Locks start in the locked state until an unlock succeeds.
        self._state = STATE_LOCKED

        # NOTE(review): assumes every lock dict carries an "address" dict;
        # a lock without one would raise AttributeError here — confirm
        # against the API.
        address = kiwi_lock.get("address")
        # Rename the API's lat/lng keys to Home Assistant's standard
        # latitude/longitude attribute names.
        address.update(
            {
                ATTR_LATITUDE: address.pop("lat", None),
                ATTR_LONGITUDE: address.pop("lng", None),
            }
        )

        self._device_attrs = {
            ATTR_ID: self.lock_id,
            ATTR_TYPE: kiwi_lock.get("hardware_type"),
            ATTR_PERMISSION: kiwi_lock.get("highest_permission"),
            ATTR_CAN_INVITE: kiwi_lock.get("can_invite"),
            **address,
        }

    @property
    def name(self):
        """Return the lock name, falling back to its address specifier."""
        name = self._sensor.get("name")
        specifier = self._sensor["address"].get("specifier")
        return name or specifier

    @property
    def is_locked(self):
        """Return true if lock is locked."""
        return self._state == STATE_LOCKED

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        return self._device_attrs

    @callback
    def clear_unlock_state(self, _):
        """Clear unlock state automatically."""
        self._state = STATE_LOCKED
        self.async_write_ha_state()

    def unlock(self, **kwargs):
        """Unlock the device."""
        try:
            self._client.open_door(self.lock_id)
        except KiwiException:
            _LOGGER.error("failed to open door")
        else:
            # Show "unlocked" for UNLOCK_MAINTAIN_TIME seconds, then revert
            # to locked — presumably mirroring the hardware's auto-relock;
            # confirm against KIWI device behavior.
            self._state = STATE_UNLOCKED
            self.hass.add_job(
                async_call_later,
                self.hass,
                UNLOCK_MAINTAIN_TIME,
                self.clear_unlock_state,
            )
|
srm912/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_cookies.py | 299 | import os
import unittest
import urllib2
import json
import wptserve
from base import TestUsingServer, doc_root
class TestResponseSetCookie(TestUsingServer):
    """Tests for wptserve's Response cookie-setting helpers."""

    def test_name_value(self):
        # set_cookie should emit a Set-Cookie header with a default Path.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            return "Test"

        route = ("GET", "/test/name_value", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEquals(resp.info()["Set-Cookie"], "name=value; Path=/")

    def test_unset(self):
        # unset_cookie cancels a cookie set earlier in the same response,
        # so no Set-Cookie header should be emitted at all.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            response.unset_cookie("name")
            return "Test"

        route = ("GET", "/test/unset", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertTrue("Set-Cookie" not in resp.info())

    def test_delete(self):
        @wptserve.handlers.handler
        def handler(request, response):
            response.delete_cookie("name")
            return "Test"

        route = ("GET", "/test/delete", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        # delete_cookie emits an empty-valued cookie; split the header into
        # its "key=value" parts for inspection.
        parts = dict(item.split("=") for
                     item in resp.info()["Set-Cookie"].split("; ") if item)
        self.assertEquals(parts["name"], "")
        self.assertEquals(parts["Path"], "/")
        #Should also check that expires is in the past
class TestRequestCookies(TestUsingServer):
    """Tests for reading cookies from the incoming request."""

    def test_set_cookie(self):
        # A Cookie request header should be parsed into request.cookies.
        @wptserve.handlers.handler
        def handler(request, response):
            return request.cookies["name"].value

        route = ("GET", "/test/set_cookie", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], headers={"Cookie": "name=value"})
        self.assertEquals(resp.read(), "value")


if __name__ == '__main__':
    unittest.main()
|
kmike/django-easy-maps | refs/heads/master | runtests.py | 1 | #!/usr/bin/env python
import os
import sys
from django.core.management import execute_from_command_line
# Point Django at the local test settings before any management command runs.
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'
def runtests():
    """Invoke Django's ``test`` management command and return its result."""
    return execute_from_command_line([sys.argv[0], 'test'])


if __name__ == '__main__':
    sys.exit(runtests())
|
jabbalaci/PrimCom | refs/heads/master | data/python/PATH.py | 1 | import os
# root directory of the application
ROOT = os.path.dirname(os.path.abspath(__file__))
|
kenshay/ImageScripter | refs/heads/master | Script_Runner/PYTHON/Lib/this.py | 948 | s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
# Build the ROT13 mapping for upper- and lowercase ASCII letters.
d = {}
for base in (65, 97):  # ord('A'), ord('a')
    for offset in range(26):
        d[chr(base + offset)] = chr(base + (offset + 13) % 26)

# Decode and print the ROT13-encoded text in ``s``.
print("".join([d.get(ch, ch) for ch in s]))
|
chfw/moban | refs/heads/master | tests/test_reporter.py | 1 | import sys
from mock import patch
from nose.tools import eq_
from moban.externals import reporter
PY2 = sys.version_info[0] == 2
if PY2:
from StringIO import StringIO
else:
from io import StringIO
def test_partial_run():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_partial_run("Actioned", 1, 20)
patcher.stop()
eq_(fake_stdout.getvalue(), "Actioned 1 out of 20 files.\n")
def test_full_run():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_full_run("Worked on", 20)
patcher.stop()
eq_(fake_stdout.getvalue(), "Worked on 20 files.\n")
def test_error_message():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_error_message("something wrong")
patcher.stop()
eq_(fake_stdout.getvalue(), "Error: something wrong\n")
def test_info_message():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_info_message("for your information")
patcher.stop()
eq_(fake_stdout.getvalue(), "Info: for your information\n")
def test_warning_message():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_warning_message("Maybe you wanna know")
patcher.stop()
eq_(fake_stdout.getvalue(), "Warning: Maybe you wanna know\n")
def test_report_templating():
patcher = patch("sys.stdout", new_callable=StringIO)
fake_stdout = patcher.start()
reporter.report_templating("Transforming", "a", "b")
patcher.stop()
eq_(fake_stdout.getvalue(), "Transforming a to b\n")
def test_no_action():
    """report_no_action prints the fixed 'No actions performed' notice."""
    # Context manager guarantees sys.stdout is unpatched on any exit path,
    # unlike the manual start()/stop() pair.
    with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
        reporter.report_no_action()
    eq_(fake_stdout.getvalue(), "No actions performed\n")
def test_format_single():
    """_format_single turns a plural unit into its singular form for count 1."""
    singular = reporter._format_single("1 files", 1)
    eq_(singular, "1 file")
def test_report_template_not_in_moban_file():
    """An undeclared template triggers a 'not defined' warning line."""
    # Context manager guarantees sys.stdout is unpatched on any exit path,
    # unlike the manual start()/stop() pair.
    with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
        reporter.report_template_not_in_moban_file("test.jj2")
    eq_(
        fake_stdout.getvalue(),
        "Warning: test.jj2 is not defined in your moban file!\n",
    )
def test_report_file_extension_not_needed():
    """Ad-hoc mode emits an info line that the file extension is optional."""
    # Context manager guarantees sys.stdout is unpatched on any exit path,
    # unlike the manual start()/stop() pair.
    with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
        reporter.report_file_extension_not_needed()
    eq_(
        fake_stdout.getvalue(),
        "Info: File extension is not required for ad-hoc type\n",
    )
|
eleonrk/SickRage | refs/heads/master | lib/chardet/sbcsgroupprober.py | 273 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
from .langturkishmodel import Latin5TurkishModel
class SBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported single-byte charset model."""

    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        # Plain single-byte models, kept in the original priority order.
        plain_models = (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
            # after we retrain model.
            # Latin2HungarianModel,
            # Win1250HungarianModel,
            TIS620ThaiModel,
            Latin5TurkishModel,
        )
        self.probers = [SingleByteCharSetProber(model) for model in plain_models]

        # Hebrew uses one model in two byte orders (logical windows-1255 and
        # visual), arbitrated by a dedicated HebrewProber that is wired to
        # both single-byte probers.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self.probers.extend([hebrew_prober, logical_hebrew, visual_hebrew])
        self.reset()
|
mitsuhiko/pip | refs/heads/develop | pip/_vendor/colorama/__init__.py | 450 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style
from .ansitowin32 import AnsiToWin32
# Package version string exposed at the colorama top level.
VERSION = '0.2.7'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.